| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
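The columns above describe one record per source file. As a minimal sketch of how such records might be screened before use, assuming they are available as an iterable of dicts keyed by these column names (`records` and every threshold below are illustrative assumptions, not part of the dataset definition):

    def keep_record(row):
        # Keep Python files that are not huge and still look like real code.
        # All cutoff values are arbitrary examples.
        return (
            row["ext"] == "py"
            and row["size"] < 100_000
            and row["max_line_length"] < 1_000
            and row["alphanum_fraction"] > 0.25
        )

    kept = [row for row in records if keep_record(row)]  # `records`: any iterable of row dicts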
7907b5defff558818aeaf6006e2b2b5c51929273
| 3,086
|
py
|
Python
|
lib/markov_usernames.py
|
jabbalaci/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 73
|
2015-03-31T01:12:26.000Z
|
2021-07-10T19:45:04.000Z
|
lib/markov_usernames.py
|
doc22940/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 2
|
2017-01-06T17:17:42.000Z
|
2017-08-23T18:35:55.000Z
|
lib/markov_usernames.py
|
doc22940/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 27
|
2015-01-03T18:51:23.000Z
|
2020-11-15T11:49:51.000Z
|
#!/usr/bin/env python3
import random
import sys
"""
Markov chains name generator in Python
From http://roguebasin.roguelikedevelopment.org/index.php?title=Markov_chains_name_generator_in_Python .
"""
# from http://www.geocities.com/anvrill/names/cc_goth.html
PLACES = ['Adara', 'Adena', 'Adrianne', 'Alarice', 'Alvita', 'Amara', 'Ambika', 'Antonia', 'Araceli', 'Balandria', 'Basha',
'Beryl', 'Bryn', 'Callia', 'Caryssa', 'Cassandra', 'Casondrah', 'Chatha', 'Ciara', 'Cynara', 'Cytheria', 'Dabria', 'Darcei',
'Deandra', 'Deirdre', 'Delores', 'Desdomna', 'Devi', 'Dominique', 'Drucilla', 'Duvessa', 'Ebony', 'Fantine', 'Fuscienne',
'Gabi', 'Gallia', 'Hanna', 'Hedda', 'Jerica', 'Jetta', 'Joby', 'Kacila', 'Kagami', 'Kala', 'Kallie', 'Keelia', 'Kerry',
'Kerry-Ann', 'Kimberly', 'Killian', 'Kory', 'Lilith', 'Lucretia', 'Lysha', 'Mercedes', 'Mia', 'Maura', 'Perdita', 'Quella',
'Riona', 'Safiya', 'Salina', 'Severin', 'Sidonia', 'Sirena', 'Solita', 'Tempest', 'Thea', 'Treva', 'Trista', 'Vala', 'Winta']
###############################################################################
# Markov Name model
# A random name generator, by Peter Corbett
# http://www.pick.ucam.org/~ptc24/mchain.html
# This script is hereby entered into the public domain
###############################################################################
class Mdict:
    def __init__(self):
        self.d = {}
    def __getitem__(self, key):
        if key in self.d:
            return self.d[key]
        else:
            raise KeyError(key)
    def add_key(self, prefix, suffix):
        if prefix in self.d:
            self.d[prefix].append(suffix)
        else:
            self.d[prefix] = [suffix]
    def get_suffix(self, prefix):
        l = self[prefix]
        return random.choice(l)
class MName:
    """
    A name from a Markov chain
    """
    def __init__(self, chainlen=2):
        """
        Building the dictionary
        """
        if chainlen > 10 or chainlen < 1:
            print("Chain length must be between 1 and 10, inclusive")
            sys.exit(0)
        self.mcd = Mdict()
        oldnames = []
        self.chainlen = chainlen
        for l in PLACES:
            l = l.strip()
            oldnames.append(l)
            s = " " * chainlen + l
            for n in range(0, len(l)):
                self.mcd.add_key(s[n:n+chainlen], s[n+chainlen])
            self.mcd.add_key(s[len(l):len(l)+chainlen], "\n")
    def New(self):
        """
        New name from the Markov chain
        """
        prefix = " " * self.chainlen
        name = ""
        suffix = ""
        while True:
            suffix = self.mcd.get_suffix(prefix)
            if suffix == "\n" or len(name) > 9:
                break
            else:
                name = name + suffix
                prefix = prefix[1:] + suffix
        return name.capitalize()
#############################################################################
if __name__ == "__main__":
    li = []
    for i in range(10):
        li.append(MName().New())
    for e in sorted(li):
        print(e.lower())
| 34.288889
| 125
| 0.515554
|
import random
import sys
PLACES = ['Adara', 'Adena', 'Adrianne', 'Alarice', 'Alvita', 'Amara', 'Ambika', 'Antonia', 'Araceli', 'Balandria', 'Basha',
'Beryl', 'Bryn', 'Callia', 'Caryssa', 'Cassandra', 'Casondrah', 'Chatha', 'Ciara', 'Cynara', 'Cytheria', 'Dabria', 'Darcei',
'Deandra', 'Deirdre', 'Delores', 'Desdomna', 'Devi', 'Dominique', 'Drucilla', 'Duvessa', 'Ebony', 'Fantine', 'Fuscienne',
'Gabi', 'Gallia', 'Hanna', 'Hedda', 'Jerica', 'Jetta', 'Joby', 'Kacila', 'Kagami', 'Kala', 'Kallie', 'Keelia', 'Kerry',
'Kerry-Ann', 'Kimberly', 'Killian', 'Kory', 'Lilith', 'Lucretia', 'Lysha', 'Mercedes', 'Mia', 'Maura', 'Perdita', 'Quella',
'Riona', 'Safiya', 'Salina', 'Severin', 'Sidonia', 'Sirena', 'Solita', 'Tempest', 'Thea', 'Treva', 'Trista', 'Vala', 'Winta']
| true
| true
|
7907b60fa02e035476ed63d1da9f37e07224dad8
| 92
|
py
|
Python
|
server/ascii_art_server/api/ascii_art/apps.py
|
jmacera95/ascii-art
|
ad37b3e8f15f6ca87e4ac8237385c5aa18d6176a
|
[
"MIT"
] | 29
|
2020-10-01T12:23:46.000Z
|
2022-01-30T10:46:39.000Z
|
server/ascii_art_server/api/ascii_art/apps.py
|
jmacera95/ascii-art
|
ad37b3e8f15f6ca87e4ac8237385c5aa18d6176a
|
[
"MIT"
] | 101
|
2020-10-01T05:31:33.000Z
|
2021-10-05T11:39:15.000Z
|
server/ascii_art_server/api/ascii_art/apps.py
|
jmacera95/ascii-art
|
ad37b3e8f15f6ca87e4ac8237385c5aa18d6176a
|
[
"MIT"
] | 163
|
2020-10-01T07:15:05.000Z
|
2022-03-07T17:57:27.000Z
|
from django.apps import AppConfig
class AsciiArtConfig(AppConfig):
    name = 'ascii_art'
| 15.333333
| 33
| 0.76087
|
from django.apps import AppConfig
class AsciiArtConfig(AppConfig):
    name = 'ascii_art'
| true
| true
|
7907b70c20a6aaa8d7acbddba4f082ba383a82db
| 812
|
py
|
Python
|
model/contact.py
|
Valeryiar/python_training
|
c4e92ee783134bb22a38b6cb38f912cf99bef2d8
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
Valeryiar/python_training
|
c4e92ee783134bb22a38b6cb38f912cf99bef2d8
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
Valeryiar/python_training
|
c4e92ee783134bb22a38b6cb38f912cf99bef2d8
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Contact:
    def __init__(self, firstname=None, lastname=None, homephone=None, mobilephone=None, workphone=None, secondaryphone=None, id=None):
        self.firstname = firstname
        self.lastname = lastname
        self.homephone = homephone
        self.workphone = workphone
        self.mobilephone = mobilephone
        self.secondaryphone = secondaryphone
        self.id = id
    def __repr__(self):
        return "%s:%s %s" % (self.id, self.firstname, self.lastname)
    def __eq__(self, other):
        return (self.id is None or other.id is None or self.id == other.id) \
            and self.firstname == other.firstname and self.lastname == other.lastname
    def id_or_max(self):
        if self.id:
            return int(self.id)
        else:
            return maxsize
| 31.230769
| 134
| 0.64532
|
from sys import maxsize
class Contact:
    def __init__(self, firstname=None, lastname=None, homephone=None, mobilephone=None, workphone=None, secondaryphone=None, id=None):
        self.firstname = firstname
        self.lastname = lastname
        self.homephone = homephone
        self.workphone = workphone
        self.mobilephone = mobilephone
        self.secondaryphone = secondaryphone
        self.id = id
    def __repr__(self):
        return "%s:%s %s" % (self.id, self.firstname, self.lastname)
    def __eq__(self, other):
        return (self.id is None or other.id is None or self.id == other.id) \
            and self.firstname == other.firstname and self.lastname == other.lastname
    def id_or_max(self):
        if self.id:
            return int(self.id)
        else:
            return maxsize
| true
| true
|
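A short, hypothetical usage sketch for the Contact model above (the contact values are made up): `id_or_max` returns `sys.maxsize` for contacts without an id, so they sort after contacts that have one.

    contacts = [Contact(firstname="Ann", id="2"), Contact(firstname="Bob"), Contact(firstname="Cy", id="1")]
    contacts.sort(key=Contact.id_or_max)  # ids "1", "2" first, then the id-less contact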
7907b7928e68a7671c81899ce83e679795100c4c
| 5,048
|
py
|
Python
|
network/layers/convolution_im2col.py
|
metataro/DirectFeedbackAlignment
|
7e2cbc3f001ac2290a15440628bb2b97d4ec52ab
|
[
"MIT"
] | 5
|
2020-04-30T11:36:46.000Z
|
2021-09-09T06:08:34.000Z
|
network/layers/convolution_im2col.py
|
metataro/DirectFeedbackAlignment
|
7e2cbc3f001ac2290a15440628bb2b97d4ec52ab
|
[
"MIT"
] | null | null | null |
network/layers/convolution_im2col.py
|
metataro/DirectFeedbackAlignment
|
7e2cbc3f001ac2290a15440628bb2b97d4ec52ab
|
[
"MIT"
] | 1
|
2021-01-07T03:10:32.000Z
|
2021-01-07T03:10:32.000Z
|
import numpy as np
from network.activation import Activation
from network.layer import Layer
from network.utils.im2col_cython import im2col_cython, col2im_cython
class Convolution(Layer):
    def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,
                 last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:
        assert len(filter_shape) == 4, \
            "invalid filter shape: 4-tuple required, {}-tuple given".format(len(filter_shape))
        super().__init__()
        self.filter_shape = filter_shape
        self.stride = stride
        self.padding = padding
        self.dropout_rate = dropout_rate
        self.activation = activation
        self.last_layer = last_layer
        self.weight_initializer = weight_initializer
        self.fb_weight_initializer = fb_weight_initializer
    def initialize(self, input_size, num_classes, train_method) -> tuple:
        assert np.size(input_size) == 3, \
            "invalid input size: 3-tuple required for convolution layer"
        c_in, h_in, w_in = input_size
        f, c_f, h_f, w_f = self.filter_shape
        assert c_in == c_f, \
            "input channel dimension ({}) not compatible with filter channel dimension ({})".format(c_in, c_f)
        assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \
            "filter width ({}) not compatible with input width ({})".format(h_f, h_in)
        assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \
            "filter height ({}) not compatible with input height ({})".format(h_f, h_in)
        self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1
        self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1
        # initialize weights
        if self.weight_initializer is None:
            sqrt_fan_in = np.sqrt(c_in * h_in * w_in)
            self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)
        else:
            self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))
        # initialize feedback weights
        if self.fb_weight_initializer is None:
            sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)
            # self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out))
            self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))
        else:
            # self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out))
            self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))
        # initialize bias units
        self.b = np.zeros(f)
        return f, self.h_out, self.w_out
    def forward(self, X, mode='predict') -> np.ndarray:
        n_in, c, h_in, w_in = X.shape
        n_f, c, h_f, w_f = self.W.shape
        self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride)  # <->
        z = self.W.reshape((n_f, -1)).dot(self.x_cols)
        z += self.b.reshape(-1, 1)  # +
        z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)
        self.a_in = X
        if self.activation is None:
            self.a_out = z
        else:
            self.a_out = self.activation.forward(z)
        if mode == 'train' and self.dropout_rate > 0:
            # self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate)
            self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)
            self.a_out *= self.dropout_mask
        return self.a_out
    def dfa(self, E: np.ndarray) -> tuple:
        # E = np.einsum('ij,jklm->iklm', E, self.B)
        n_f, c_f, h_f, w_f = self.W.shape
        E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dW, db
    def back_prob(self, E: np.ndarray) -> tuple:
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        n_in, c_in, h_in, w_in = self.a_in.shape
        n_f, c_f, h_f, w_f = self.W.shape
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)
        dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)
        dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)
        dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dX, dW, db
    def has_weights(self) -> bool:
        return True
| 40.384
| 133
| 0.602813
|
import numpy as np
from network.activation import Activation
from network.layer import Layer
from network.utils.im2col_cython import im2col_cython, col2im_cython
class Convolution(Layer):
def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,
last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:
assert len(filter_shape) == 4, \
"invalid filter shape: 4-tuple required, {}-tuple given".format(len(filter_shape))
super().__init__()
self.filter_shape = filter_shape
self.stride = stride
self.padding = padding
self.dropout_rate = dropout_rate
self.activation = activation
self.last_layer = last_layer
self.weight_initializer = weight_initializer
self.fb_weight_initializer = fb_weight_initializer
def initialize(self, input_size, num_classes, train_method) -> tuple:
assert np.size(input_size) == 3, \
"invalid input size: 3-tuple required for convolution layer"
c_in, h_in, w_in = input_size
f, c_f, h_f, w_f = self.filter_shape
assert c_in == c_f, \
"input channel dimension ({}) not compatible with filter channel dimension ({})".format(c_in, c_f)
assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \
"filter width ({}) not compatible with input width ({})".format(h_f, h_in)
assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \
"filter height ({}) not compatible with input height ({})".format(h_f, h_in)
self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1
self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1
if self.weight_initializer is None:
sqrt_fan_in = np.sqrt(c_in * h_in * w_in)
self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)
else:
self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))
if self.fb_weight_initializer is None:
sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)
self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))
else:
self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))
self.b = np.zeros(f)
return f, self.h_out, self.w_out
def forward(self, X, mode='predict') -> np.ndarray:
n_in, c, h_in, w_in = X.shape
n_f, c, h_f, w_f = self.W.shape
self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride)
z = self.W.reshape((n_f, -1)).dot(self.x_cols)
z += self.b.reshape(-1, 1)
z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)
self.a_in = X
if self.activation is None:
self.a_out = z
else:
self.a_out = self.activation.forward(z)
if mode == 'train' and self.dropout_rate > 0:
self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)
self.a_out *= self.dropout_mask
return self.a_out
def dfa(self, E: np.ndarray) -> tuple:
n_f, c_f, h_f, w_f = self.W.shape
E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))
if self.dropout_rate > 0:
E *= self.dropout_mask
if self.activation is None:
E *= self.a_out
else:
E *= self.activation.gradient(self.a_out)
dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)
db = np.sum(E, axis=(0, 2, 3))
return dW, db
def back_prob(self, E: np.ndarray) -> tuple:
if self.dropout_rate > 0:
E *= self.dropout_mask
n_in, c_in, h_in, w_in = self.a_in.shape
n_f, c_f, h_f, w_f = self.W.shape
if self.activation is None:
E *= self.a_out
else:
E *= self.activation.gradient(self.a_out)
delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)
dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)
dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)
dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)
db = np.sum(E, axis=(0, 2, 3))
return dX, dW, db
def has_weights(self) -> bool:
return True
| true
| true
|
7907b8df1fef2c3216b7a8ddf3f4e55cf27d281e
| 580
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-file-listing/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/perl-file-listing/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/perl-file-listing/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlFileListing(PerlPackage):
"""Parse directory listing"""
homepage = "http://search.cpan.org/~gaas/File-Listing-6.04/lib/File/Listing.pm"
url = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/File-Listing-6.04.tar.gz"
version('6.04', '83f636b477741f3a014585bb9cc079a6')
depends_on('perl-http-date', type=('build', 'run'))
| 32.222222
| 90
| 0.715517
|
from spack import *
class PerlFileListing(PerlPackage):
    homepage = "http://search.cpan.org/~gaas/File-Listing-6.04/lib/File/Listing.pm"
    url      = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/File-Listing-6.04.tar.gz"
    version('6.04', '83f636b477741f3a014585bb9cc079a6')
    depends_on('perl-http-date', type=('build', 'run'))
| true
| true
|
7907b99c86f63ebda9f0e3eb4ef5f5e50c0aacaa
| 204
|
py
|
Python
|
maingui/urls.py
|
edgarceron/agent_console
|
a75501957722a349c7276e4d199425897f351bc0
|
[
"BSD-3-Clause"
] | null | null | null |
maingui/urls.py
|
edgarceron/agent_console
|
a75501957722a349c7276e4d199425897f351bc0
|
[
"BSD-3-Clause"
] | 3
|
2021-03-30T13:46:24.000Z
|
2021-09-22T19:18:18.000Z
|
maingui/urls.py
|
edgarceron/agent_console
|
a75501957722a349c7276e4d199425897f351bc0
|
[
"BSD-3-Clause"
] | null | null | null |
""" Contains the urls for the maingui module"""
from django.urls import path
from . import views
urlpatterns = [
    path('', views.index, name='index'),
    path('login', views.login, name='login'),
]
| 20.4
| 47
| 0.661765
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.index, name='index'),
    path('login', views.login, name='login'),
]
| true
| true
|
7907b9d4eae1ce641ff64a556aea8dba136a09bb
| 5,022
|
py
|
Python
|
tests/test_headerpage.py
|
BradleyPelton/NetflixSelenium
|
ec4cb51266538b5ed4679f8c265723751b906a7c
|
[
"MIT"
] | 1
|
2020-04-29T01:54:28.000Z
|
2020-04-29T01:54:28.000Z
|
tests/test_headerpage.py
|
Souleymane03/NetflixSelenium
|
ec4cb51266538b5ed4679f8c265723751b906a7c
|
[
"MIT"
] | null | null | null |
tests/test_headerpage.py
|
Souleymane03/NetflixSelenium
|
ec4cb51266538b5ed4679f8c265723751b906a7c
|
[
"MIT"
] | 2
|
2021-09-13T12:45:57.000Z
|
2022-01-14T23:36:26.000Z
|
import unittest
import xmlrunner
# from selenium import webdriver
import pagemodels.headerpage
import tests.pickledlogin
import browserconfig
# VIDEO OF EXECUTION
# https://gyazo.com/b20fd223076bf34c1f2c9b94a4f1fe0a
# 2020-04-20 All tests passing, refactor complete
# All tests passed 5 executions in a row. v1 ready to ship.
# BUG- First execution will murder the cookies and break the following tests.
# interestingly, every subsequent test will pass once cookies are hard reset.
class HeaderPageTests(unittest.TestCase):
    """Test cases for the use of the header features atop most netflix pages."""
    @classmethod
    def setUpClass(cls):
        """Launch the webdriver of choice with selected options (see browserconfig.py).
        Then log in using pickled cookies (see tests/pickledlogin.py)."""
        if browserconfig.current_browser in ['chrome', 'firefox']:
            cls.driver = browserconfig.driver_runner(
                executable_path=browserconfig.driver_path,
                desired_capabilities=browserconfig.capabilities
            )
        elif browserconfig.current_browser == 'edge':
            cls.driver = browserconfig.driver_runner(
                executable_path=browserconfig.driver_path,
                capabilities=browserconfig.capabilities
            )
        tests.pickledlogin.pickled_login(cls.driver)
    @classmethod
    def tearDownClass(cls):
        """Closes the browser and shuts down the driver executable."""
        cls.driver.quit()
    def setUp(self):
        """Return to the home page, netflix.com/browse, the staging place for header tests."""
        self.driver.get("https://netflix.com/browse")
    def test_logout_from_header(self):
        """Log out from the header."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.logout()
        # user is redirected to https://www.netflix.com/logout after logging out
        self.assertIn('logout', self.driver.current_url)
        # CLEANUP
        # log back in using the pickled cookies
        tests.pickledlogin.pickled_login(self.driver)
    def test_navigate_home_from_my_list(self):
        """Using the giant Netflix logo in the top left, navigate to the home page /browse/
        from the my-list page."""
        self.driver.get("https://www.netflix.com/browse/my-list")
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.navigate_to_home()
        self.assertEqual("https://www.netflix.com/browse", self.driver.current_url)
    def test_navigate_to_manage_profile(self):
        """Using the header account dropdown, navigate to the manage profile page."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.navigate_to_manage_profile()
        # user is redirected to https://www.netflix.com/profiles/manage
        self.assertIn('profiles/manage', self.driver.current_url)
    def test_search_for_shawshank(self):
        """Using the search field, search for 'shawshank' and assert that shawshank was found."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.search("shawshank")
        self.assertIn("The Shawshank Redemption", self.driver.page_source)
        # I kind of like this assert now that I think about it. It's testing both the search
        # function and Netflix's search algorithm.
        # NOTE- the test will not fail if "The Shawshank Redemption" is removed. Netflix displays
        # "similar to {title_name}" for titles its search algorithm recognizes
    def test_click_top_notification(self):
        """Click the top notification and assert that the page has changed."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.click_top_notification()
        # Assert that we navigated to a notification page or a title page (only 2 options)
        self.assertTrue(
            'title' in self.driver.current_url or 'notification' in self.driver.current_url
        )
    # DIDN'T MAKE THE FIRST CUT OF TESTS
    # I could have 5 more tests here, one for each of the header buttons.
    # Those are about as elementary as tests get. Skipping them but TODO- OKAY TO HAVE
    # def test_clear_all_notifications(self):
    #     """This is easy to do, but impossible to perfect. Netflix doesn't allow any sort of
    #     'mark notification as unread', so I have no way of generating notifications. Since I have
    #     no way of managing the state, THIS TEST CAN NEVER BE RUN MORE THAN ONCE A DAY. Thus I am
    #     forced to leave it out in order to avoid inconsistent test results"""
    #     header_page = pagemodels.headerpage.HeaderPage(self.driver)
    #     header_page.clear_notifications()
if __name__ == '__main__':
    with open(r'xmltestresults\pretestresults.xml', 'wb') as output:
        unittest.main(
            testRunner=xmlrunner.XMLTestRunner(output=output),
            failfast=False, buffer=False, catchbreak=False)
| 41.504132
| 98
| 0.696734
|
import unittest
import xmlrunner
import pagemodels.headerpage
import tests.pickledlogin
import browserconfig
class HeaderPageTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
if browserconfig.current_browser in ['chrome', 'firefox']:
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
desired_capabilities=browserconfig.capabilities
)
elif browserconfig.current_browser == 'edge':
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
capabilities=browserconfig.capabilities
)
tests.pickledlogin.pickled_login(cls.driver)
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def setUp(self):
self.driver.get("https://netflix.com/browse")
def test_logout_from_header(self):
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.logout()
self.assertIn('logout', self.driver.current_url)
tests.pickledlogin.pickled_login(self.driver)
def test_navigate_home_from_my_list(self):
self.driver.get("https://www.netflix.com/browse/my-list")
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.navigate_to_home()
self.assertEqual("https://www.netflix.com/browse", self.driver.current_url)
def test_navigate_to_manage_profile(self):
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.navigate_to_manage_profile()
self.assertIn('profiles/manage', self.driver.current_url)
def test_search_for_shawshank(self):
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.search("shawshank")
self.assertIn("The Shawshank Redemption", self.driver.page_source)
# NOTE- test will not fail if "The Shawkshank Redemeption" is removed. Netflix displays
# "similar to {title_name}" for titles its search algorithm recognizes
def test_click_top_notification(self):
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.click_top_notification()
# Assert that we navigated to a notification page or a title page(only 2 options)
self.assertTrue(
'title' in self.driver.current_url or 'notification' in self.driver.current_url
)
# DIDNT MAKE THE FIRST CUT OF TESTS
# I could have 5 more test here for each one of the header buttons.
# Those are about as elementary of tests as possible. Skipping them but TODO- OKAY TO HAVE
# def test_clear_all_notifications(self):
# """ this is easy to do, but impossible to perfect. Netflix doesnt allow any sort of
# 'mark notification as unread' so I have no way of generating notifications. Since I have
# no way of managing the state, THIS TEST CAN NEVER BE RAN MORE THAN ONCE A DAY. Thus I am
# forced to leave it out in order to avoid inconsistent test results"""
# header_page = pagemodels.headerpage.HeaderPage(self.driver)
# header_page.clear_notifications()
if __name__ == '__main__':
with open(r'xmltestresults\pretestresults.xml', 'wb') as output:
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False)
| true
| true
|
7907ba669ecb1cd7087e5929d798def7f2883838
| 22,279
|
py
|
Python
|
setup.py
|
312day/airflow
|
3ecf919f01a1d96a4dc6b1c8a0a9494539ed5a65
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-09-15T02:32:55.000Z
|
2020-09-15T02:32:55.000Z
|
setup.py
|
312day/airflow
|
3ecf919f01a1d96a4dc6b1c8a0a9494539ed5a65
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
setup.py
|
312day/airflow
|
3ecf919f01a1d96a4dc6b1c8a0a9494539ed5a65
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import io
import logging
import os
import subprocess
import sys
import unittest
from importlib import util
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, find_packages, setup
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
spec = util.spec_from_file_location("airflow.version", os.path.join('airflow', 'version.py')) # noqa
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
version = mod.version # type: ignore
PY3 = sys.version_info[0] == 3
PY38 = PY3 and sys.version_info[1] >= 8
my_dir = dirname(__file__)
try:
with io.open(os.path.join(my_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def airflow_test_suite():
"""Test suite for Airflow tests"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
"""
Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
"""
description = "Tidy up the project root"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run command to remove temporary files and directories."""
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
"""
description = "Compile and build the frontend assets"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run a command to compile and build assets."""
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
"""
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
"""
description = "List available extras"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""List extras."""
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
"""
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version_, sha=sha)
else:
return 'no_git_version'
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
"""
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
"""
text = "{}".format(git_version(version))
with open(filename, 'w') as file:
file.write(text)
# 'Start dependencies group' and 'End dependencies group' are markers for ./scripts/ci/check_order_setup.py
# If you change these markers you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
amazon = [
'boto3>=1.12.0,<2.0.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
'azure-storage-blob<12.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'tornado>=4.2.0, <6.0', # Dep of flower. Pin to a version that works on Py3.5.2
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = [
'cloudpickle>=1.4.1, <1.5.0',
'distributed>=2.11.1, <2.20'
]
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1"
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib==1.1.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=0.5.0, <0.8', # TODO: we should migrate to 1.0 likely and add <2.0.0 then
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=1.2.1,<2.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-pubsub>=1.0.0,<2.0.0',
'google-cloud-redis>=0.3.0,<2.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
grpc = [
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac~=0.10',
]
hdfs = [
'snakebite-py3',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
'kubernetes>=3.0.0',
]
kylin = [
'kylinpy>=2.6'
]
ldap = [
'ldap3>=2.5.1',
]
mongo = [
'dnspython>=1.13.0,<2.0.0',
'pymongo>=3.6.0',
]
mssql = [
'pymssql~=2.1.1',
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.18',
'mysqlclient>=1.3.6,<1.4',
]
odbc = [
'pyodbc',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pypd>=1.1.0',
]
papermill = [
'papermill[all]>=1.2.1',
'nteract-scrapbook[all]>=0.3.1',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
'pinotdb==0.1.1',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = [
'presto-python-client>=0.7.0,<0.8'
]
qds = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp',
]
redis = [
'redis~=3.2',
]
salesforce = [
'simple-salesforce>=1.0.0',
]
samba = [
'pysmbclient>=0.1.3',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0,<7',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
'snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.1.4,<0.2',
]
statsd = [
'statsd>=3.3.0, <4.0',
]
tableau = [
'tableauserverclient~=0.12',
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm~=0.4',
]
yandexcloud = [
'yandexcloud>=0.22.0',
]
zendesk = [
'zdesk',
]
# End dependencies group
all_dbs = (cassandra + cloudant + druid + exasol + hdfs + hive + mongo + mssql + mysql +
pinot + postgres + presto + vertica)
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel = [
'beautifulsoup4~=4.7.1',
'blinker',
'bowler',
'click==6.7',
'contextdecorator;python_version<"3.4"',
'coverage',
'docutils',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
'github3.py',
'gitpython',
'ipdb',
'jira',
'mongomock',
'moto>=1.3.14,<2.0.0',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pylint==2.5.3',
'pysftp',
'pytest',
'pytest-cov',
'pytest-instafail',
'pytest-rerunfailures',
'pytest-timeouts',
'pytest-xdist',
'pywinrm',
'qds-sdk>=1.9.6',
'requests_mock',
'setuptools',
'wheel',
'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
if PY3:
devel += ['mypy==0.770']
else:
devel += ['unittest2']
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
PROVIDERS_REQUIREMENTS: Dict[str, Iterable[str]] = {
"amazon": amazon,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"celery": celery,
"cloudant": cloudant,
"cncf.kubernetes": kubernetes,
"databricks": databricks,
"datadog": datadog,
"dingding": [],
"discord": [],
"docker": docker,
"elasticsearch": [],
"exasol": exasol,
"facebook": facebook,
"ftp": [],
"google": google,
"grpc": grpc,
"hashicorp": hashicorp,
"http": [],
"imap": [],
"jdbc": jdbc,
"jenkins": jenkins,
"jira": jira,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
"mongo": mongo,
"mysql": mysql,
"odbc": odbc,
"openfaas": [],
"opsgenie": [],
"oracle": oracle,
"pagerduty": pagerduty,
"papermill": papermill,
"plexus": plexus,
"postgres": postgres,
"presto": presto,
"qubole": qds,
"redis": redis,
"salesforce": salesforce,
"samba": samba,
"segment": segment,
"sftp": ssh,
"singularity": singularity,
"slack": slack,
"snowflake": snowflake,
"sqlite": [],
"ssh": ssh,
"vertica": vertica,
"yandex": yandexcloud,
"zendesk": zendesk,
}
EXTRAS_REQUIREMENTS: Dict[str, Iterable[str]] = {
'all_dbs': all_dbs,
'amazon': amazon,
'apache.atlas': atlas,
'apache.beam': apache_beam,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.pinot": pinot,
"apache.webhdfs": webhdfs,
'async': async_packages,
'atlas': atlas, # TODO: remove this in Airflow 2.1
'aws': amazon, # TODO: remove this in Airflow 2.1
'azure': azure, # TODO: remove this in Airflow 2.1
'cassandra': cassandra, # TODO: remove this in Airflow 2.1
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'cncf.kubernetes': kubernetes,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid, # TODO: remove this in Airflow 2.1
'elasticsearch': elasticsearch,
'exasol': exasol,
'facebook': facebook,
'gcp': google, # TODO: remove this in Airflow 2.1
'gcp_api': google, # TODO: remove this in Airflow 2.1
'github_enterprise': flask_oauth,
'google': google,
'google_auth': flask_oauth,
'grpc': grpc,
'hashicorp': hashicorp,
'hdfs': hdfs, # TODO: remove this in Airflow 2.1
'hive': hive, # TODO: remove this in Airflow 2.1
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes, # TODO: remove this in Airflow 2.1
'ldap': ldap,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
'mongo': mongo,
'mssql': mssql, # TODO: remove this in Airflow 2.1
'mysql': mysql,
'odbc': odbc,
'oracle': oracle,
'pagerduty': pagerduty,
'papermill': papermill,
'password': password,
'pinot': pinot, # TODO: remove this in Airflow 2.1
'plexus': plexus,
'postgres': postgres,
'presto': presto,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
'salesforce': salesforce,
'samba': samba,
'segment': segment,
'sendgrid': sendgrid,
'sentry': sentry,
'singularity': singularity,
'slack': slack,
'snowflake': snowflake,
'spark': spark,
'ssh': ssh,
'statsd': statsd,
'tableau': tableau,
'vertica': vertica,
'virtualenv': virtualenv,
'webhdfs': webhdfs, # TODO: remove this in Airflow 2.1
'winrm': winrm, # TODO: remove this in Airflow 2.1
'yandexcloud': yandexcloud,
}
# Make devel_all contain all providers + extras + unique
devel_all = list(set(devel +
[req for req_list in EXTRAS_REQUIREMENTS.values() for req in req_list] +
[req for req_list in PROVIDERS_REQUIREMENTS.values() for req in req_list]))
PACKAGES_EXCLUDED_FOR_ALL = [
]
if PY3:
PACKAGES_EXCLUDED_FOR_ALL.extend([
'snakebite',
])
if PY38:
PACKAGES_EXCLUDED_FOR_ALL.extend([
'pymssql',
])
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite.
PACKAGES_EXCLUDED_FOR_CI = [
'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]):
"""
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
"""
return any([package.startswith(excluded_package) for excluded_package in exclusion_list])
devel_all = [package for package in devel_all if not is_package_excluded(
package=package,
exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = [package for package in devel_all if not is_package_excluded(
package=package,
exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL)
]
EXTRAS_REQUIREMENTS.update(
{
'all': devel_all,
'devel_ci': devel_ci,
}
)
#####################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
#####################################################################################################
INSTALL_REQUIREMENTS = [
'alembic>=1.2, <2.0',
'argcomplete~=1.10',
'attrs~=19.3',
'cached_property~=1.5',
'cattrs~=1.0',
'colorlog==4.0.2',
'connexion[swagger-ui,flask]>=2.6.0,<3',
'croniter>=0.3.17, <0.4',
'cryptography>=0.9.3',
'dill>=0.2.2, <0.4',
'flask>=1.1.0, <2.0',
'flask-appbuilder>2.3.4,~=3.0',
'flask-caching>=1.3.3, <2.0.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs>=1.0.0, <2.0.0',
'graphviz>=0.12',
'gunicorn>=19.5.0, <20.0',
'iso8601>=0.1.12',
'jinja2>=2.10.1, <2.12.0',
'json-merge-patch==0.2',
'jsonschema~=3.0',
'lazy_object_proxy~=1.3',
'lockfile>=0.12.2',
'markdown>=2.5.2, <3.0',
'markupsafe>=1.1.1, <2.0',
'marshmallow-oneofschema>=2.0.1',
'pandas>=0.17.1, <2.0',
'pendulum~=2.0',
'pep562~=1.0;python_version<"3.7"',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1',
'python-dateutil>=2.3, <3',
'python-nvd3~=0.15.0',
'python-slugify>=3.0.0,<5.0',
'requests>=2.20.0, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy~=1.3',
'sqlalchemy_jsonfield~=0.9',
'tabulate>=0.7.5, <0.9',
'tenacity>=4.12.0, <5.2',
'termcolor>=1.1.0',
'thrift>=0.9.2',
'typing;python_version<"3.6"',
'typing-extensions>=3.7.4;python_version<"3.8"',
'tzlocal>=1.4,<2.0.0',
'unicodecsv>=0.14.1',
'werkzeug<1.0.0',
]
def do_setup():
"""Perform the Airflow package setup."""
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version,
packages=find_packages(include=['airflow', 'airflow.*']),
package_data={
'airflow': ['py.typed'],
'': ['airflow/alembic.ini', "airflow/git_version", "*.ipynb",
"airflow/providers/cncf/kubernetes/example_dags/*.yaml"],
'airflow.api_connexion.openapi': ['*.yaml'],
'airflow.serialization': ["*.json"],
},
include_package_data=True,
zip_safe=False,
entry_points={
"console_scripts": [
"airflow = airflow.__main__:main",
],
},
install_requires=INSTALL_REQUIREMENTS,
setup_requires=[
'bowler',
'docutils',
'gitpython',
'setuptools',
'wheel',
],
extras_require=EXTRAS_REQUIREMENTS,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.apache.org',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/airflow/' + version),
cmdclass={
'extra_clean': CleanCommand,
'compile_assets': CompileAssets,
'list_extras': ListExtras,
},
test_suite='setup.airflow_test_suite',
python_requires='~=3.6',
)
if __name__ == "__main__":
do_setup()
| 27.471023
| 108
| 0.586786
|
import io
import logging
import os
import subprocess
import sys
import unittest
from importlib import util
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, find_packages, setup
logger = logging.getLogger(__name__)
spec = util.spec_from_file_location("airflow.version", os.path.join('airflow', 'version.py'))
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
version = mod.version
PY3 = sys.version_info[0] == 3
PY38 = PY3 and sys.version_info[1] >= 8
my_dir = dirname(__file__)
try:
with io.open(os.path.join(my_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def airflow_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
description = "Tidy up the project root"
user_options = []
def initialize_options(self):
def finalize_options(self):
def run(self):
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
description = "Compile and build the frontend assets"
user_options = []
def initialize_options(self):
def finalize_options(self):
def run(self):
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
description = "List available extras"
user_options = []
def initialize_options(self):
def finalize_options(self):
def run(self):
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
return '.release:{version}+{sha}'.format(version=version_, sha=sha)
else:
return 'no_git_version'
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
text = "{}".format(git_version(version))
with open(filename, 'w') as file:
file.write(text)
amazon = [
'boto3>=1.12.0,<2.0.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
'azure-storage-blob<12.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'tornado>=4.2.0, <6.0',
'vine~=1.3',
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = [
'cloudpickle>=1.4.1, <1.5.0',
'distributed>=2.11.1, <2.20'
]
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1"
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6',
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib==1.1.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=0.5.0, <0.8',
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=1.2.1,<2.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-pubsub>=1.0.0,<2.0.0',
'google-cloud-redis>=0.3.0,<2.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
grpc = [
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac~=0.10',
]
hdfs = [
'snakebite-py3',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
'kubernetes>=3.0.0',
]
kylin = [
'kylinpy>=2.6'
]
ldap = [
'ldap3>=2.5.1',
]
mongo = [
'dnspython>=1.13.0,<2.0.0',
'pymongo>=3.6.0',
]
mssql = [
'pymssql~=2.1.1',
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.18',
'mysqlclient>=1.3.6,<1.4',
]
odbc = [
'pyodbc',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pypd>=1.1.0',
]
papermill = [
'papermill[all]>=1.2.1',
'nteract-scrapbook[all]>=0.3.1',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
'pinotdb==0.1.1',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = [
'presto-python-client>=0.7.0,<0.8'
]
qds = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp',
]
redis = [
'redis~=3.2',
]
salesforce = [
'simple-salesforce>=1.0.0',
]
samba = [
'pysmbclient>=0.1.3',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0,<7',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
'snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.1.4,<0.2',
]
statsd = [
'statsd>=3.3.0, <4.0',
]
tableau = [
'tableauserverclient~=0.12',
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm~=0.4',
]
yandexcloud = [
'yandexcloud>=0.22.0',
]
zendesk = [
'zdesk',
]
all_dbs = (cassandra + cloudant + druid + exasol + hdfs + hive + mongo + mssql + mysql +
pinot + postgres + presto + vertica)
| true
| true
|
7907bbde602b8418e4c329c85b5cbbce7741029a
| 14,197
|
py
|
Python
|
lib/pb_io.py
|
NingAnMe/snow_cover_of_remote_sensing
|
aabd0f4754eb5200203fc8a90f06b603dcd260a8
|
[
"Apache-2.0"
] | 1
|
2020-08-19T08:34:53.000Z
|
2020-08-19T08:34:53.000Z
|
lib/pb_io.py
|
NingAnMe/snow_cover_of_remote_sensing
|
aabd0f4754eb5200203fc8a90f06b603dcd260a8
|
[
"Apache-2.0"
] | null | null | null |
lib/pb_io.py
|
NingAnMe/snow_cover_of_remote_sensing
|
aabd0f4754eb5200203fc8a90f06b603dcd260a8
|
[
"Apache-2.0"
] | 1
|
2020-07-01T16:32:15.000Z
|
2020-07-01T16:32:15.000Z
|
# coding: utf-8
import errno
import os
import random
import re
from contextlib import contextmanager
import h5py
import numpy as np
import time
import yaml
from datetime import datetime
def write_yaml_file(yaml_dict, file_yaml):
path_yaml = os.path.dirname(file_yaml)
if not os.path.isdir(path_yaml):
os.makedirs(path_yaml)
with open(file_yaml, 'w') as stream:
yaml.dump(yaml_dict, stream, default_flow_style=False)
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def find_file(path, reg):
"""
path: directory to walk
reg: regular expression that matching file names must satisfy
"""
FileLst = []
try:
lst = os.walk(path)
for root, dirs, files in lst:
for name in files:
try:
m = re.match(reg, name)
except Exception as e:
continue
if m:
FileLst.append(os.path.join(root, name))
except Exception as e:
print(str(e))
return sorted(FileLst)
def path_replace_ymd(path, ymd):
"""
path: path whose date placeholders are replaced; the %YYYY %MM %DD %JJJ keywords in path are filled from ymd
ymd: yyyymmdd (20180101)
"""
# convert to a datetime object
ymd = datetime.strptime(ymd, '%Y%m%d')
yy = ymd.strftime('%Y')
mm = ymd.strftime('%m')
dd = ymd.strftime('%d')
jj = ymd.strftime('%j')
path = path.replace('%YYYY', yy)
path = path.replace('%MM', mm)
path = path.replace('%DD', dd)
path = path.replace('%JJJ', jj)
return path
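# Example of the substitution above (illustrative path, not from the original module):
# path_replace_ymd('/data/%YYYY/%MM/%DD/file_%JJJ.h5', '20180203')
# -> '/data/2018/02/03/file_034.h5'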
def is_none(*args):
"""
Check whether any of the passed arguments is None
:param args:
:return:
"""
has_none = False
for arg in args:
if arg is None:
has_none = True
return has_none
def copy_attrs_h5py(pre_object, out_object):
"""
Copy the attributes of a file, dataset or group
:param pre_object: dataset or group whose attributes are copied
:param out_object: dataset or group that receives the attributes
:return:
"""
for akey in list(pre_object.attrs.keys()):
out_object.attrs[akey] = pre_object.attrs[akey]
def read_dataset_hdf5(file_path, set_name):
"""
Read an HDF5 file and return a numpy multidimensional array
:param file_path: (unicode) file path
:param set_name: (str or list) dataset name(s)
:return: if set_name is a string, a numpy.ndarray;
         if set_name is a list, a dict mapping each dataset name
         to a numpy.ndarray
"""
if isinstance(set_name, str):
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
data = file_h5py.get(set_name)[:]
dataset = np.array(data)
file_h5py.close()
return dataset
else:
raise ValueError('value error: file_path')
elif isinstance(set_name, list):
datasets = {}
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
for name in set_name:
data = file_h5py.get(name)[:]
dataset = np.array(data)
datasets[name] = dataset
file_h5py.close()
return datasets
else:
raise ValueError('value error: file_path')
else:
raise ValueError('value error: set_name')
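# Illustrative usage (editor's addition, hypothetical file and dataset names):
#   arr = read_dataset_hdf5('granule.h5', 'Latitude')               # ndarray
#   d = read_dataset_hdf5('granule.h5', ['Latitude', 'Longitude'])  # dict of ndarrays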
def attrs2dict(attrs):
"""
将一个 HDF5 attr 类转为 Dict 类
:return:
"""
d = {}
for k, v in list(attrs.items()):
d[k] = v
return d
@contextmanager
def progress_lock(max_wait_time=5):
try:
sleep_time = 0
lock = "progress.lock"
while True:
if os.path.isfile(lock):
if sleep_time > max_wait_time:
try:
os.remove(lock)
break
except:
continue
else:
random_number = random.random() * 0.1
sleep_time += random_number
time.sleep(random_number)
else:
break
with open(lock, "w"):
pass
yield
finally:
try:
os.remove(lock)
except:
pass
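# Illustrative usage (editor's addition): serialise a critical section across
# concurrent processes via the 'progress.lock' file in the working directory:
#   with progress_lock(max_wait_time=5):
#       pass  # hypothetical critical section, e.g. updating a shared file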
def write_txt(in_file, head, bodys, keylens=8):
"""
description: wangpeng add 20180615 (写入或更新txt)
:in_file 写入文件位置
:head 文件头信息
:bodys 文件体
:keylens 更新文件使用的第一列关键字长度
"""
allLines = []
DICT_D = {}
FilePath = os.path.dirname(in_file)
if not os.path.exists(FilePath):
os.makedirs(FilePath)
if os.path.isfile(in_file) and os.path.getsize(in_file) != 0:
fp = open(in_file, 'r')
fp.readline()
Lines = fp.readlines()
fp.close()
        # read existing data into a dict; the key keeps each timestamp unique
for Line in Lines:
DICT_D[Line[:keylens]] = Line[keylens:]
        # add new lines or overwrite existing ones
for Line in bodys:
DICT_D[Line[:keylens]] = Line[keylens:]
        # sort by time (the key column)
newLines = sorted(
iter(DICT_D.items()), key=lambda d: d[0], reverse=False)
for i in range(len(newLines)):
allLines.append(str(newLines[i][0]) + str(newLines[i][1]))
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(allLines)
fp.close()
else:
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(bodys)
fp.close()
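# Illustrative usage (editor's addition, hypothetical file name): with
# keylens=8 the first 8 characters of each body line (here a yyyymmdd date)
# act as the key, so repeated calls overwrite lines with matching dates
# instead of appending duplicates:
#   write_txt('stats.txt', 'date     value\n', ['20180101 1.0\n', '20180102 2.0\n'])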
def str_format(string, values):
"""
格式化字符串
:param string:(str) "DCC: %sat_sensor_Projection_%ymd(分辨率 %resolution 度)"
:param values:(dict) {"sat_sensor": sat_sensor, "resolution": str(resolution), "ymd": ymd}
:return: DCC: FY3D+MERSI_Projection_201712(分辨率 1 度)
"""
if not isinstance(string, str):
return
for k, v in values.items():
string = string.replace("%" + str(k), str(v))
return string
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
"""
:param dir_path: 文件夹
:param time_start: 开始时间
:param time_end: 结束时间
:param ext: 后缀名, '.hdf5'
:param pattern_ymd: 匹配时间的模式, 可以是 r".*(\d{8})_(\d{4})_"
:return: list
"""
files_found = []
if pattern_ymd is not None:
pattern = pattern_ymd
else:
pattern = r".*(\d{8})"
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
re_result = re.match(pattern, file_name)
if re_result is not None:
time_file = ''.join(re_result.groups())
else:
continue
if int(time_start) <= int(time_file) <= int(time_end):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
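# Illustrative usage (editor's addition, hypothetical directory):
#   get_files_by_ymd('/data/hdf', '20180101', '20180131', ext='.hdf5')
# uses the default pattern r".*(\d{8})" and keeps files whose 8-digit date
# falls inside the closed range [20180101, 20180131].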
class ReadOrbitCrossFile(object):
"""
test
"""
@staticmethod
def read_cross_file(in_file, file_type):
"""
:param in_file:
:param file_type:
:return:
"""
data = {
'ymdhms1': None,
'ymdhms2': None,
'lon1': None,
'lat1': None,
'lon2': None,
'lat2': None,
            'fix_name': None  # only present for fixed-site (leo_fix) files
}
if not os.path.isfile(in_file):
            print('***WARNING*** File does not exist: {}'.format(in_file))
return data
# with open(in_file, 'r') as fp:
# lines_10 = fp.readlines()[0: 10]
#
# count = 0
# for line in lines_10:
# print count, line.split()
# count += 1
if file_type == 'leo_area':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_leo':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'),
'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d5']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d3']
data['lon1'] = data_raw['d4']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_fix':
            # data rows
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8',),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d2']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d6']
data['lon1'] = data_raw['d7']
data['lat2'] = data_raw['d4']
data['lon2'] = data_raw['d5']
data['fix_name'] = data_raw['d3']
elif file_type == 'geo_leo':
            # crossing info rows
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
else:
            raise KeyError("Can't handle this file type: {}".format(file_type))
return data
def ymdhms2date(ymd, hms):
"""
ymd = 20180101
hms = 04:04:04
"""
ymdhms = ymd + hms
return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')
def CombineTimeList(TimeList):
    # merge overlapping time ranges in the list into combined ranges
newTimeList = []
    # default sort, ascending
TimeList.sort()
    # track the range currently being merged
stime = TimeList[0][0]
etime = TimeList[0][1]
for i in range(1, len(TimeList), 1):
if TimeList[i][1] <= etime:
continue
elif TimeList[i][0] <= etime <= TimeList[i][1]:
etime = TimeList[i][1]
elif TimeList[i][0] > etime:
newTimeList.append([stime, etime])
stime = TimeList[i][0]
etime = TimeList[i][1]
newTimeList.append([stime, etime])
return newTimeList
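# Illustrative example (editor's addition): overlapping or touching ranges are
# merged, e.g. CombineTimeList([[1, 3], [2, 5], [7, 9]]) -> [[1, 5], [7, 9]];
# the elements only need to support comparison, so datetimes work the same way.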
def get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):
"""
:param dir_path: 文件夹
:param time_start: 开始时间
:param time_end: 结束时间
:param ext: 后缀名, '.hdf5'
:param pattern: 匹配时间的模式
:return: list
"""
files_found = []
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
if pattern is not None:
re_result = re.match(pattern, file_name)
if re_result is None:
continue
if time_start is not None:
time_file = ''.join(re_result.groups())
if not int(time_start) <= int(time_file) <= int(time_end):
continue
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
if __name__ == '__main__':
pass
path_out_map = str_format('/abc/%YYYY%MM%DD', {
'YYYY': '20180101',
'MM': '01',
'DD': '01',
})
print(path_out_map)
# path1 = "E:/projects/ocrs/cfg/global.cfg"
# path2 = "E:/projects/ocrs/cfg/FY3B+MERSI.yaml"
# c = Config(path1)
# c = Config(path2)
# print c.error
# l = c.__dict__.keys()
# l = sorted(l)
# for k in l:
# print k, ":", c.__dict__[k]
# print k
# ################# test ReadOrbitCrossFile ################
# LEO_AREA
# leo_area_name = r'C:\Users\wangpeng\Desktop\tmp\cross\AQUA_australia_LEO_AREA_20171221.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_area_name, 'leo_area')
# LEO_LEO
# leo_leo_name = r'C:\Users\wangpeng\Desktop\tmp\cross\FENGYUN-3D_NPP_LEO_LEO_20180901.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_leo_name, 'leo_leo')
# LEO_FIX
# leo_fix_name = r'C:\Users\wangpeng\Desktop\tmp\cross\AQUA_FIX_LEO_FIX_20181101.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_fix_name, 'leo_fix')
# GEO_LEO
# geo_leo_name = r'C:\Users\wangpeng\Desktop\tmp\cross\FENGYUN-2F_METOP-A_GEO_LEO20181101.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(geo_leo_name, 'geo_leo')
# keys = read_data.keys()
# keys.sort()
# for data_name in keys:
# print data_name, type(read_data[data_name]), read_data[data_name]
| 29.332645
| 98
| 0.520885
|
import errno
import os
import random
import re
from contextlib import contextmanager
import h5py
import numpy as np
import time
import yaml
from datetime import datetime
def write_yaml_file(yaml_dict, file_yaml):
path_yaml = os.path.dirname(file_yaml)
if not os.path.isdir(path_yaml):
os.makedirs(path_yaml)
with open(file_yaml, 'w') as stream:
yaml.dump(yaml_dict, stream, default_flow_style=False)
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def find_file(path, reg):
FileLst = []
try:
lst = os.walk(path)
for root, dirs, files in lst:
for name in files:
try:
m = re.match(reg, name)
except Exception as e:
continue
if m:
FileLst.append(os.path.join(root, name))
except Exception as e:
print(str(e))
return sorted(FileLst)
def path_replace_ymd(path, ymd):
ymd = datetime.strptime(ymd, '%Y%m%d')
yy = ymd.strftime('%Y')
mm = ymd.strftime('%m')
dd = ymd.strftime('%d')
jj = ymd.strftime('%j')
path = path.replace('%YYYY', yy)
path = path.replace('%MM', mm)
path = path.replace('%DD', dd)
path = path.replace('%JJJ', jj)
return path
def is_none(*args):
has_none = False
for arg in args:
if arg is None:
has_none = True
return has_none
def copy_attrs_h5py(pre_object, out_object):
for akey in list(pre_object.attrs.keys()):
out_object.attrs[akey] = pre_object.attrs[akey]
def read_dataset_hdf5(file_path, set_name):
if isinstance(set_name, str):
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
data = file_h5py.get(set_name)[:]
dataset = np.array(data)
file_h5py.close()
return dataset
else:
raise ValueError('value error: file_path')
elif isinstance(set_name, list):
datasets = {}
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
for name in set_name:
data = file_h5py.get(name)[:]
dataset = np.array(data)
datasets[name] = dataset
file_h5py.close()
return datasets
else:
raise ValueError('value error: file_path')
else:
raise ValueError('value error: set_name')
def attrs2dict(attrs):
d = {}
for k, v in list(attrs.items()):
d[k] = v
return d
@contextmanager
def progress_lock(max_wait_time=5):
try:
sleep_time = 0
lock = "progress.lock"
while True:
if os.path.isfile(lock):
if sleep_time > max_wait_time:
try:
os.remove(lock)
break
except:
continue
else:
random_number = random.random() * 0.1
sleep_time += random_number
time.sleep(random_number)
else:
break
with open(lock, "w"):
pass
yield
finally:
try:
os.remove(lock)
except:
pass
def write_txt(in_file, head, bodys, keylens=8):
allLines = []
DICT_D = {}
FilePath = os.path.dirname(in_file)
if not os.path.exists(FilePath):
os.makedirs(FilePath)
if os.path.isfile(in_file) and os.path.getsize(in_file) != 0:
fp = open(in_file, 'r')
fp.readline()
Lines = fp.readlines()
fp.close()
for Line in Lines:
DICT_D[Line[:keylens]] = Line[keylens:]
for Line in bodys:
DICT_D[Line[:keylens]] = Line[keylens:]
newLines = sorted(
iter(DICT_D.items()), key=lambda d: d[0], reverse=False)
for i in range(len(newLines)):
allLines.append(str(newLines[i][0]) + str(newLines[i][1]))
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(allLines)
fp.close()
else:
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(bodys)
fp.close()
def str_format(string, values):
if not isinstance(string, str):
return
for k, v in values.items():
string = string.replace("%" + str(k), str(v))
return string
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
files_found = []
if pattern_ymd is not None:
pattern = pattern_ymd
else:
pattern = r".*(\d{8})"
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
re_result = re.match(pattern, file_name)
if re_result is not None:
time_file = ''.join(re_result.groups())
else:
continue
if int(time_start) <= int(time_file) <= int(time_end):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
class ReadOrbitCrossFile(object):
@staticmethod
def read_cross_file(in_file, file_type):
data = {
'ymdhms1': None,
'ymdhms2': None,
'lon1': None,
'lat1': None,
'lon2': None,
'lat2': None,
'fix_name': None
}
if not os.path.isfile(in_file):
            print('***WARNING*** File does not exist: {}'.format(in_file))
return data
if file_type == 'leo_area':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_leo':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'),
'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d5']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d3']
data['lon1'] = data_raw['d4']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_fix':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8',),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d2']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d6']
data['lon1'] = data_raw['d7']
data['lat2'] = data_raw['d4']
data['lon2'] = data_raw['d5']
data['fix_name'] = data_raw['d3']
elif file_type == 'geo_leo':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
else:
            raise KeyError("Can't handle this file type: {}".format(file_type))
return data
def ymdhms2date(ymd, hms):
ymdhms = ymd + hms
return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')
def CombineTimeList(TimeList):
newTimeList = []
TimeList.sort()
stime = TimeList[0][0]
etime = TimeList[0][1]
for i in range(1, len(TimeList), 1):
if TimeList[i][1] <= etime:
continue
elif TimeList[i][0] <= etime <= TimeList[i][1]:
etime = TimeList[i][1]
elif TimeList[i][0] > etime:
newTimeList.append([stime, etime])
stime = TimeList[i][0]
etime = TimeList[i][1]
newTimeList.append([stime, etime])
return newTimeList
def get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):
files_found = []
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
if pattern is not None:
re_result = re.match(pattern, file_name)
if re_result is None:
continue
if time_start is not None:
time_file = ''.join(re_result.groups())
if not int(time_start) <= int(time_file) <= int(time_end):
continue
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
if __name__ == '__main__':
pass
path_out_map = str_format('/abc/%YYYY%MM%DD', {
'YYYY': '20180101',
'MM': '01',
'DD': '01',
})
print(path_out_map)
| true
| true
|
7907bcae44e61c9e2873b378dd845bff6c95d2e0
| 5,898
|
py
|
Python
|
src/cryptography/x509/ocsp.py
|
g-goessel/cryptography
|
a07de31096767abd3b4529ae29c0487c8f21310b
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/cryptography/x509/ocsp.py
|
g-goessel/cryptography
|
a07de31096767abd3b4529ae29c0487c8f21310b
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/cryptography/x509/ocsp.py
|
g-goessel/cryptography
|
a07de31096767abd3b4529ae29c0487c8f21310b
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
from enum import Enum
import six
from cryptography.hazmat.primitives import hashes
from cryptography.x509 import Certificate
_OIDS_TO_HASH = {
"1.3.14.3.2.26": hashes.SHA1(),
"2.16.840.1.101.3.4.2.4": hashes.SHA224(),
"2.16.840.1.101.3.4.2.1": hashes.SHA256(),
"2.16.840.1.101.3.4.2.2": hashes.SHA384(),
"2.16.840.1.101.3.4.2.3": hashes.SHA512(),
}
class OCSPResponseStatus(Enum):
SUCCESSFUL = 0
MALFORMED_REQUEST = 1
INTERNAL_ERROR = 2
TRY_LATER = 3
SIG_REQUIRED = 5
UNAUTHORIZED = 6
_RESPONSE_STATUS_TO_ENUM = dict((x.value, x) for x in OCSPResponseStatus)
class OCSPCertStatus(Enum):
GOOD = 0
REVOKED = 1
UNKNOWN = 2
_CERT_STATUS_TO_ENUM = dict((x.value, x) for x in OCSPCertStatus)
def load_der_ocsp_request(data):
from cryptography.hazmat.backends.openssl.backend import backend
return backend.load_der_ocsp_request(data)
def load_der_ocsp_response(data):
from cryptography.hazmat.backends.openssl.backend import backend
return backend.load_der_ocsp_response(data)
class OCSPRequestBuilder(object):
def __init__(self, request=None):
self._request = request
def add_certificate(self, cert, issuer, algorithm):
if self._request is not None:
raise ValueError("Only one certificate can be added to a request")
allowed_hashes = (
hashes.SHA1, hashes.SHA224, hashes.SHA256,
hashes.SHA384, hashes.SHA512
)
if not isinstance(algorithm, allowed_hashes):
raise ValueError(
"Algorithm must be SHA1, SHA224, SHA256, SHA384, or SHA512"
)
if (
not isinstance(cert, Certificate) or
not isinstance(issuer, Certificate)
):
raise TypeError("cert and issuer must be a Certificate")
return OCSPRequestBuilder((cert, issuer, algorithm))
def build(self):
from cryptography.hazmat.backends.openssl.backend import backend
if self._request is None:
raise ValueError("You must add a certificate before building")
return backend.create_ocsp_request(self)
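# Illustrative usage sketch (editor's addition; certificate loading and
# variable names are hypothetical, not part of this module):
#   from cryptography import x509
#   from cryptography.hazmat.backends import default_backend
#   from cryptography.hazmat.primitives import serialization
#   cert = x509.load_pem_x509_certificate(cert_pem, default_backend())
#   issuer = x509.load_pem_x509_certificate(issuer_pem, default_backend())
#   req = OCSPRequestBuilder().add_certificate(cert, issuer, hashes.SHA1()).build()
#   der_bytes = req.public_bytes(serialization.Encoding.DER)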
@six.add_metaclass(abc.ABCMeta)
class OCSPRequest(object):
@abc.abstractproperty
def issuer_key_hash(self):
"""
The hash of the issuer public key
"""
@abc.abstractproperty
def issuer_name_hash(self):
"""
The hash of the issuer name
"""
@abc.abstractproperty
def hash_algorithm(self):
"""
The hash algorithm used in the issuer name and key hashes
"""
@abc.abstractproperty
def serial_number(self):
"""
The serial number of the cert whose status is being checked
"""
@abc.abstractmethod
def public_bytes(self, encoding):
"""
Serializes the request to DER
"""
@abc.abstractproperty
def extensions(self):
"""
The list of request extensions. Not single request extensions.
"""
@six.add_metaclass(abc.ABCMeta)
class OCSPResponse(object):
@abc.abstractproperty
def response_status(self):
"""
The status of the response. This is a value from the OCSPResponseStatus
enumeration
"""
@abc.abstractproperty
def signature_algorithm_oid(self):
"""
The ObjectIdentifier of the signature algorithm
"""
@abc.abstractproperty
def signature(self):
"""
The signature bytes
"""
@abc.abstractproperty
def tbs_response_bytes(self):
"""
The tbsResponseData bytes
"""
@abc.abstractproperty
def certificates(self):
"""
A list of certificates used to help build a chain to verify the OCSP
response. This situation occurs when the OCSP responder uses a delegate
certificate.
"""
@abc.abstractproperty
def responder_key_hash(self):
"""
The responder's key hash or None
"""
@abc.abstractproperty
def responder_name(self):
"""
The responder's Name or None
"""
@abc.abstractproperty
def produced_at(self):
"""
The time the response was produced
"""
@abc.abstractproperty
def certificate_status(self):
"""
The status of the certificate (an element from the OCSPCertStatus enum)
"""
@abc.abstractproperty
def revocation_time(self):
"""
The date of when the certificate was revoked or None if not
revoked.
"""
@abc.abstractproperty
def revocation_reason(self):
"""
The reason the certificate was revoked or None if not specified or
not revoked.
"""
@abc.abstractproperty
def this_update(self):
"""
The most recent time at which the status being indicated is known by
the responder to have been correct
"""
@abc.abstractproperty
def next_update(self):
"""
The time when newer information will be available
"""
@abc.abstractproperty
def issuer_key_hash(self):
"""
The hash of the issuer public key
"""
@abc.abstractproperty
def issuer_name_hash(self):
"""
The hash of the issuer name
"""
@abc.abstractproperty
def hash_algorithm(self):
"""
The hash algorithm used in the issuer name and key hashes
"""
@abc.abstractproperty
def serial_number(self):
"""
The serial number of the cert whose status is being checked
"""
| 25.097872
| 79
| 0.62784
|
from __future__ import absolute_import, division, print_function
import abc
from enum import Enum
import six
from cryptography.hazmat.primitives import hashes
from cryptography.x509 import Certificate
_OIDS_TO_HASH = {
"1.3.14.3.2.26": hashes.SHA1(),
"2.16.840.1.101.3.4.2.4": hashes.SHA224(),
"2.16.840.1.101.3.4.2.1": hashes.SHA256(),
"2.16.840.1.101.3.4.2.2": hashes.SHA384(),
"2.16.840.1.101.3.4.2.3": hashes.SHA512(),
}
class OCSPResponseStatus(Enum):
SUCCESSFUL = 0
MALFORMED_REQUEST = 1
INTERNAL_ERROR = 2
TRY_LATER = 3
SIG_REQUIRED = 5
UNAUTHORIZED = 6
_RESPONSE_STATUS_TO_ENUM = dict((x.value, x) for x in OCSPResponseStatus)
class OCSPCertStatus(Enum):
GOOD = 0
REVOKED = 1
UNKNOWN = 2
_CERT_STATUS_TO_ENUM = dict((x.value, x) for x in OCSPCertStatus)
def load_der_ocsp_request(data):
from cryptography.hazmat.backends.openssl.backend import backend
return backend.load_der_ocsp_request(data)
def load_der_ocsp_response(data):
from cryptography.hazmat.backends.openssl.backend import backend
return backend.load_der_ocsp_response(data)
class OCSPRequestBuilder(object):
def __init__(self, request=None):
self._request = request
def add_certificate(self, cert, issuer, algorithm):
if self._request is not None:
raise ValueError("Only one certificate can be added to a request")
allowed_hashes = (
hashes.SHA1, hashes.SHA224, hashes.SHA256,
hashes.SHA384, hashes.SHA512
)
if not isinstance(algorithm, allowed_hashes):
raise ValueError(
"Algorithm must be SHA1, SHA224, SHA256, SHA384, or SHA512"
)
if (
not isinstance(cert, Certificate) or
not isinstance(issuer, Certificate)
):
raise TypeError("cert and issuer must be a Certificate")
return OCSPRequestBuilder((cert, issuer, algorithm))
def build(self):
from cryptography.hazmat.backends.openssl.backend import backend
if self._request is None:
raise ValueError("You must add a certificate before building")
return backend.create_ocsp_request(self)
@six.add_metaclass(abc.ABCMeta)
class OCSPRequest(object):
@abc.abstractproperty
def issuer_key_hash(self):
@abc.abstractproperty
def issuer_name_hash(self):
@abc.abstractproperty
def hash_algorithm(self):
@abc.abstractproperty
def serial_number(self):
@abc.abstractmethod
def public_bytes(self, encoding):
@abc.abstractproperty
def extensions(self):
@six.add_metaclass(abc.ABCMeta)
class OCSPResponse(object):
@abc.abstractproperty
def response_status(self):
@abc.abstractproperty
def signature_algorithm_oid(self):
@abc.abstractproperty
def signature(self):
@abc.abstractproperty
def tbs_response_bytes(self):
@abc.abstractproperty
def certificates(self):
@abc.abstractproperty
def responder_key_hash(self):
@abc.abstractproperty
def responder_name(self):
@abc.abstractproperty
def produced_at(self):
@abc.abstractproperty
def certificate_status(self):
@abc.abstractproperty
def revocation_time(self):
@abc.abstractproperty
def revocation_reason(self):
@abc.abstractproperty
def this_update(self):
@abc.abstractproperty
def next_update(self):
@abc.abstractproperty
def issuer_key_hash(self):
@abc.abstractproperty
def issuer_name_hash(self):
@abc.abstractproperty
def hash_algorithm(self):
@abc.abstractproperty
def serial_number(self):
| true
| true
|
7907bd0d9cc99ca9d621701fa22b6c735cb6601d
| 5,290
|
py
|
Python
|
configs/bottom_up/hrnet/coco/hrnet_w32_coco_512x512.py
|
RuisongZhou/mmpose
|
a79c649ba07e8a9db24f1467826b9432c09134c6
|
[
"Apache-2.0"
] | null | null | null |
configs/bottom_up/hrnet/coco/hrnet_w32_coco_512x512.py
|
RuisongZhou/mmpose
|
a79c649ba07e8a9db24f1467826b9432c09134c6
|
[
"Apache-2.0"
] | null | null | null |
configs/bottom_up/hrnet/coco/hrnet_w32_coco_512x512.py
|
RuisongZhou/mmpose
|
a79c649ba07e8a9db24f1467826b9432c09134c6
|
[
"Apache-2.0"
] | 1
|
2021-12-29T08:21:50.000Z
|
2021-12-29T08:21:50.000Z
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=100, metric='mAP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
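# Editor's note (assumption): with the 'step' policy this decays the learning
# rate at epochs 200 and 260 (by the framework's default factor, typically 0.1)
# after the 500-iteration linear warmup configured above.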
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=1,
scale_aware_sigma=False,
)
# model settings
model = dict(
type='BottomUp',
pretrained='models/pytorch/imagenet/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpSimpleHead',
in_channels=32,
num_joints=17,
num_deconv_layers=0,
tag_per_joint=True,
with_ae_loss=[True],
extra=dict(final_conv_kernel=1, )),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True],
with_ae=[True],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=24,
workers_per_gpu=1,
train=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 26.852792
| 76
| 0.561248
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=100, metric='mAP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=1,
scale_aware_sigma=False,
)
model = dict(
type='BottomUp',
pretrained='models/pytorch/imagenet/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpSimpleHead',
in_channels=32,
num_joints=17,
num_deconv_layers=0,
tag_per_joint=True,
with_ae_loss=[True],
extra=dict(final_conv_kernel=1, )),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True],
with_ae=[True],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=24,
workers_per_gpu=1,
train=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true
| true
|
7907bd73b4e8282649e6b7e28481dfe006bdaf9e
| 819
|
py
|
Python
|
personal_gallery/urls.py
|
mikengugy/The-Gallery
|
5943fdd8d2e8f9c58f14712ebb83f61c38064fcf
|
[
"MIT"
] | null | null | null |
personal_gallery/urls.py
|
mikengugy/The-Gallery
|
5943fdd8d2e8f9c58f14712ebb83f61c38064fcf
|
[
"MIT"
] | null | null | null |
personal_gallery/urls.py
|
mikengugy/The-Gallery
|
5943fdd8d2e8f9c58f14712ebb83f61c38064fcf
|
[
"MIT"
] | null | null | null |
"""personal_gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('gallery.urls'))
]
| 35.608696
| 79
| 0.703297
|
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('gallery.urls'))
]
| true
| true
|
7907bf0fe2c31c6210afad6b7212a1eed48833f9
| 21,296
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
"""RoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Route"]
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteListResult"]
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
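# Illustrative usage sketch (editor's addition; client construction and names
# are hypothetical): this operations group is normally reached through a
# NetworkManagementClient instance, e.g.
#   poller = network_client.routes.begin_create_or_update(
#       'my-rg', 'my-route-table', 'default-route',
#       {'address_prefix': '0.0.0.0/0', 'next_hop_type': 'Internet'})
#   route = poller.result()
#   for r in network_client.routes.list('my-rg', 'my-route-table'):
#       print(r.name)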
| 48.290249
| 210
| 0.656884
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name,
route_table_name,
route_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def begin_delete(
self,
resource_group_name,
route_table_name,
route_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def get(
self,
resource_group_name,
route_table_name,
route_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def _create_or_update_initial(
self,
resource_group_name,
route_table_name,
route_name,
route_parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def begin_create_or_update(
self,
resource_group_name,
route_table_name,
route_name,
route_parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def list(
self,
resource_group_name,
route_table_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'}
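# --- Illustrative usage sketch (added; not part of the generated SDK file above) ---
# A minimal, hedged example of how RoutesOperations is normally reached through the
# azure-mgmt-network management client; the credential, subscription id and resource
# names below are placeholders, not values taken from this file.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   route = network_client.routes.get("my-rg", "my-route-table", "my-route")
#   poller = network_client.routes.begin_delete("my-rg", "my-route-table", "my-route")
#   poller.result()  # block until the long-running delete finishes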
| true
| true
|
7907bf7de4d5ddf50b6b47dc45d9746c62fd5862
| 385
|
py
|
Python
|
exercises/chapter01/exc_01_07.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
exercises/chapter01/exc_01_07.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
exercises/chapter01/exc_01_07.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
from fastcore.foundation import L
# Create an L containing the numbers 0-11 (use range)
t = ____________
print(t)
# Double the contents of the L
t __ 2
print(t)
# Find and return the positions holding 0 (indices 0 and 12) using tuple-style indexing
t_1 = t[_, __]
print(t_1)
# Find and return the positions holding 0 (indices 0 and 12) using mask-style indexing
# - Build the mask: put True only at positions 0 and 12
mask = L([True])
mask += L([False] * 11)
mask += L([True])
mask += L([False] * 11)
t_2 = t______
print(t_2)
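# --- Illustrative sketch (added; not part of the exercise file above) ---
# A hedged example of the fastcore L behaviours the blanks ask about, assuming
# fastcore is installed; it shows one possible way the pieces fit together and
# deliberately uses its own variable names so it does not overwrite the exercise.
from fastcore.foundation import L

demo = L(range(12))                 # an L holding the numbers 0..11
demo *= 2                           # repeat the contents, like a plain list
print(demo)
print(demo[0, 12])                  # tuple-style indexing: both positions holding 0
demo_mask = [i in (0, 12) for i in range(len(demo))]   # True only at 0 and 12
print(demo[demo_mask])              # mask-style indexing returns the same items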
| 16.73913
| 39
| 0.631169
|
from fastcore.foundation import L
t = ____________
print(t)
t __ 2
print(t)
t_1 = t[_, __]
print(t_1)
mask = L([True])
mask += L([False] * 11)
mask += L([True])
mask += L([False] * 11)
t_2 = t______
print(t_2)
| false
| true
|
7907c004db5251908f4e2a3c9cfc31f44a4e2609
| 5,237
|
py
|
Python
|
trim_segments.py
|
aerospike-examples/modeling-user-segmentation
|
17298905c2be913cf096be54bf4e3c0cfd014701
|
[
"MIT"
] | 4
|
2020-07-28T21:56:43.000Z
|
2020-10-24T21:58:07.000Z
|
trim_segments.py
|
aerospike-examples/modeling-user-segmentation
|
17298905c2be913cf096be54bf4e3c0cfd014701
|
[
"MIT"
] | null | null | null |
trim_segments.py
|
aerospike-examples/modeling-user-segmentation
|
17298905c2be913cf096be54bf4e3c0cfd014701
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import aerospike
from aerospike import exception as e
try:
from aerospike_helpers.operations import map_operations as mh
except:
pass # Needs Aerospike client >= 3.4.0
import datetime
import pprint
import random
import sys
import time
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--help", dest="help", action="store_true", help="Displays this message."
)
argparser.add_argument(
"-U",
"--username",
dest="username",
metavar="<USERNAME>",
help="Username to connect to database.",
)
argparser.add_argument(
"-P",
"--password",
dest="password",
metavar="<PASSWORD>",
help="Password to connect to database.",
)
argparser.add_argument(
"-h",
"--host",
dest="host",
default="127.0.0.1",
metavar="<ADDRESS>",
help="Address of Aerospike server.",
)
argparser.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=3000,
metavar="<PORT>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-n",
"--namespace",
dest="namespace",
default="test",
metavar="<NS>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-s",
"--set",
dest="set",
default="profiles",
metavar="<SET>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-i",
"--interactive",
dest="interactive",
action="store_true",
help="Interactive Mode",
)
options = argparser.parse_args()
if options.help:
argparser.print_help()
print()
sys.exit(1)
def version_tuple(version):
return tuple(int(i) for i in version.split("."))
def pause():
input("Hit return to continue")
if options.namespace and options.namespace != "None":
namespace = options.namespace
else:
namespace = None
set = options.set if options.set and options.set != "None" else None
config = {"hosts": [(options.host, options.port)]}
try:
client = aerospike.client(config).connect(options.username, options.password)
policy = {"key": aerospike.POLICY_KEY_SEND}
except e.ClientError as e:
    if not getattr(options, "quiet", False):
        print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(2)
version = client.info_all("version")
release = list(version.values())[0][1].split(" ")[-1]
if version_tuple(aerospike.__version__) < version_tuple("3.4.0") or version_tuple(
release
) < version_tuple("4.6"):
print(
"\nPlease use Python client >= 3.4.0, ",
"Aerospike database >= 4.6 for this example.",
)
sys.exit(3)
pp = pprint.PrettyPrinter(indent=2)
spacer = "=" * 30
epoch = datetime.datetime(2019, 1, 1)
now = datetime.datetime.now()
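# Note added for clarity (an assumption, not stated elsewhere in this script): each
# profile record is expected to carry a map bin "u" keyed by segment id, whose value
# is a list that starts with the segment's expiry hour counted from the 2019-01-01
# epoch above, e.g. {"seg-42": [4321, ...]}. The map value-range operations below
# compare against that leading hour to find, and later trim, stale segments.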
try:
# Find all segments whose TTL is before this hour
key = (namespace, set, "u3")
current_hour = int((now - epoch).total_seconds() / 3600)
print("\nCurrent hour is {} hours since epoch".format(current_hour))
if options.interactive:
pause()
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
# Clean up the stale segments using a background scan with a transaction
# attached to it
print("Clean the stale segments from the entire namespace")
if options.interactive:
pause()
ops = [
mh.map_remove_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_NONE,
False,
)
]
# _, _, _ = client.operate_ordered(key, ops)
scan = client.scan(namespace, set)
scan.add_ops(ops)
job_id = scan.execute_background()
# wait for job to finish
while True:
response = client.job_info(job_id, aerospike.JOB_SCAN)
if response["status"] != aerospike.JOB_STATUS_INPROGRESS:
break
time.sleep(0.25)
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user now has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
except Exception as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
client.close()
| 26.185
| 82
| 0.618675
|
from __future__ import print_function
import argparse
import aerospike
from aerospike import exception as e
try:
from aerospike_helpers.operations import map_operations as mh
except:
pass
import datetime
import pprint
import random
import sys
import time
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--help", dest="help", action="store_true", help="Displays this message."
)
argparser.add_argument(
"-U",
"--username",
dest="username",
metavar="<USERNAME>",
help="Username to connect to database.",
)
argparser.add_argument(
"-P",
"--password",
dest="password",
metavar="<PASSWORD>",
help="Password to connect to database.",
)
argparser.add_argument(
"-h",
"--host",
dest="host",
default="127.0.0.1",
metavar="<ADDRESS>",
help="Address of Aerospike server.",
)
argparser.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=3000,
metavar="<PORT>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-n",
"--namespace",
dest="namespace",
default="test",
metavar="<NS>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-s",
"--set",
dest="set",
default="profiles",
metavar="<SET>",
help="Port of the Aerospike server.",
)
argparser.add_argument(
"-i",
"--interactive",
dest="interactive",
action="store_true",
help="Interactive Mode",
)
options = argparser.parse_args()
if options.help:
argparser.print_help()
print()
sys.exit(1)
def version_tuple(version):
return tuple(int(i) for i in version.split("."))
def pause():
input("Hit return to continue")
if options.namespace and options.namespace != "None":
namespace = options.namespace
else:
namespace = None
set = options.set if options.set and options.set != "None" else None
config = {"hosts": [(options.host, options.port)]}
try:
client = aerospike.client(config).connect(options.username, options.password)
policy = {"key": aerospike.POLICY_KEY_SEND}
except e.ClientError as e:
    if not getattr(options, "quiet", False):
        print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(2)
version = client.info_all("version")
release = list(version.values())[0][1].split(" ")[-1]
if version_tuple(aerospike.__version__) < version_tuple("3.4.0") or version_tuple(
release
) < version_tuple("4.6"):
print(
"\nPlease use Python client >= 3.4.0, ",
"Aerospike database >= 4.6 for this example.",
)
sys.exit(3)
pp = pprint.PrettyPrinter(indent=2)
spacer = "=" * 30
epoch = datetime.datetime(2019, 1, 1)
now = datetime.datetime.now()
try:
key = (namespace, set, "u3")
current_hour = int((now - epoch).total_seconds() / 3600)
print("\nCurrent hour is {} hours since epoch".format(current_hour))
if options.interactive:
pause()
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
print("Clean the stale segments from the entire namespace")
if options.interactive:
pause()
ops = [
mh.map_remove_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_NONE,
False,
)
]
scan = client.scan(namespace, set)
scan.add_ops(ops)
job_id = scan.execute_background()
while True:
response = client.job_info(job_id, aerospike.JOB_SCAN)
if response["status"] != aerospike.JOB_STATUS_INPROGRESS:
break
time.sleep(0.25)
ops = [
mh.map_get_by_value_range(
"u",
[0, aerospike.null()],
[current_hour - 1, aerospike.null()],
aerospike.MAP_RETURN_KEY,
False,
),
mh.map_size("u"),
]
_, _, b = client.operate_ordered(key, ops)
stale_segments, total_segments = b
print("This user now has a total of {} segments".format(total_segments[1]))
print(
"Of those, a total of {} segments should be cleaned".format(
len(stale_segments[1])
)
)
print("Show all segments with a segment TTL before the current hour:")
print(stale_segments)
print(spacer)
except Exception as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
client.close()
| true
| true
|
7907c0368df1b812547c44bf06beb92abe4a726f
| 1,245
|
py
|
Python
|
handlers/brainstorming/chat.py
|
tech-sketch/Brain_Hacker
|
4bbd7cc6f680d8d94b7ffd63612c6374a7cd5b8c
|
[
"MIT"
] | 7
|
2015-09-10T06:36:36.000Z
|
2021-02-04T08:41:33.000Z
|
handlers/brainstorming/chat.py
|
tech-sketch/Brain_Hacker
|
4bbd7cc6f680d8d94b7ffd63612c6374a7cd5b8c
|
[
"MIT"
] | 40
|
2015-07-07T02:09:29.000Z
|
2015-08-10T01:28:35.000Z
|
handlers/brainstorming/chat.py
|
Hironsan/Brain_Hacker
|
4bbd7cc6f680d8d94b7ffd63612c6374a7cd5b8c
|
[
"MIT"
] | 3
|
2015-07-10T01:57:58.000Z
|
2016-07-11T06:09:45.000Z
|
# -*- coding: utf-8 -*-
import random
import itertools
from collections import defaultdict
class Chat(object):
cache_size = 200
# user_num = list(range(1, 100))
# random.shuffle(user_num)
colors = ['赤', '青', '黄', '緑', '紫', '黒', '茶', '灰色', '金', '銀']
fruits = ['りんご', 'みかん', 'メロン', 'パイナップル', 'ぶどう', '梨', 'いちご', 'もも', 'さくらんぼ', 'バナナ']
fruits_with_color = itertools.product(colors, fruits)
user_name = list(map(lambda n: n[0]+n[1], fruits_with_color))
random.shuffle(user_name)
def __init__(self):
self.cache = defaultdict(list)
self.nickname_dic = defaultdict(dict)
def set_nickname(self, room_id, client_name):
self.nickname_dic[room_id].update({client_name:str(self.__get_random_name())})
def get_nickname(self, room_id, client_name):
return self.nickname_dic[room_id][client_name]
def update_cache(self, chat, room_id):
self.cache[room_id].append(chat)
if len(self.cache[room_id]) > self.cache_size:
            self.cache[room_id] = self.cache[room_id][-self.cache_size:]
def __get_random_name(self):
return self.user_name.pop()
def clear_caches(self, room_id):
del self.cache[room_id]
del self.nickname_dic[room_id]
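# --- Illustrative usage sketch (added; not part of the original handler) ---
# A minimal, hedged example of how the cache and nickname helpers fit together;
# the room and client identifiers below are placeholders.
if __name__ == '__main__':
    chat = Chat()
    chat.set_nickname('room-1', 'client-a')            # assigns a random colour+fruit name
    print(chat.get_nickname('room-1', 'client-a'))
    chat.update_cache({'from': 'client-a', 'body': 'hello'}, 'room-1')
    chat.clear_caches('room-1')                         # drop this room's cache and nicknames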
| 31.923077
| 86
| 0.64257
|
import random
import itertools
from collections import defaultdict
class Chat(object):
cache_size = 200
colors = ['赤', '青', '黄', '緑', '紫', '黒', '茶', '灰色', '金', '銀']
fruits = ['りんご', 'みかん', 'メロン', 'パイナップル', 'ぶどう', '梨', 'いちご', 'もも', 'さくらんぼ', 'バナナ']
fruits_with_color = itertools.product(colors, fruits)
user_name = list(map(lambda n: n[0]+n[1], fruits_with_color))
random.shuffle(user_name)
def __init__(self):
self.cache = defaultdict(list)
self.nickname_dic = defaultdict(dict)
def set_nickname(self, room_id, client_name):
self.nickname_dic[room_id].update({client_name:str(self.__get_random_name())})
def get_nickname(self, room_id, client_name):
return self.nickname_dic[room_id][client_name]
def update_cache(self, chat, room_id):
self.cache[room_id].append(chat)
if len(self.cache[room_id]) > self.cache_size:
            self.cache[room_id] = self.cache[room_id][-self.cache_size:]
def __get_random_name(self):
return self.user_name.pop()
def clear_caches(self, room_id):
del self.cache[room_id]
del self.nickname_dic[room_id]
| true
| true
|
7907c07549e9309d5fdbd156fb259124e7e693f6
| 9,869
|
py
|
Python
|
tests/api/test_all_apis.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 3
|
2019-07-31T16:10:26.000Z
|
2021-05-14T20:06:07.000Z
|
tests/api/test_all_apis.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 25
|
2019-08-20T20:19:59.000Z
|
2021-05-14T19:06:41.000Z
|
tests/api/test_all_apis.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 1
|
2020-04-29T18:18:21.000Z
|
2020-04-29T18:18:21.000Z
|
"""Test all API endpoints.
This test class exercises all client-facing APIs. It is also useful as a tool for
demonstrating how to interact with the various APIs.
"""
import json
import pytest
from expects import (be, be_above, be_above_or_equal, contain, equal, expect,
raise_error)
from flask import Response
from tests.utils import post_users
from authserver.db import User, db
ROLES = [
{
'role': 'get:programs',
'description': 'Get from programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'administer:programs',
'description': 'All access on programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'edit:providers',
'description': 'Edit providers only'
},
{
'role': 'view:providers',
'description': 'View providers only',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
}
]
USERS = [
{
'username': 'user1',
'password': 'password',
'person_id': 'c0ffee-c0ffee-1'
},
{
'username': 'user2',
'password': 'password',
'person_id': 'c0ffee-c0ffee-2'
},
{
'username': 'user3',
'password': 'password',
'person_id': 'c0ffee-c0ffee-3'
},
{
'username': 'user4',
'password': 'password',
'person_id': 'c0ffee-c0ffee-4'
},
{
'username': 'user5',
'password': 'password',
'person_id': 'c0ffee-c0ffee-5'
},
{
'username': 'user6',
'password': 'password',
'person_id': 'c0ffee-c0ffee-6'
},
{
'username': 'user7',
'password': 'password',
'person_id': 'c0ffee-c0ffee-7'
},
{
'username': 'user8',
'password': 'password',
'person_id': 'c0ffee-c0ffee-8'
}
]
CLIENTS = [
{
'client_name': 'test client 1',
'user_id': ''
},
{
'client_name': 'test client 2',
'user_id': ''
},
{
'client_name': 'test client 3',
'user_id': ''
},
{
'client_name': 'test client 4',
'user_id': ''
},
{
'client_name': 'test client 5',
'user_id': ''
},
{
'client_name': 'test client6',
'user_id': ''
},
{
'client_name': 'test client 7',
'user_id': ''
},
{
'client_name': 'test client 8',
'user_id': ''
}
]
class TestAllAPIs(object):
def test_all_apis(self, client, token_generator):
# Common headers go in this dict
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
# Create users, and clients
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
# Create roles
role_ids = []
for role in ROLES:
response = client.post(
'/roles', data=json.dumps(role), headers=headers)
expect(response.status_code).to(equal(201))
role_ids.append(response.json['response'][0]['id'])
# Assign clients to users and roles to client
for i, client_id in enumerate(client_ids):
request_body = {
'user_id': user_ids[i],
'roles': role_ids
}
response = client.patch(
'/clients/{}'.format(client_id), data=json.dumps(request_body), headers=headers)
expect(response.status_code).to(equal(200))
# Ensure that clients actually have roles, users, and other crucial fields
for client_id in client_ids:
response = client.get(
'/clients/{}'.format(client_id), headers=headers)
result = response.json['response']
expect(result['id']).to(equal(client_id))
expect(result['client_id_issued_at']).to(be_above(0))
expect(user_ids).to(contain(result['user_id']))
expect(len(result['roles'])).to(equal(len(role_ids)))
self._cleanup(client, token_generator,
user_ids=user_ids, role_ids=role_ids)
def test_client_secret_delete_rotate(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=delete_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(response.json['response']['client_secret']).to(equal(None))
response = client.post('/clients?action=rotate_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(len(response.json['response']['client_secret'])).to(equal(48))
self._cleanup(client, token_generator, user_ids=user_ids)
def test_client_post_invalid_action(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=some_invalid_action',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(422))
expect(response.json['messages']).to(contain("Invalid query param!"))
self._cleanup(client, token_generator, user_ids=user_ids)
def _post_clients(self, client, user_ids, token_generator):
'''
Helper function that creates (and tests creating) a collection of Clients.
'''
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
client_ids = []
for i, api_client in enumerate(CLIENTS):
api_client['user_id'] = user_ids[i]
response = client.post('/clients', data=json.dumps(api_client), headers=headers)
expect(response.status_code).to(equal(201))
client_ids.append(response.json['response'][0]['id'])
expect(len(client_ids)).to(equal(8))
return client_ids
def _cleanup(self, client, token_generator, role_ids=[], user_ids=[]):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
for role_id in role_ids:
response = client.delete(
'/roles/{}'.format(role_id), headers=headers)
expect(response.status_code).to(equal(200))
for user_id in user_ids:
response = client.delete(
'/users/{}'.format(user_id), headers=headers)
expect(response.status_code).to(equal(200))
def test_assign_scope_to_user(self, client, token_generator):
CLIENT = {
}
USER = {
'username': 'test_user_scope',
'password': 'secret',
'person_id': 'c0ffee-c0ffee-c0ffee-99',
'role_id': ''
}
ROLE = {
'role': 'Administrator',
'description': 'An administrative user role.'
}
SCOPE = {
'scope': 'action:do-all-the-things',
'description': 'A scope that grants the holder superpowers'
}
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
# Create a role
response = client.post('/roles', data=json.dumps(ROLE), headers=headers)
expect(response.status_code).to(be(201))
role_id = response.json['response'][0]['id']
# Create a scope
response = client.post('/scopes', data=json.dumps(SCOPE), headers=headers)
expect(response.status_code).to(be(201))
scope_id = response.json['response'][0]['id']
# Bind the scope to the role
response = client.post(f'/roles/{role_id}/scopes', data=json.dumps({'scope_id': scope_id}), headers=headers)
expect(response.status_code).to(be(201))
# Create a user and make the user an administrator
USER['role_id'] = role_id
response = client.post('/users', data=json.dumps(USER), headers=headers)
expect(response.status_code).to(be(201))
user_id = response.json['response'][0]['id']
# Cleanup
response = client.delete(f'/users/{user_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
| 34.031034
| 118
| 0.580201
|
import json
import pytest
from expects import (be, be_above, be_above_or_equal, contain, equal, expect,
raise_error)
from flask import Response
from tests.utils import post_users
from authserver.db import User, db
ROLES = [
{
'role': 'get:programs',
'description': 'Get from programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'administer:programs',
'description': 'All access on programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'edit:providers',
'description': 'Edit providers only'
},
{
'role': 'view:providers',
'description': 'View providers only',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
}
]
USERS = [
{
'username': 'user1',
'password': 'password',
'person_id': 'c0ffee-c0ffee-1'
},
{
'username': 'user2',
'password': 'password',
'person_id': 'c0ffee-c0ffee-2'
},
{
'username': 'user3',
'password': 'password',
'person_id': 'c0ffee-c0ffee-3'
},
{
'username': 'user4',
'password': 'password',
'person_id': 'c0ffee-c0ffee-4'
},
{
'username': 'user5',
'password': 'password',
'person_id': 'c0ffee-c0ffee-5'
},
{
'username': 'user6',
'password': 'password',
'person_id': 'c0ffee-c0ffee-6'
},
{
'username': 'user7',
'password': 'password',
'person_id': 'c0ffee-c0ffee-7'
},
{
'username': 'user8',
'password': 'password',
'person_id': 'c0ffee-c0ffee-8'
}
]
CLIENTS = [
{
'client_name': 'test client 1',
'user_id': ''
},
{
'client_name': 'test client 2',
'user_id': ''
},
{
'client_name': 'test client 3',
'user_id': ''
},
{
'client_name': 'test client 4',
'user_id': ''
},
{
'client_name': 'test client 5',
'user_id': ''
},
{
'client_name': 'test client6',
'user_id': ''
},
{
'client_name': 'test client 7',
'user_id': ''
},
{
'client_name': 'test client 8',
'user_id': ''
}
]
class TestAllAPIs(object):
def test_all_apis(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
role_ids = []
for role in ROLES:
response = client.post(
'/roles', data=json.dumps(role), headers=headers)
expect(response.status_code).to(equal(201))
role_ids.append(response.json['response'][0]['id'])
for i, client_id in enumerate(client_ids):
request_body = {
'user_id': user_ids[i],
'roles': role_ids
}
response = client.patch(
'/clients/{}'.format(client_id), data=json.dumps(request_body), headers=headers)
expect(response.status_code).to(equal(200))
for client_id in client_ids:
response = client.get(
'/clients/{}'.format(client_id), headers=headers)
result = response.json['response']
expect(result['id']).to(equal(client_id))
expect(result['client_id_issued_at']).to(be_above(0))
expect(user_ids).to(contain(result['user_id']))
expect(len(result['roles'])).to(equal(len(role_ids)))
self._cleanup(client, token_generator,
user_ids=user_ids, role_ids=role_ids)
def test_client_secret_delete_rotate(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=delete_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(response.json['response']['client_secret']).to(equal(None))
response = client.post('/clients?action=rotate_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(len(response.json['response']['client_secret'])).to(equal(48))
self._cleanup(client, token_generator, user_ids=user_ids)
def test_client_post_invalid_action(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=some_invalid_action',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(422))
expect(response.json['messages']).to(contain("Invalid query param!"))
self._cleanup(client, token_generator, user_ids=user_ids)
def _post_clients(self, client, user_ids, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
client_ids = []
for i, api_client in enumerate(CLIENTS):
api_client['user_id'] = user_ids[i]
response = client.post('/clients', data=json.dumps(api_client), headers=headers)
expect(response.status_code).to(equal(201))
client_ids.append(response.json['response'][0]['id'])
expect(len(client_ids)).to(equal(8))
return client_ids
def _cleanup(self, client, token_generator, role_ids=[], user_ids=[]):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
for role_id in role_ids:
response = client.delete(
'/roles/{}'.format(role_id), headers=headers)
expect(response.status_code).to(equal(200))
for user_id in user_ids:
response = client.delete(
'/users/{}'.format(user_id), headers=headers)
expect(response.status_code).to(equal(200))
def test_assign_scope_to_user(self, client, token_generator):
CLIENT = {
}
USER = {
'username': 'test_user_scope',
'password': 'secret',
'person_id': 'c0ffee-c0ffee-c0ffee-99',
'role_id': ''
}
ROLE = {
'role': 'Administrator',
'description': 'An administrative user role.'
}
SCOPE = {
'scope': 'action:do-all-the-things',
'description': 'A scope that grants the holder superpowers'
}
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
response = client.post('/roles', data=json.dumps(ROLE), headers=headers)
expect(response.status_code).to(be(201))
role_id = response.json['response'][0]['id']
response = client.post('/scopes', data=json.dumps(SCOPE), headers=headers)
expect(response.status_code).to(be(201))
scope_id = response.json['response'][0]['id']
response = client.post(f'/roles/{role_id}/scopes', data=json.dumps({'scope_id': scope_id}), headers=headers)
expect(response.status_code).to(be(201))
USER['role_id'] = role_id
response = client.post('/users', data=json.dumps(USER), headers=headers)
expect(response.status_code).to(be(201))
user_id = response.json['response'][0]['id']
response = client.delete(f'/users/{user_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
| true
| true
|
7907c133d53c8bdfe722aa209a8f1b9d0cef570c
| 7,130
|
py
|
Python
|
scripts/schema-context.py
|
bedroesb/ro-crate
|
1d6a1423308c65549eeac17bcd785733e9078622
|
[
"Apache-2.0"
] | null | null | null |
scripts/schema-context.py
|
bedroesb/ro-crate
|
1d6a1423308c65549eeac17bcd785733e9078622
|
[
"Apache-2.0"
] | null | null | null |
scripts/schema-context.py
|
bedroesb/ro-crate
|
1d6a1423308c65549eeac17bcd785733e9078622
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 The University of Manchester UK
# Copyright 2019 RO-Crate contributors <https://github.com/ResearchObject/ro-crate/graphs/contributors>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script retrieves the schema.org properties to generate
the corresponding simplified @context for RO-Crate,
adding our additional properties.
Run as:
./schema-context.py 0.3-DRAFT > ../docs/0.3-DRAFT/context.jsonld
"""
import sys
import json
import requests
from collections import OrderedDict
import urllib.request
# Our own version
ROCRATE_VERSION="1.1-DRAFT"
# Update version from http://schema.org/docs/releases.html
# NOTE: Breaks due to https://github.com/schemaorg/schemaorg/issues/2805
SCHEMA_VERSION="10.0"
# Update from https://bioschemas.org/profiles/Workflow/
BIOSCHEMA_WORKFLOW_PROFILE = "https://bioschemas.org/profiles/ComputationalWorkflow/0.5-DRAFT-2020_07_21"
BIOSCHEMA_WORKFLOW_NS = "https://bioschemas.org/ComputationalWorkflow"
BIOSCHEMA_FORMAL_PARAMETER_NS = "https://bioschemas.org/FormalParameter"
BIOSCHEMA_FORMAL_PARAMETER_PROFILE = "https://bioschemas.org/profiles/FormalParameter/0.1-DRAFT-2020_07_21"
def main():
#url="http://schema.org/version/%s/schemaorgcontext.jsonld" % SCHEMA_VERSION
# Workaround for https://github.com/schemaorg/schemaorg/issues/2805
url="https://raw.githubusercontent.com/schemaorg/schemaorg/V%s-release/data/releases/%s/schemaorgcontext.jsonld" % (SCHEMA_VERSION, SCHEMA_VERSION)
with urllib.request.urlopen(url) as f:
schema = json.load(f)
if len(sys.argv) > 2:
version = sys.argv[1]
tag = sys.argv[2]
elif len(sys.argv) > 1:
tag = version = sys.argv[1]
else:
tag = version = ROCRATE_VERSION
schemakeys = list(schema["@context"].keys())
schemakeys.sort() # they are usually sorted anyway
j = OrderedDict()
j["@id"] = "https://w3id.org/ro/crate/%s/context" % version
j["name"] = "RO-Crate JSON-LD Context",
j["version"] = tag
j["url"] = {"@id": "https://w3id.org/ro/crate/%s" % version}
j["schemaVersion"] = {"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION}
j["isBasedOn"] = [
{"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION},
{"@id": "https://pcdm.org/2016/04/18/models"},
{"@id": BIOSCHEMA_WORKFLOW_PROFILE },
{"@id": BIOSCHEMA_FORMAL_PARAMETER_PROFILE }
]
j["license"] = {"@id": "https://creativecommons.org/publicdomain/zero/1.0/"}
context = OrderedDict()
j["@context"] = context
for k in schemakeys:
if ":" in k: # URL like https://www.w3.org/wiki/WebSchemas/SchemaDotOrgSources#TP
continue
if "@" in k: # @vocab?
continue
definition = schema["@context"][k]
if not "@id" in definition or isinstance(definition, str):
continue # bibo etc.
context[k] = schema["@context"][k]["@id"].replace("schema:", "http://schema.org/")
context.update(ADDITIONAL)
json.dump(j, sys.stdout, ensure_ascii=False, indent=5) # indent4 to match existing!
print() ## newline
# Ordered so we keep a somewhat ordered presentation in the JSON
ADDITIONAL = OrderedDict([
# This list should correspond to listing in
# https://researchobject.github.io/ro-crate/0.3-DRAFT/#additional-metadata-standards
("File", "http://schema.org/MediaObject"),
("path", "http://schema.org/contentUrl"),
("Journal", "http://schema.org/Periodical"),
("cite-as", "https://www.w3.org/ns/iana/link-relations/relation#cite-as"),
("hasFile", "http://pcdm.org/models#hasFile"),
("hasMember", "http://pcdm.org/models#hasMember"),
("RepositoryCollection", "http://pcdm.org/models#Collection"),
("RepositoryObject", "http://pcdm.org/models#object"),
# Temporary namespace for properties/types
# proposed https://bioschemas.org/profiles/Workflow/ draft 0.5
# Remove if/when added to schema.org release!
## BEGIN
("ComputationalWorkflow", BIOSCHEMA_WORKFLOW_NS),
("input", BIOSCHEMA_WORKFLOW_NS + "#input"),
("output", BIOSCHEMA_WORKFLOW_NS + "#output"),
("FormalParameter", BIOSCHEMA_FORMAL_PARAMETER_NS),
# https://github.com/schemaorg/schemaorg/issues/383#issuecomment-651040576
("funding", "http://schema.org/funding"),
## END
("wasDerivedFrom", "http://www.w3.org/ns/prov#wasDerivedFrom"),
("importedFrom", "http://purl.org/pav/importedFrom"),
("importedOn", "http://purl.org/pav/importedOn"),
("importedBy", "http://purl.org/pav/importedBy"),
("retrievedFrom", "http://purl.org/pav/retrievedFrom"),
("retrievedOn", "http://purl.org/pav/retrievedOn"),
("retrievedBy", "http://purl.org/pav/retrievedBy"),
("conformsTo", "http://purl.org/dc/terms/conformsTo"),
("@label", "http://www.w3.org/2000/01/rdf-schema#label"),
("pcdm", "http://pcdm.org/models#"),
("bibo", "http://purl.org/ontology/bibo/"),
("cc", "http://creativecommons.org/ns#"),
("dct", "http://purl.org/dc/terms/"),
("foaf", "http://xmlns.com/foaf/0.1/"),
("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
("rdfa", "http://www.w3.org/ns/rdfa#"),
("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
("schema", "http://schema.org/"),
("frapo", "http://purl.org/cerif/frapo/"),
("rel", "https://www.w3.org/ns/iana/link-relations/relation#"),
("pav", "http://purl.org/pav/"),
("prov", "http://www.w3.org/ns/prov#"),
("wfdesc", "http://purl.org/ro/wfdesc#"),
("wfprov", "http://purl.org/ro/wfprov#"),
("roterms", "http://purl.org/ro/roterms#"),
("wf4ever", "http://purl.org/ro/wf4ever#"),
# Disabled, see https://github.com/ResearchObject/ro-crate/pull/73
# ("@base", None)
])
if __name__=="__main__":
if "-v" in sys.argv or "--version" in sys.argv:
print("schema-context.py %s" % ROCRATE_VERSION)
print("schema.org %s" % SCHEMA_VERSION)
sys.exit(0)
elif "-h" in sys.argv or "--help" in sys.argv:
print("schema-context.py [VERSION] [TAG]")
print("")
print("Generates context.jsonld from schema.org and additional terms")
print(" VERSION is RO-Crate Specification version (default: %s)" % ROCRATE_VERSION)
print(" TAG is RO-Crate Semantic Versioning tag (default same as VERSION)")
sys.exit(0)
else:
main()
| 40.511364
| 151
| 0.638289
|
import sys
import json
import requests
from collections import OrderedDict
import urllib.request
ROCRATE_VERSION="1.1-DRAFT"
SCHEMA_VERSION="10.0"
BIOSCHEMA_WORKFLOW_PROFILE = "https://bioschemas.org/profiles/ComputationalWorkflow/0.5-DRAFT-2020_07_21"
BIOSCHEMA_WORKFLOW_NS = "https://bioschemas.org/ComputationalWorkflow"
BIOSCHEMA_FORMAL_PARAMETER_NS = "https://bioschemas.org/FormalParameter"
BIOSCHEMA_FORMAL_PARAMETER_PROFILE = "https://bioschemas.org/profiles/FormalParameter/0.1-DRAFT-2020_07_21"
def main():
url="https://raw.githubusercontent.com/schemaorg/schemaorg/V%s-release/data/releases/%s/schemaorgcontext.jsonld" % (SCHEMA_VERSION, SCHEMA_VERSION)
with urllib.request.urlopen(url) as f:
schema = json.load(f)
if len(sys.argv) > 2:
version = sys.argv[1]
tag = sys.argv[2]
elif len(sys.argv) > 1:
tag = version = sys.argv[1]
else:
tag = version = ROCRATE_VERSION
schemakeys = list(schema["@context"].keys())
schemakeys.sort()
j = OrderedDict()
j["@id"] = "https://w3id.org/ro/crate/%s/context" % version
j["name"] = "RO-Crate JSON-LD Context",
j["version"] = tag
j["url"] = {"@id": "https://w3id.org/ro/crate/%s" % version}
j["schemaVersion"] = {"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION}
j["isBasedOn"] = [
{"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION},
{"@id": "https://pcdm.org/2016/04/18/models"},
{"@id": BIOSCHEMA_WORKFLOW_PROFILE },
{"@id": BIOSCHEMA_FORMAL_PARAMETER_PROFILE }
]
j["license"] = {"@id": "https://creativecommons.org/publicdomain/zero/1.0/"}
context = OrderedDict()
j["@context"] = context
for k in schemakeys:
if ":" in k: continue
if "@" in k:
continue
definition = schema["@context"][k]
if not "@id" in definition or isinstance(definition, str):
continue
context[k] = schema["@context"][k]["@id"].replace("schema:", "http://schema.org/")
context.update(ADDITIONAL)
json.dump(j, sys.stdout, ensure_ascii=False, indent=5)
    print()
ADDITIONAL = OrderedDict([
    ("File", "http://schema.org/MediaObject"),
("path", "http://schema.org/contentUrl"),
("Journal", "http://schema.org/Periodical"),
("cite-as", "https://www.w3.org/ns/iana/link-relations/relation#cite-as"),
("hasFile", "http://pcdm.org/models#hasFile"),
("hasMember", "http://pcdm.org/models#hasMember"),
("RepositoryCollection", "http://pcdm.org/models#Collection"),
("RepositoryObject", "http://pcdm.org/models#object"),
("ComputationalWorkflow", BIOSCHEMA_WORKFLOW_NS),
("input", BIOSCHEMA_WORKFLOW_NS + "#input"),
("output", BIOSCHEMA_WORKFLOW_NS + "#output"),
("FormalParameter", BIOSCHEMA_FORMAL_PARAMETER_NS),
"http://schema.org/funding"),
("wasDerivedFrom", "http://www.w3.org/ns/prov#wasDerivedFrom"),
("importedFrom", "http://purl.org/pav/importedFrom"),
("importedOn", "http://purl.org/pav/importedOn"),
("importedBy", "http://purl.org/pav/importedBy"),
("retrievedFrom", "http://purl.org/pav/retrievedFrom"),
("retrievedOn", "http://purl.org/pav/retrievedOn"),
("retrievedBy", "http://purl.org/pav/retrievedBy"),
("conformsTo", "http://purl.org/dc/terms/conformsTo"),
("@label", "http://www.w3.org/2000/01/rdf-schema#label"),
("pcdm", "http://pcdm.org/models#"),
("bibo", "http://purl.org/ontology/bibo/"),
("cc", "http://creativecommons.org/ns#"),
("dct", "http://purl.org/dc/terms/"),
("foaf", "http://xmlns.com/foaf/0.1/"),
("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
("rdfa", "http://www.w3.org/ns/rdfa#"),
("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
("schema", "http://schema.org/"),
("frapo", "http://purl.org/cerif/frapo/"),
("rel", "https://www.w3.org/ns/iana/link-relations/relation#"),
("pav", "http://purl.org/pav/"),
("prov", "http://www.w3.org/ns/prov#"),
("wfdesc", "http://purl.org/ro/wfdesc#"),
("wfprov", "http://purl.org/ro/wfprov#"),
("roterms", "http://purl.org/ro/roterms#"),
("wf4ever", "http://purl.org/ro/wf4ever#"),
])
if __name__=="__main__":
if "-v" in sys.argv or "--version" in sys.argv:
print("schema-context.py %s" % ROCRATE_VERSION)
print("schema.org %s" % SCHEMA_VERSION)
sys.exit(0)
elif "-h" in sys.argv or "--help" in sys.argv:
print("schema-context.py [VERSION] [TAG]")
print("")
print("Generates context.jsonld from schema.org and additional terms")
print(" VERSION is RO-Crate Specification version (default: %s)" % ROCRATE_VERSION)
print(" TAG is RO-Crate Semantic Versioning tag (default same as VERSION)")
sys.exit(0)
else:
main()
| true
| true
|
7907c198e790378a78592d3e658f9fdde576711c
| 361
|
py
|
Python
|
sps/sps/doctype/district/district.py
|
tushar7724/SPS
|
4a32b740830117327b3597d4e16127ac0a90a3ef
|
[
"MIT"
] | null | null | null |
sps/sps/doctype/district/district.py
|
tushar7724/SPS
|
4a32b740830117327b3597d4e16127ac0a90a3ef
|
[
"MIT"
] | null | null | null |
sps/sps/doctype/district/district.py
|
tushar7724/SPS
|
4a32b740830117327b3597d4e16127ac0a90a3ef
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, TUSHAR TAJNE and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class District(Document):
def validate(self):
name = str(self.district.capitalize())
self.name = _(name)
pass
| 25.785714
| 51
| 0.759003
|
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class District(Document):
def validate(self):
name = str(self.district.capitalize())
self.name = _(name)
pass
| true
| true
|
7907c1db45762c8f70eb23a2c7709179bbcd65e5
| 39,387
|
py
|
Python
|
scons/scons-local-1.2.0.d20090919/SCons/SConf.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
scons/scons-local-1.2.0.d20090919/SCons/SConf.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
scons/scons-local-1.2.0.d20090919/SCons/SConf.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
"""SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py 4369 2009/09/19 15:58:29 scons"
import os
import re
import string
import StringIO
import sys
import traceback
import types
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError, "SCons.SConf.SetCacheMode: Unknown mode " + mode
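# For example (illustrative note, not part of the original module):
#   SetCacheMode("force")   # rerun every configure check
#   SetCacheMode("cache")   # only reuse stored results; error if a check was never built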
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', string.upper(str(target[0])))
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
"""Called just before the building targets phase begins."""
if len(_ac_config_hs) == 0:
return
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
string.replace( source[0].get_contents(),
'\n', "\n |" ) )
# python 2.2 introduces types.BooleanType
BooleanTypes = [types.IntType]
if hasattr(types, 'BooleanType'): BooleanTypes.append(types.BooleanType)
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None # -> 0/None -> no error, != 0 error
string = None # the stdout / stderr output when building the target
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer:
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = StringIO.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
self.s.write(str)
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
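# Illustrative note (added; not part of the original module): a Streamer wraps an
# existing stream and keeps a copy of everything written to it, e.g.
#   s = sys.stdout = Streamer(sys.stdout)
#   ...   # run a configure check that prints to stdout
#   captured = s.getvalue()   # text that also reached the real stdout
# This is the pattern SConfBuildTask.execute() uses below so a check's output can
# be stored alongside its build result.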
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
string.replace(" |" + str(bi.string),
"\n", "\n |"))
def failed(self):
# check, if the reason was a ConfigureDryRunError or a
# ConfigureCacheError and if yes, reraise the exception
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (they occur when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # from building a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
apply(excepthook, self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
# returns (is_up_to_date, cached_error, cachable)
# where is_up_to_date is 1, if the node(s) are up_to_date
# cached_error is 1, if the node(s) are up_to_date, but the
# build will fail
# cachable is 0, if some nodes are not in our cache
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to-date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
# the node hasn't been built in a SConf context or doesn't
# exist
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError # will be 'caught' in self.failed
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
# note stdout and stderr are the same here
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# Set up the Decider() to force rebuilds by saying
# that every source has changed. Note that we still
# call the environment's underlying source decider so
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase:
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After finished with your
tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: In most cases, it is not necessary to cache
Test results explicitely. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called, if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to explicitely cache this error.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise (SCons.Errors.UserError,
"Only one SConf object may be active at one time")
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
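    # Illustrative sketch (not part of the original source): typical use of this
    # class from an SConstruct, assuming a hypothetical construction environment
    # 'env'. The SConf/Configure wrapper normally constructs SConfBase; Finish()
    # must be called exactly once before the environment is used for regular
    # builds.
    #
    #   conf = SConf(env)                      # or env.Configure(...)
    #   if not conf.CheckCHeader('stdio.h'):
    #       print 'stdio.h is missing'
    #   env = conf.Finish()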
def Define(self, name, value = None, comment = None):
"""
Define a pre processor symbol name, with the optional given value in the
current config header.
If value is None (default), then #define name is written. If value is not
none, then #define name value is written.
comment is a string which will be put as a C comment in the
header, to explain the meaning of the value (appropriate C comments /* and
*/ will be put automatically."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + string.join(lines, '\n')
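    # Illustrative sketch (not part of the original source): assuming a config_h
    # file was passed to the constructor, a hypothetical call such as
    #
    #   conf.Define('HAVE_MALLOC', 1, 'Set to 1 if malloc() is available')
    #
    # appends the following lines to the pending config header text:
    #
    #   /* Set to 1 if malloc() is available */
    #   #define HAVE_MALLOC 1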
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call that - you can use TryCompile / TryLink / TryRun instead
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>.
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
ok = self.TryLink(text, extension)
if( ok ):
prog = self.lastTarget
pname = prog.path
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = output.get_contents()
return( 1, outputStr)
return (0, "")
class TestWrapper:
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise (SCons.Errors.UserError,
"Test called after sconf.Finish()")
context = CheckContext(self.sconf)
ret = apply(self.test, (context,) + args, kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
# truncate logfile, if SConf.Configure is called for the first time
# in a build
if _ac_config_logs.has_key(self.logfile):
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
# logfile may stay in a build directory, so we tell
            # the build system not to override it with a possibly
            # existing file with the same name in the source directory
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
# we use a special builder to create source files from TEXT
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
# only one SConf instance should be active at a time ...
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError, "Finish may be called only once!"
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if not self.config_h is None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext:
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuilt if the dependencies have
    changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
        # We don't regenerate the config.h file after each test. That means
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out that we need to include config.h
        # in tests, we must ensure that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # because we would need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if type(res) in BooleanTypes:
if res:
text = "yes"
else:
text = "no"
elif type(res) == types.StringType:
text = res
else:
raise TypeError, "Expected string, int or bool, got " + str(type(res))
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return apply(self.sconf.TryBuild, args, kw)
def TryAction(self, *args, **kw):
return apply(self.sconf.TryAction, args, kw)
def TryCompile(self, *args, **kw):
return apply(self.sconf.TryCompile, args, kw)
def TryLink(self, *args, **kw):
return apply(self.sconf.TryLink, args, kw)
def TryRun(self, *args, **kw):
return apply(self.sconf.TryRun, args, kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError, "CheckContext instance has no attribute '%s'" % attr
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
# We assume that Display is called twice for each test here
# once for the Checking for ... message and once for the result.
# The self.sconf.cached flag can only be set between those calls
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
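# Illustrative sketch (not part of the original source): a minimal custom test
# following the CheckCustom pattern from the class docstring above. The names
# CheckSizeOfLong, 'env' and 'conf' are hypothetical; the test is registered
# through the custom_tests argument of SConf/Configure and then called like a
# built-in check.
#
#   def CheckSizeOfLong(context):
#       context.Message('Checking size of long ... ')
#       ok, out = context.TryRun('#include <stdio.h>\n'
#                                'int main(void) { printf("%d", (int)sizeof(long)); return 0; }\n',
#                                '.c')
#       context.Result(out if ok else 'failed')
#       return int(out) if ok else 0
#
#   conf = SConf(env, custom_tests={'CheckSizeOfLong': CheckSizeOfLong})
#   size_of_long = conf.CheckSizeOfLong()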
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return apply(SConfBase, args, kw)
else:
return SCons.Util.Null()
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
# used by CheckHeader and CheckLibWithHeader to produce C - #include
# statements from the specified header (list)
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return string.join(l, ''), lastHeader
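# Illustrative sketch (not part of the original source): worked examples of the
# helper above. With leaveLast set (as CheckHeader uses it) the last header is
# held back so the actual check can be generated for it separately.
#
#   createIncludesFromHeaders(['stdio.h', 'stdlib.h'], 1)
#   # -> ('#include "stdio.h"\n', 'stdlib.h')
#   createIncludesFromHeaders('math.h', 0, '<>')
#   # -> ('#include <math.h>\n', None)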
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
"""
Another (more sophisticated) test for a library.
    Checks if the library and header are available for the language (may be 'C'
    or 'CXX'). call may be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
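# Illustrative sketch (not part of the original source): hypothetical calls to
# the two library checks from an SConstruct, assuming a configure context 'conf'
# created with SConf(env).
#
#   if conf.CheckLib('m', 'sin'):
#       pass    # libm is usable and (with autoadd=1) appended to env['LIBS']
#   if conf.CheckLibWithHeader('m', 'math.h', 'c', call='sin(0.0);'):
#       pass    # header and library both work for a C test program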
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 37.945087
| 96
| 0.595146
|
"""SCons.SConf
Autoconf-like configuration support.
"""
__revision__ = "src/engine/SCons/SConf.py 4369 2009/09/19 15:58:29 scons"
import os
import re
import string
import StringIO
import sys
import traceback
import types
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
dryrun = 0
AUTO=0
FORCE=1
CACHE=2
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError, "SCons.SConf.SetCacheMode: Unknown mode " + mode
progress_display = SCons.Util.display
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0
_ac_config_logs = {}
_ac_config_hs = {}
sconf_global = None
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', string.upper(str(target[0])))
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
"""Called just before the building targets phase begins."""
if len(_ac_config_hs) == 0:
return
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
string.replace( source[0].get_contents(),
'\n', "\n |" ) )
BooleanTypes = [types.IntType]
if hasattr(types, 'BooleanType'): BooleanTypes.append(types.BooleanType)
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None
string = None
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer:
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = StringIO.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
self.s.write(str)
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
string.replace(" |" + str(bi.string),
"\n", "\n |"))
def failed(self):
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
# Clear the exception to prevent the contained traceback
            # from building a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
apply(excepthook, self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to-date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase:
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After finished with your
tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: In most cases, it is not necessary to cache
Test results explicitely. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called, if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to explicitely cache this error.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise (SCons.Errors.UserError,
"Only one SConf object may be active at one time")
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a pre processor symbol name, with the optional given value in the
current config header.
If value is None (default), then #define name is written. If value is not
none, then #define name value is written.
comment is a string which will be put as a C comment in the
header, to explain the meaning of the value (appropriate C comments /* and
*/ will be put automatically."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + string.join(lines, '\n')
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call that - you can use TryCompile / TryLink / TryRun instead
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>.
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
ok = self.TryLink(text, extension)
if( ok ):
prog = self.lastTarget
pname = prog.path
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = output.get_contents()
return( 1, outputStr)
return (0, "")
class TestWrapper:
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise (SCons.Errors.UserError,
"Test called after sconf.Finish()")
context = CheckContext(self.sconf)
ret = apply(self.test, (context,) + args, kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
if _ac_config_logs.has_key(self.logfile):
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError, "Finish may be called only once!"
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if not self.config_h is None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext:
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuilt if the dependencies have
    changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = ""
# that tests won't be able to include the config.h file, and so
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if type(res) in BooleanTypes:
if res:
text = "yes"
else:
text = "no"
elif type(res) == types.StringType:
text = res
else:
raise TypeError, "Expected string, int or bool, got " + str(type(res))
if self.did_show_result == 0:
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return apply(self.sconf.TryBuild, args, kw)
def TryAction(self, *args, **kw):
return apply(self.sconf.TryAction, args, kw)
def TryCompile(self, *args, **kw):
return apply(self.sconf.TryCompile, args, kw)
def TryLink(self, *args, **kw):
return apply(self.sconf.TryLink, args, kw)
def TryRun(self, *args, **kw):
return apply(self.sconf.TryRun, args, kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError, "CheckContext instance has no attribute '%s'" % attr
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
# We assume that Display is called twice for each test here
# once for the Checking for ... message and once for the result.
# The self.sconf.cached flag can only be set between those calls
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return apply(SConfBase, args, kw)
else:
return SCons.Util.Null()
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
# used by CheckHeader and CheckLibWithHeader to produce C - #include
# statements from the specified header (list)
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return string.join(l, ''), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
"""
Another (more sophisticated) test for a library.
    Checks if the library and header are available for the language (may be 'C'
    or 'CXX'). call may be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
| false
| true
|
7907c232c1a56b6ea80e5d08bb7cefa7387b189c
| 1,433
|
py
|
Python
|
rasters/project_to_behrmann.py
|
samuelbosch/phd
|
97348f3b9795dc0529b02060df576455b4d184a9
|
[
"Unlicense"
] | null | null | null |
rasters/project_to_behrmann.py
|
samuelbosch/phd
|
97348f3b9795dc0529b02060df576455b4d184a9
|
[
"Unlicense"
] | null | null | null |
rasters/project_to_behrmann.py
|
samuelbosch/phd
|
97348f3b9795dc0529b02060df576455b4d184a9
|
[
"Unlicense"
] | null | null | null |
"""
Small script to generate gdalwarp commands
for projecting rasters to the Behrmann (cylindrical equal-area) projection.
To be able to run the generated bat file you should have gdalwarp in your path or run it from an OSGeo4W Shell.
"""
import os
root = r"D:\a\data\BioOracle_scenarios_30s_min250"
output = root + r"_equal_area" #os.path.abspath(os.path.join(root, r'..\ascii_equalarea'))
nodata = "-9999"
def create_bat():
proj = "+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +datum=WGS84 +ellps=WGS84 +units=m +no_defs"
with open('project_to_behrmann.bat', 'w') as bat:
for r, dirs, files in os.walk(root):
for f in files:
n, ext = os.path.splitext(f)
if ext == '.asc':
## output of ascii files from gdalwarp is not supported
temptiff = os.path.join(output, n + '.tiff')
                    bat.write('gdalwarp -of GTiff -multi -srcnodata %s -dstnodata %s -t_srs "%s" "%s" "%s"\n' % (nodata, nodata, proj, os.path.join(r, f), temptiff))
## convert output tiff to ascii
outdir = r.replace(root, output)
if not os.path.exists(outdir): os.makedirs(outdir)
bat.write('gdal_translate -of AAIGrid "%s" "%s"\n' % (temptiff, os.path.join(outdir,f)))
## delete temp file
bat.write('del "%s"\n'%temptiff)
if __name__ == '__main__':
create_bat()
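# Illustrative sketch (not part of the original source): for a hypothetical input
# raster D:\a\data\BioOracle_scenarios_30s_min250\depth.asc, the generated bat
# file would contain roughly the following three commands (paths abbreviated):
#
#   gdalwarp -of GTiff -multi -srcnodata -9999 -dstnodata -9999 -t_srs "+proj=cea +lon_0=0 +lat_ts=30 ..." "...\depth.asc" "..._equal_area\depth.tiff"
#   gdal_translate -of AAIGrid "..._equal_area\depth.tiff" "..._equal_area\depth.asc"
#   del "..._equal_area\depth.tiff"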
| 43.424242
| 157
| 0.586183
|
import os
root = r"D:\a\data\BioOracle_scenarios_30s_min250"
output = root + r"_equal_area"
nodata = "-9999"
def create_bat():
proj = "+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +datum=WGS84 +ellps=WGS84 +units=m +no_defs"
with open('project_to_behrmann.bat', 'w') as bat:
for r, dirs, files in os.walk(root):
for f in files:
n, ext = os.path.splitext(f)
if ext == '.asc':
                    temptiff = os.path.join(output, n + '.tiff')
                    bat.write('gdalwarp -of GTiff -multi -srcnodata %s -dstnodata %s -t_srs "%s" "%s" "%s"\n' % (nodata, nodata, proj, os.path.join(r, f), temptiff))
                    outdir = r.replace(root, output)
if not os.path.exists(outdir): os.makedirs(outdir)
bat.write('gdal_translate -of AAIGrid "%s" "%s"\n' % (temptiff, os.path.join(outdir,f)))
bat.write('del "%s"\n'%temptiff)
if __name__ == '__main__':
create_bat()
| true
| true
|
7907c3382bc7560cb9f38ba5bb20d7e6252a6fba
| 3,097
|
py
|
Python
|
ivanti_security_controls/icon_ivanti_security_controls/actions/start_patch_scan/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
ivanti_security_controls/icon_ivanti_security_controls/actions/start_patch_scan/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
ivanti_security_controls/icon_ivanti_security_controls/actions/start_patch_scan/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import insightconnect_plugin_runtime
from .schema import StartPatchScanInput, StartPatchScanOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
import polling2
class StartPatchScan(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='start_patch_scan',
description=Component.DESCRIPTION,
input=StartPatchScanInput(),
output=StartPatchScanOutput())
def run(self, params={}):
endpoint_names = params.get(Input.HOSTNAMES, [])
machine_group_ids = params.get(Input.MACHINE_GROUP_IDS, [])
use_machine_credential = params.get(Input.USE_MACHINE_CREDENTIAL, False)
max_poll_time = params.get(Input.MAX_POLL_TIME)
if not endpoint_names and not machine_group_ids:
raise PluginException(cause='No hostnames or machine group IDs specified.',
assistance='Either hostnames or machine group IDs must be specified.'
)
if use_machine_credential:
if not endpoint_names:
raise PluginException(cause='Machine credentials can only be set to true if hostname is specified.',
assistance='Either provide a valid hostname or set machine credentials to False.')
payload = {
"credentialId": params.get(Input.CREDENTIAL_ID),
"diagnosticTraceEnabled": params.get(Input.DIAGNOSTIC_TRACE_ENABLED),
"endpointNames": endpoint_names,
"machinegroupIds": machine_group_ids,
"name": params.get(Input.NAME),
"runAsCredentialId": params.get(Input.RUN_AS_CREDENTIAL_ID),
"templateId": params.get(Input.TEMPLATE_ID),
"useMachineCredential": use_machine_credential
}
self.connection.ivanti_api.create_session_credential()
scan = self.connection.ivanti_api.start_patch_scan(payload)
try:
operation_location_url = scan.headers.get("Operation-Location")
polling2.poll(lambda: self.connection.ivanti_api.get_operation_location(operation_location_url)
.get("percentComplete") == 100, step=10, timeout=max_poll_time)
except KeyError as e:
raise PluginException(
cause=f'{e} not found within the header.',
assistance=f'If the issue persists please contact support.')
except polling2.TimeoutException as e:
raise PluginException(
cause='Action timeout.',
assistance=f'This scan has exceeded the maximum poll time of {max_poll_time}.')
operation_location = self.connection.ivanti_api.get_operation_location(operation_location_url)
scan_details = scan.json()
scan_details['isComplete'] = True
scan_details['updatedOn'] = operation_location['lastAction']
return {
Output.SCAN_DETAILS: scan_details
}
| 46.223881
| 120
| 0.656765
|
import insightconnect_plugin_runtime
from .schema import StartPatchScanInput, StartPatchScanOutput, Input, Output, Component
from insightconnect_plugin_runtime.exceptions import PluginException
import polling2
class StartPatchScan(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='start_patch_scan',
description=Component.DESCRIPTION,
input=StartPatchScanInput(),
output=StartPatchScanOutput())
def run(self, params={}):
endpoint_names = params.get(Input.HOSTNAMES, [])
machine_group_ids = params.get(Input.MACHINE_GROUP_IDS, [])
use_machine_credential = params.get(Input.USE_MACHINE_CREDENTIAL, False)
max_poll_time = params.get(Input.MAX_POLL_TIME)
if not endpoint_names and not machine_group_ids:
raise PluginException(cause='No hostnames or machine group IDs specified.',
assistance='Either hostnames or machine group IDs must be specified.'
)
if use_machine_credential:
if not endpoint_names:
raise PluginException(cause='Machine credentials can only be set to true if hostname is specified.',
assistance='Either provide a valid hostname or set machine credentials to False.')
payload = {
"credentialId": params.get(Input.CREDENTIAL_ID),
"diagnosticTraceEnabled": params.get(Input.DIAGNOSTIC_TRACE_ENABLED),
"endpointNames": endpoint_names,
"machinegroupIds": machine_group_ids,
"name": params.get(Input.NAME),
"runAsCredentialId": params.get(Input.RUN_AS_CREDENTIAL_ID),
"templateId": params.get(Input.TEMPLATE_ID),
"useMachineCredential": use_machine_credential
}
self.connection.ivanti_api.create_session_credential()
scan = self.connection.ivanti_api.start_patch_scan(payload)
try:
operation_location_url = scan.headers.get("Operation-Location")
polling2.poll(lambda: self.connection.ivanti_api.get_operation_location(operation_location_url)
.get("percentComplete") == 100, step=10, timeout=max_poll_time)
except KeyError as e:
raise PluginException(
cause=f'{e} not found within the header.',
assistance=f'If the issue persists please contact support.')
except polling2.TimeoutException as e:
raise PluginException(
cause='Action timeout.',
assistance=f'This scan has exceeded the maximum poll time of {max_poll_time}.')
operation_location = self.connection.ivanti_api.get_operation_location(operation_location_url)
scan_details = scan.json()
scan_details['isComplete'] = True
scan_details['updatedOn'] = operation_location['lastAction']
return {
Output.SCAN_DETAILS: scan_details
}
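The completion wait in run() relies on polling2.poll, which keeps calling a target until it returns a truthy value or the timeout expires (raising polling2.TimeoutException). A minimal, self-contained sketch of the same pattern, with a hypothetical progress source standing in for the Ivanti API call:

import polling2

# Hypothetical stand-in for get_operation_location(...)["percentComplete"].
progress = {"value": 0}

def percent_complete():
    progress["value"] += 50  # pretend the scan advances on every poll
    return progress["value"]

# Re-check every 2 seconds, give up after 30 seconds.
polling2.poll(lambda: percent_complete() == 100, step=2, timeout=30)
print("scan finished")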
| true
| true
|
7907c353fe7228172d12dfe2ebf9d989531d716e
| 583
|
py
|
Python
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 9
|
2017-02-13T18:17:13.000Z
|
2020-11-21T20:15:54.000Z
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 2
|
2021-05-20T14:26:47.000Z
|
2021-05-20T14:26:48.000Z
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 4
|
2017-04-02T18:48:59.000Z
|
2021-11-23T15:40:35.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0004_uuidfield'),
('meetings', '0009_auto_20170106_1414'),
]
operations = [
migrations.AddField(
model_name='meeting',
name='documents_zip',
field=models.ForeignKey(to='documents.Document', related_name='zip_for_meeting', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| 26.5
| 150
| 0.656947
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0004_uuidfield'),
('meetings', '0009_auto_20170106_1414'),
]
operations = [
migrations.AddField(
model_name='meeting',
name='documents_zip',
field=models.ForeignKey(to='documents.Document', related_name='zip_for_meeting', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| true
| true
|
7907c41ce2e0c1f14bb1bc6af581366dc6383f46
| 176
|
py
|
Python
|
stringdb/__init__.py
|
gpp-rnd/stringdb
|
8e3b6e0ccbf14c866049f70d85b7e3e7a3f1c210
|
[
"MIT"
] | 4
|
2020-06-28T17:53:37.000Z
|
2022-01-25T20:12:36.000Z
|
stringdb/__init__.py
|
gpp-rnd/stringdb
|
8e3b6e0ccbf14c866049f70d85b7e3e7a3f1c210
|
[
"MIT"
] | null | null | null |
stringdb/__init__.py
|
gpp-rnd/stringdb
|
8e3b6e0ccbf14c866049f70d85b7e3e7a3f1c210
|
[
"MIT"
] | 1
|
2021-08-12T20:11:26.000Z
|
2021-08-12T20:11:26.000Z
|
"""Top-level package for stringdb. Imports the api module"""
from .api import *
__author__ = """Peter C DeWeirdt"""
__email__ = 'petedeweirdt@gmail.com'
__version__ = '0.1.5'
| 25.142857
| 60
| 0.710227
|
from .api import *
__author__ = """Peter C DeWeirdt"""
__email__ = 'petedeweirdt@gmail.com'
__version__ = '0.1.5'
| true
| true
|
7907c47eba7b3aeac3adac5b4cdd9651ee1d73c2
| 5,433
|
py
|
Python
|
tensorflow/contrib/batching/python/ops/batch_ops.py
|
ekyuho/tensorflow
|
e0b721190502346e5485010c8db78339e08c5951
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/batching/python/ops/batch_ops.py
|
ekyuho/tensorflow
|
e0b721190502346e5485010c8db78339e08c5951
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/batching/python/ops/batch_ops.py
|
ekyuho/tensorflow
|
e0b721190502346e5485010c8db78339e08c5951
|
[
"Apache-2.0"
] | 3
|
2018-03-09T05:23:57.000Z
|
2021-08-11T02:38:31.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for automatic batching and unbatching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.batching.ops import gen_batch_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.batching.ops.gen_batch_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_batch_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_batch_ops.so"))
@ops.RegisterGradient("Batch")
def _BatchGrad(op, *out_grads): # pylint: disable=invalid-name
"""Gradient for batch op."""
gradients = []
for i in range(len(op.inputs)):
gradients.append(
gen_batch_ops.unbatch(
out_grads[i],
op.outputs[-2],
op.outputs[-1],
timeout_micros=op.get_attr("grad_timeout_micros"),
shared_name="batch_gradient_{}_{}".format(op.name, i)))
return gradients
@ops.RegisterGradient("Unbatch")
def _UnbatchGrad(op, grad): # pylint: disable=invalid-name
return [
gen_batch_ops.unbatch_grad(
op.inputs[0],
op.inputs[1],
grad,
op.inputs[2],
shared_name="unbatch_gradient_{}".format(op.name)), None, None
]
def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros,
allowed_batch_sizes=None,
grad_timeout_micros=60 * 1000 * 1000,
unbatch_timeout_micros=60 * 1000 * 1000):
"""Batches the computation done by the decorated function.
So, for example, in the following code
```python
@batch_function(1, 2, 3)
def layer(a):
return tf.matmul(a, a)
b = layer(w)
```
if more than one session.run call is simultaneously trying to compute `b`
the values of `w` will be gathered, non-deterministically concatenated
along the first axis, and only one thread will run the computation. See the
documentation of the `Batch` op for more details.
Assumes that all arguments of the decorated function are Tensors which will
be batched along their first dimension.
SparseTensor is not supported. The return value of the decorated function
must be a Tensor or a list/tuple of Tensors.
Args:
num_batch_threads: Number of scheduling threads for processing batches
of work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
grad_timeout_micros: The timeout to use for the gradient. See the
documentation of the unbatch op for more details. Defaults to 60s.
unbatch_timeout_micros: The timeout to use for unbatching. See the
documentation of the unbatch op for more details. Defaults to 60s.
Returns:
The decorated function will return the unbatched computation output Tensors.
"""
def decorator(f): # pylint: disable=missing-docstring
def decorated(*args):
with ops.name_scope("batch") as name:
for a in args:
if not isinstance(a, ops.Tensor):
raise ValueError("All arguments to functions decorated with "
"`batch_function` are supposed to be Tensors; "
"found %s" % repr(a))
batched_tensors, batch_index, id_t = gen_batch_ops.batch(
args,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
grad_timeout_micros=grad_timeout_micros,
shared_name=name)
outputs = f(*batched_tensors)
if isinstance(outputs, ops.Tensor):
outputs_list = [outputs]
else:
outputs_list = outputs
with ops.name_scope("unbatch") as unbatch_name:
unbatched = [
gen_batch_ops.unbatch(t, batch_index, id_t,
timeout_micros=unbatch_timeout_micros,
shared_name=unbatch_name)
for t in outputs_list]
if isinstance(outputs, ops.Tensor):
return unbatched[0]
return unbatched
return decorated
return decorator
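A self-contained completion of the docstring example above, assuming a TensorFlow 1.x build where tf.contrib and the compiled _batch_ops kernel are available; it issues a single call, so the incomplete batch is emitted once batch_timeout_micros elapses:

import tensorflow as tf
from tensorflow.contrib.batching.python.ops.batch_ops import batch_function

@batch_function(num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100)
def layer(a):
    return tf.matmul(a, a)

# The first dimension is the one batched across concurrent session.run calls.
w = tf.placeholder(tf.float32, shape=[1, 2, 2])
b = layer(w)

with tf.Session() as sess:
    print(sess.run(b, feed_dict={w: [[[1.0, 2.0], [3.0, 4.0]]]}))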
| 39.086331
| 80
| 0.683416
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.batching.ops import gen_batch_ops
from tensorflow.contrib.batching.ops.gen_batch_ops import *
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_batch_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_batch_ops.so"))
@ops.RegisterGradient("Batch")
def _BatchGrad(op, *out_grads):
gradients = []
for i in range(len(op.inputs)):
gradients.append(
gen_batch_ops.unbatch(
out_grads[i],
op.outputs[-2],
op.outputs[-1],
timeout_micros=op.get_attr("grad_timeout_micros"),
shared_name="batch_gradient_{}_{}".format(op.name, i)))
return gradients
@ops.RegisterGradient("Unbatch")
def _UnbatchGrad(op, grad):
return [
gen_batch_ops.unbatch_grad(
op.inputs[0],
op.inputs[1],
grad,
op.inputs[2],
shared_name="unbatch_gradient_{}".format(op.name)), None, None
]
def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros,
allowed_batch_sizes=None,
grad_timeout_micros=60 * 1000 * 1000,
unbatch_timeout_micros=60 * 1000 * 1000):
def decorator(f):
def decorated(*args):
with ops.name_scope("batch") as name:
for a in args:
if not isinstance(a, ops.Tensor):
raise ValueError("All arguments to functions decorated with "
"`batch_function` are supposed to be Tensors; "
"found %s" % repr(a))
batched_tensors, batch_index, id_t = gen_batch_ops.batch(
args,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
grad_timeout_micros=grad_timeout_micros,
shared_name=name)
outputs = f(*batched_tensors)
if isinstance(outputs, ops.Tensor):
outputs_list = [outputs]
else:
outputs_list = outputs
with ops.name_scope("unbatch") as unbatch_name:
unbatched = [
gen_batch_ops.unbatch(t, batch_index, id_t,
timeout_micros=unbatch_timeout_micros,
shared_name=unbatch_name)
for t in outputs_list]
if isinstance(outputs, ops.Tensor):
return unbatched[0]
return unbatched
return decorated
return decorator
| true
| true
|
7907c5b0b2ac0f60620b0c4b94b611b56eee5608
| 921
|
py
|
Python
|
apps/utils/migrations/0012_auto_20160824_0543.py
|
itsMagondu/MaMaSe
|
0287e092121155314e76124425ef26bb4154847f
|
[
"Apache-2.0"
] | 3
|
2016-03-08T15:15:00.000Z
|
2020-03-05T05:32:19.000Z
|
apps/utils/migrations/0012_auto_20160824_0543.py
|
itsMagondu/MaMaSe
|
0287e092121155314e76124425ef26bb4154847f
|
[
"Apache-2.0"
] | 65
|
2015-09-25T13:32:12.000Z
|
2022-03-11T23:22:12.000Z
|
apps/utils/migrations/0012_auto_20160824_0543.py
|
itsMagondu/MaMaSe
|
0287e092121155314e76124425ef26bb4154847f
|
[
"Apache-2.0"
] | 2
|
2017-05-16T07:56:10.000Z
|
2020-06-06T06:01:31.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-24 05:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('utils', '0011_auto_20160822_1127'),
]
operations = [
migrations.CreateModel(
name='River',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='channel',
name='river',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rivers', to='utils.River'),
),
]
| 30.7
| 145
| 0.605863
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('utils', '0011_auto_20160822_1127'),
]
operations = [
migrations.CreateModel(
name='River',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='channel',
name='river',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rivers', to='utils.River'),
),
]
| true
| true
|
7907c5c116d79823d0d30543920cab7e5bb67543
| 455
|
py
|
Python
|
data/scripts/templates/object/creature/npc/base/shared_dantari_base_male.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/creature/npc/base/shared_dantari_base_male.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/creature/npc/base/shared_dantari_base_male.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/npc/base/shared_dantari_base_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","dantari_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.764706
| 74
| 0.731868
| true
| true
|
|
7907c846856a644330fc1027d3a5e81c1ee1afa6
| 19,128
|
py
|
Python
|
src/schemathesis/runner/__init__.py
|
hlobit/schemathesis
|
55cea2ca907fdec12c963721a22a3372d0b24abe
|
[
"MIT"
] | null | null | null |
src/schemathesis/runner/__init__.py
|
hlobit/schemathesis
|
55cea2ca907fdec12c963721a22a3372d0b24abe
|
[
"MIT"
] | null | null | null |
src/schemathesis/runner/__init__.py
|
hlobit/schemathesis
|
55cea2ca907fdec12c963721a22a3372d0b24abe
|
[
"MIT"
] | null | null | null |
import ctypes
import logging
import threading
import time
from contextlib import contextmanager
from queue import Queue
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union, cast
import attr
import hypothesis
import hypothesis.errors
import requests
from _pytest.logging import LogCaptureHandler, catching_logs
from requests.auth import HTTPDigestAuth, _basic_auth_str
from .._hypothesis import make_test_or_exception
from ..checks import DEFAULT_CHECKS
from ..constants import USER_AGENT
from ..exceptions import InvalidSchema
from ..loaders import from_uri
from ..models import Case, Endpoint, Status, TestResult, TestResultSet
from ..schemas import BaseSchema
from ..utils import WSGIResponse, capture_hypothesis_output, get_base_url
from . import events
DEFAULT_DEADLINE = 500 # pragma: no mutate
RawAuth = Tuple[str, str] # pragma: no mutate
def get_hypothesis_settings(hypothesis_options: Optional[Dict[str, Any]] = None) -> hypothesis.settings:
# Default settings, used as a parent settings object below
settings = hypothesis.settings(deadline=DEFAULT_DEADLINE)
if hypothesis_options is not None:
settings = hypothesis.settings(settings, **hypothesis_options)
return settings
# pylint: disable=too-many-instance-attributes
@attr.s
class BaseRunner:
schema: BaseSchema = attr.ib()
checks: Iterable[Callable] = attr.ib()
hypothesis_settings: hypothesis.settings = attr.ib(converter=get_hypothesis_settings)
auth: Optional[RawAuth] = attr.ib(default=None)
auth_type: Optional[str] = attr.ib(default=None)
headers: Optional[Dict[str, Any]] = attr.ib(default=None)
request_timeout: Optional[int] = attr.ib(default=None)
seed: Optional[int] = attr.ib(default=None)
def execute(self,) -> Generator[events.ExecutionEvent, None, None]:
"""Common logic for all runners."""
results = TestResultSet()
initialized = events.Initialized(
results=results, schema=self.schema, checks=self.checks, hypothesis_settings=self.hypothesis_settings
)
yield initialized
yield from self._execute(results)
yield events.Finished(results=results, schema=self.schema, running_time=time.time() - initialized.start_time)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
raise NotImplementedError
@attr.s(slots=True)
class SingleThreadRunner(BaseRunner):
"""Fast runner that runs tests sequentially in the main thread."""
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
auth = get_requests_auth(self.auth, self.auth_type)
with get_session(auth, self.headers) as session:
for endpoint, test in self.schema.get_all_tests(network_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
session=session,
request_timeout=self.request_timeout,
):
yield event
if isinstance(event, events.Interrupted):
return
@attr.s(slots=True)
class SingleThreadWSGIRunner(SingleThreadRunner):
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
for endpoint, test in self.schema.get_all_tests(wsgi_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
auth=self.auth,
auth_type=self.auth_type,
headers=self.headers,
):
yield event
if isinstance(event, events.Interrupted):
return
def _run_task(
test_template: Callable,
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
**kwargs: Any,
) -> None:
# pylint: disable=too-many-arguments
with capture_hypothesis_output():
while not tasks_queue.empty():
endpoint = tasks_queue.get()
test = make_test_or_exception(endpoint, test_template, settings, seed)
for event in run_test(schema, endpoint, test, checks, results, **kwargs):
events_queue.put(event)
def thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
"""A single task, that threads do.
Pretty similar to the default one-thread flow, but includes communication with the main thread via the events queue.
"""
# pylint: disable=too-many-arguments
prepared_auth = get_requests_auth(auth, auth_type)
with get_session(prepared_auth, headers) as session:
_run_task(
network_test, tasks_queue, events_queue, schema, checks, settings, seed, results, session=session, **kwargs
)
def wsgi_thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
# pylint: disable=too-many-arguments
_run_task(wsgi_test, tasks_queue, events_queue, schema, checks, settings, seed, results, **kwargs)
def stop_worker(thread_id: int) -> None:
"""Raise an error in a thread so it is possible to asynchronously stop thread execution."""
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
class ThreadInterrupted(Exception):
"""Special exception when worker thread received SIGINT."""
@attr.s(slots=True)
class ThreadPoolRunner(BaseRunner):
"""Spread different tests among multiple worker threads."""
workers_num: int = attr.ib(default=2)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
"""All events come from a queue where different workers push their events."""
tasks_queue = self._get_tasks_queue()
# Events are pushed by workers via a separate queue
events_queue: Queue = Queue()
workers = self._init_workers(tasks_queue, events_queue, results)
def stop_workers() -> None:
for worker in workers:
# workers are initialized at this point and `worker.ident` is set with an integer value
ident = cast(int, worker.ident)
stop_worker(ident)
worker.join()
is_finished = False
try:
while not is_finished:
# Sleep is needed for performance reasons
# each call to `is_alive` on a live worker waits for a lock
# iterations without waiting are too frequent and a lot of time would be spent waiting for these locks
time.sleep(0.001)
is_finished = all(not worker.is_alive() for worker in workers)
while not events_queue.empty():
event = events_queue.get()
yield event
if isinstance(event, events.Interrupted):
# Thread received SIGINT
# We could still have events in the queue, but ignore them to keep the logic simple
# for now, could be improved in the future to show more info in such corner cases
raise ThreadInterrupted
except ThreadInterrupted:
stop_workers()
except KeyboardInterrupt:
stop_workers()
yield events.Interrupted(results=results, schema=self.schema)
def _get_tasks_queue(self) -> Queue:
"""All endpoints are distributed among all workers via a queue."""
tasks_queue: Queue = Queue()
tasks_queue.queue.extend(self.schema.get_all_endpoints())
return tasks_queue
def _init_workers(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> List[threading.Thread]:
"""Initialize & start workers that will execute tests."""
workers = [
threading.Thread(
target=self._get_task(), kwargs=self._get_worker_kwargs(tasks_queue, events_queue, results)
)
for _ in range(self.workers_num)
]
for worker in workers:
worker.start()
return workers
def _get_task(self) -> Callable:
return thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"auth": self.auth,
"auth_type": self.auth_type,
"headers": self.headers,
"seed": self.seed,
"results": results,
"kwargs": {"request_timeout": self.request_timeout},
}
class ThreadPoolWSGIRunner(ThreadPoolRunner):
def _get_task(self) -> Callable:
return wsgi_thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"seed": self.seed,
"results": results,
"kwargs": {"auth": self.auth, "auth_type": self.auth_type, "headers": self.headers},
}
def execute_from_schema(
schema: BaseSchema,
checks: Iterable[Callable],
*,
workers_num: int = 1,
hypothesis_options: Optional[Dict[str, Any]] = None,
auth: Optional[RawAuth] = None,
auth_type: Optional[str] = None,
headers: Optional[Dict[str, Any]] = None,
request_timeout: Optional[int] = None,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
"""Execute tests for the given schema.
Provides the main testing loop and preparation step.
"""
runner: BaseRunner
if workers_num > 1:
if schema.app:
runner = ThreadPoolWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
workers_num=workers_num,
)
else:
runner = ThreadPoolRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
else:
if schema.app:
runner = SingleThreadWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
)
else:
runner = SingleThreadRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
yield from runner.execute()
def run_test(
schema: BaseSchema,
endpoint: Endpoint,
test: Union[Callable, InvalidSchema],
checks: Iterable[Callable],
results: TestResultSet,
**kwargs: Any,
) -> Generator[events.ExecutionEvent, None, None]:
"""A single test run with all error handling needed."""
# pylint: disable=too-many-arguments
result = TestResult(endpoint=endpoint)
yield events.BeforeExecution(results=results, schema=schema, endpoint=endpoint)
hypothesis_output: List[str] = []
try:
if isinstance(test, InvalidSchema):
status = Status.error
result.add_error(test)
else:
with capture_hypothesis_output() as hypothesis_output:
test(checks, result, **kwargs)
status = Status.success
except AssertionError:
status = Status.failure
except hypothesis.errors.Flaky:
status = Status.error
result.mark_errored()
# Sometimes Hypothesis detects inconsistent test results and checks are not available
if result.checks:
flaky_example = result.checks[-1].example
else:
flaky_example = None
result.add_error(
hypothesis.errors.Flaky(
"Tests on this endpoint produce unreliable results: \n"
"Falsified on the first call but did not on a subsequent one"
),
flaky_example,
)
except hypothesis.errors.Unsatisfiable:
# We need a clearer error message here
status = Status.error
result.add_error(hypothesis.errors.Unsatisfiable("Unable to satisfy schema parameters for this endpoint"))
except KeyboardInterrupt:
yield events.Interrupted(results=results, schema=schema)
return
except Exception as error:
status = Status.error
result.add_error(error)
# Fetch seed value, hypothesis generates it during test execution
result.seed = getattr(test, "_hypothesis_internal_use_seed", None) or getattr(
test, "_hypothesis_internal_use_generated_seed", None
)
results.append(result)
yield events.AfterExecution(
results=results, schema=schema, endpoint=endpoint, status=status, hypothesis_output=hypothesis_output
)
def execute( # pylint: disable=too-many-arguments
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
) -> TestResultSet:
generator = prepare(
schema_uri=schema_uri,
checks=checks,
api_options=api_options,
loader_options=loader_options,
hypothesis_options=hypothesis_options,
loader=loader,
)
all_events = list(generator)
finished = all_events[-1]
return finished.results
def prepare( # pylint: disable=too-many-arguments
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
workers_num: int = 1,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
"""Prepare a generator that will run test cases against the given API definition."""
api_options = api_options or {}
loader_options = loader_options or {}
if "base_url" not in loader_options:
loader_options["base_url"] = get_base_url(schema_uri)
schema = loader(schema_uri, **loader_options)
return execute_from_schema(
schema, checks, hypothesis_options=hypothesis_options, seed=seed, workers_num=workers_num, **api_options
)
def network_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
session: requests.Session,
request_timeout: Optional[int],
) -> None:
"""A single test body that will be executed against the target."""
# pylint: disable=too-many-arguments
timeout = prepare_timeout(request_timeout)
response = case.call(session=session, timeout=timeout)
_run_checks(case, checks, result, response)
def wsgi_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
) -> None:
# pylint: disable=too-many-arguments
headers = _prepare_wsgi_headers(headers, auth, auth_type)
with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
response = case.call_wsgi(headers=headers)
result.logs.extend(recorded.records)
_run_checks(case, checks, result, response)
def _prepare_wsgi_headers(
headers: Optional[Dict[str, Any]], auth: Optional[RawAuth], auth_type: Optional[str]
) -> Dict[str, Any]:
headers = headers or {}
headers.setdefault("User-agent", USER_AGENT)
wsgi_auth = get_wsgi_auth(auth, auth_type)
if wsgi_auth:
headers["Authorization"] = wsgi_auth
return headers
def _run_checks(
case: Case, checks: Iterable[Callable], result: TestResult, response: Union[requests.Response, WSGIResponse]
) -> None:
errors = None
for check in checks:
check_name = check.__name__
try:
check(response, result)
result.add_success(check_name, case)
except AssertionError as exc:
errors = True # pragma: no mutate
result.add_failure(check_name, case, str(exc))
if errors is not None:
# An exception needed to trigger Hypothesis shrinking & flaky tests detection logic
# The message doesn't matter
raise AssertionError
def prepare_timeout(timeout: Optional[int]) -> Optional[float]:
"""Request timeout is in milliseconds, but `requests` uses seconds"""
output: Optional[Union[int, float]] = timeout
if timeout is not None:
output = timeout / 1000
return output
@contextmanager
def get_session(
auth: Optional[Union[HTTPDigestAuth, RawAuth]] = None, headers: Optional[Dict[str, Any]] = None
) -> Generator[requests.Session, None, None]:
with requests.Session() as session:
if auth is not None:
session.auth = auth
session.headers["User-agent"] = USER_AGENT
if headers is not None:
session.headers.update(**headers)
yield session
def get_requests_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[Union[HTTPDigestAuth, RawAuth]]:
if auth and auth_type == "digest":
return HTTPDigestAuth(*auth)
return auth
def get_wsgi_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[str]:
if auth:
if auth_type == "digest":
raise ValueError("Digest auth is not supported for WSGI apps")
return _basic_auth_str(*auth)
return None
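A minimal sketch of driving this runner from user code; the schema URL is hypothetical, and only names defined or re-exported by this module (prepare, execute, events) are used:

from schemathesis.runner import execute, prepare, events

SCHEMA_URL = "http://127.0.0.1:8080/swagger.json"  # hypothetical API under test

# Stream events as tests run: Initialized, Before/AfterExecution per endpoint, then Finished.
for event in prepare(SCHEMA_URL, workers_num=2, seed=42):
    if isinstance(event, events.AfterExecution):
        print(event.endpoint, event.status)

# Or run everything in one call and keep only the aggregated TestResultSet.
results = execute(SCHEMA_URL)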
| 35.88743
| 120
| 0.644657
|
import ctypes
import logging
import threading
import time
from contextlib import contextmanager
from queue import Queue
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union, cast
import attr
import hypothesis
import hypothesis.errors
import requests
from _pytest.logging import LogCaptureHandler, catching_logs
from requests.auth import HTTPDigestAuth, _basic_auth_str
from .._hypothesis import make_test_or_exception
from ..checks import DEFAULT_CHECKS
from ..constants import USER_AGENT
from ..exceptions import InvalidSchema
from ..loaders import from_uri
from ..models import Case, Endpoint, Status, TestResult, TestResultSet
from ..schemas import BaseSchema
from ..utils import WSGIResponse, capture_hypothesis_output, get_base_url
from . import events
DEFAULT_DEADLINE = 500
RawAuth = Tuple[str, str]
def get_hypothesis_settings(hypothesis_options: Optional[Dict[str, Any]] = None) -> hypothesis.settings:
settings = hypothesis.settings(deadline=DEFAULT_DEADLINE)
if hypothesis_options is not None:
settings = hypothesis.settings(settings, **hypothesis_options)
return settings
@attr.s
class BaseRunner:
schema: BaseSchema = attr.ib()
checks: Iterable[Callable] = attr.ib()
hypothesis_settings: hypothesis.settings = attr.ib(converter=get_hypothesis_settings)
auth: Optional[RawAuth] = attr.ib(default=None)
auth_type: Optional[str] = attr.ib(default=None)
headers: Optional[Dict[str, Any]] = attr.ib(default=None)
request_timeout: Optional[int] = attr.ib(default=None)
seed: Optional[int] = attr.ib(default=None)
def execute(self,) -> Generator[events.ExecutionEvent, None, None]:
results = TestResultSet()
initialized = events.Initialized(
results=results, schema=self.schema, checks=self.checks, hypothesis_settings=self.hypothesis_settings
)
yield initialized
yield from self._execute(results)
yield events.Finished(results=results, schema=self.schema, running_time=time.time() - initialized.start_time)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
raise NotImplementedError
@attr.s(slots=True)
class SingleThreadRunner(BaseRunner):
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
auth = get_requests_auth(self.auth, self.auth_type)
with get_session(auth, self.headers) as session:
for endpoint, test in self.schema.get_all_tests(network_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
session=session,
request_timeout=self.request_timeout,
):
yield event
if isinstance(event, events.Interrupted):
return
@attr.s(slots=True)
class SingleThreadWSGIRunner(SingleThreadRunner):
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
for endpoint, test in self.schema.get_all_tests(wsgi_test, self.hypothesis_settings, self.seed):
for event in run_test(
self.schema,
endpoint,
test,
self.checks,
results,
auth=self.auth,
auth_type=self.auth_type,
headers=self.headers,
):
yield event
if isinstance(event, events.Interrupted):
return
def _run_task(
test_template: Callable,
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
**kwargs: Any,
) -> None:
with capture_hypothesis_output():
while not tasks_queue.empty():
endpoint = tasks_queue.get()
test = make_test_or_exception(endpoint, test_template, settings, seed)
for event in run_test(schema, endpoint, test, checks, results, **kwargs):
events_queue.put(event)
def thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
prepared_auth = get_requests_auth(auth, auth_type)
with get_session(prepared_auth, headers) as session:
_run_task(
network_test, tasks_queue, events_queue, schema, checks, settings, seed, results, session=session, **kwargs
)
def wsgi_thread_task(
tasks_queue: Queue,
events_queue: Queue,
schema: BaseSchema,
checks: Iterable[Callable],
settings: hypothesis.settings,
seed: Optional[int],
results: TestResultSet,
kwargs: Any,
) -> None:
_run_task(wsgi_test, tasks_queue, events_queue, schema, checks, settings, seed, results, **kwargs)
def stop_worker(thread_id: int) -> None:
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
class ThreadInterrupted(Exception):
@attr.s(slots=True)
class ThreadPoolRunner(BaseRunner):
workers_num: int = attr.ib(default=2)
def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:
tasks_queue = self._get_tasks_queue()
events_queue: Queue = Queue()
workers = self._init_workers(tasks_queue, events_queue, results)
def stop_workers() -> None:
for worker in workers:
ident = cast(int, worker.ident)
stop_worker(ident)
worker.join()
is_finished = False
try:
while not is_finished:
time.sleep(0.001)
is_finished = all(not worker.is_alive() for worker in workers)
while not events_queue.empty():
event = events_queue.get()
yield event
if isinstance(event, events.Interrupted):
raise ThreadInterrupted
except ThreadInterrupted:
stop_workers()
except KeyboardInterrupt:
stop_workers()
yield events.Interrupted(results=results, schema=self.schema)
def _get_tasks_queue(self) -> Queue:
tasks_queue: Queue = Queue()
tasks_queue.queue.extend(self.schema.get_all_endpoints())
return tasks_queue
def _init_workers(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> List[threading.Thread]:
workers = [
threading.Thread(
target=self._get_task(), kwargs=self._get_worker_kwargs(tasks_queue, events_queue, results)
)
for _ in range(self.workers_num)
]
for worker in workers:
worker.start()
return workers
def _get_task(self) -> Callable:
return thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"auth": self.auth,
"auth_type": self.auth_type,
"headers": self.headers,
"seed": self.seed,
"results": results,
"kwargs": {"request_timeout": self.request_timeout},
}
class ThreadPoolWSGIRunner(ThreadPoolRunner):
def _get_task(self) -> Callable:
return wsgi_thread_task
def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet) -> Dict[str, Any]:
return {
"tasks_queue": tasks_queue,
"events_queue": events_queue,
"schema": self.schema,
"checks": self.checks,
"settings": self.hypothesis_settings,
"seed": self.seed,
"results": results,
"kwargs": {"auth": self.auth, "auth_type": self.auth_type, "headers": self.headers},
}
def execute_from_schema(
schema: BaseSchema,
checks: Iterable[Callable],
*,
workers_num: int = 1,
hypothesis_options: Optional[Dict[str, Any]] = None,
auth: Optional[RawAuth] = None,
auth_type: Optional[str] = None,
headers: Optional[Dict[str, Any]] = None,
request_timeout: Optional[int] = None,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
runner: BaseRunner
if workers_num > 1:
if schema.app:
runner = ThreadPoolWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
workers_num=workers_num,
)
else:
runner = ThreadPoolRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
else:
if schema.app:
runner = SingleThreadWSGIRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
)
else:
runner = SingleThreadRunner(
schema=schema,
checks=checks,
hypothesis_settings=hypothesis_options,
auth=auth,
auth_type=auth_type,
headers=headers,
seed=seed,
request_timeout=request_timeout,
)
yield from runner.execute()
def run_test(
schema: BaseSchema,
endpoint: Endpoint,
test: Union[Callable, InvalidSchema],
checks: Iterable[Callable],
results: TestResultSet,
**kwargs: Any,
) -> Generator[events.ExecutionEvent, None, None]:
result = TestResult(endpoint=endpoint)
yield events.BeforeExecution(results=results, schema=schema, endpoint=endpoint)
hypothesis_output: List[str] = []
try:
if isinstance(test, InvalidSchema):
status = Status.error
result.add_error(test)
else:
with capture_hypothesis_output() as hypothesis_output:
test(checks, result, **kwargs)
status = Status.success
except AssertionError:
status = Status.failure
except hypothesis.errors.Flaky:
status = Status.error
result.mark_errored()
if result.checks:
flaky_example = result.checks[-1].example
else:
flaky_example = None
result.add_error(
hypothesis.errors.Flaky(
"Tests on this endpoint produce unreliable results: \n"
"Falsified on the first call but did not on a subsequent one"
),
flaky_example,
)
except hypothesis.errors.Unsatisfiable:
status = Status.error
result.add_error(hypothesis.errors.Unsatisfiable("Unable to satisfy schema parameters for this endpoint"))
except KeyboardInterrupt:
yield events.Interrupted(results=results, schema=schema)
return
except Exception as error:
status = Status.error
result.add_error(error)
result.seed = getattr(test, "_hypothesis_internal_use_seed", None) or getattr(
test, "_hypothesis_internal_use_generated_seed", None
)
results.append(result)
yield events.AfterExecution(
results=results, schema=schema, endpoint=endpoint, status=status, hypothesis_output=hypothesis_output
)
def execute(
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
) -> TestResultSet:
generator = prepare(
schema_uri=schema_uri,
checks=checks,
api_options=api_options,
loader_options=loader_options,
hypothesis_options=hypothesis_options,
loader=loader,
)
all_events = list(generator)
finished = all_events[-1]
return finished.results
def prepare(
schema_uri: str,
checks: Iterable[Callable] = DEFAULT_CHECKS,
workers_num: int = 1,
api_options: Optional[Dict[str, Any]] = None,
loader_options: Optional[Dict[str, Any]] = None,
hypothesis_options: Optional[Dict[str, Any]] = None,
loader: Callable = from_uri,
seed: Optional[int] = None,
) -> Generator[events.ExecutionEvent, None, None]:
api_options = api_options or {}
loader_options = loader_options or {}
if "base_url" not in loader_options:
loader_options["base_url"] = get_base_url(schema_uri)
schema = loader(schema_uri, **loader_options)
return execute_from_schema(
schema, checks, hypothesis_options=hypothesis_options, seed=seed, workers_num=workers_num, **api_options
)
def network_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
session: requests.Session,
request_timeout: Optional[int],
) -> None:
timeout = prepare_timeout(request_timeout)
response = case.call(session=session, timeout=timeout)
_run_checks(case, checks, result, response)
def wsgi_test(
case: Case,
checks: Iterable[Callable],
result: TestResult,
auth: Optional[RawAuth],
auth_type: Optional[str],
headers: Optional[Dict[str, Any]],
) -> None:
headers = _prepare_wsgi_headers(headers, auth, auth_type)
with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
response = case.call_wsgi(headers=headers)
result.logs.extend(recorded.records)
_run_checks(case, checks, result, response)
def _prepare_wsgi_headers(
headers: Optional[Dict[str, Any]], auth: Optional[RawAuth], auth_type: Optional[str]
) -> Dict[str, Any]:
headers = headers or {}
headers.setdefault("User-agent", USER_AGENT)
wsgi_auth = get_wsgi_auth(auth, auth_type)
if wsgi_auth:
headers["Authorization"] = wsgi_auth
return headers
def _run_checks(
case: Case, checks: Iterable[Callable], result: TestResult, response: Union[requests.Response, WSGIResponse]
) -> None:
errors = None
for check in checks:
check_name = check.__name__
try:
check(response, result)
result.add_success(check_name, case)
except AssertionError as exc:
errors = True
result.add_failure(check_name, case, str(exc))
if errors is not None:
raise AssertionError
def prepare_timeout(timeout: Optional[int]) -> Optional[float]:
output: Optional[Union[int, float]] = timeout
if timeout is not None:
output = timeout / 1000
return output
@contextmanager
def get_session(
auth: Optional[Union[HTTPDigestAuth, RawAuth]] = None, headers: Optional[Dict[str, Any]] = None
) -> Generator[requests.Session, None, None]:
with requests.Session() as session:
if auth is not None:
session.auth = auth
session.headers["User-agent"] = USER_AGENT
if headers is not None:
session.headers.update(**headers)
yield session
def get_requests_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[Union[HTTPDigestAuth, RawAuth]]:
if auth and auth_type == "digest":
return HTTPDigestAuth(*auth)
return auth
def get_wsgi_auth(auth: Optional[RawAuth], auth_type: Optional[str]) -> Optional[str]:
if auth:
if auth_type == "digest":
raise ValueError("Digest auth is not supported for WSGI apps")
return _basic_auth_str(*auth)
return None
| true
| true
|
7907ca2e64ffc8d1c2dc911818d33d542a896f45
| 1,184
|
py
|
Python
|
atomate/lammps/database.py
|
Zhuoying/atomate
|
067023f0f740d3abac47b7ae7743c1c31eff8a06
|
[
"BSD-3-Clause-LBNL"
] | 167
|
2017-01-26T00:14:19.000Z
|
2022-03-18T20:47:58.000Z
|
atomate/lammps/database.py
|
Zhuoying/atomate
|
067023f0f740d3abac47b7ae7743c1c31eff8a06
|
[
"BSD-3-Clause-LBNL"
] | 422
|
2016-12-16T18:21:15.000Z
|
2022-03-23T22:13:19.000Z
|
atomate/lammps/database.py
|
Zhuoying/atomate
|
067023f0f740d3abac47b7ae7743c1c31eff8a06
|
[
"BSD-3-Clause-LBNL"
] | 158
|
2016-12-16T18:28:00.000Z
|
2022-03-28T11:40:03.000Z
|
"""
This module defines the database classes.
"""
import pymongo
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = "Kiran Mathew"
__credits__ = "Anubhav Jain"
__email__ = "kmathew@lbl.gov"
logger = get_logger(__name__)
class LammpsCalcDb(CalcDb):
def __init__(
self,
host="localhost",
port=27017,
database="lammps",
collection="tasks",
user=None,
password=None,
**kwargs
):
super().__init__(host, port, database, collection, user, password, **kwargs)
def build_indexes(self, indexes=None, background=True):
indexes = indexes or []
self.collection.create_index("task_id", unique=True, background=background)
self.collection.create_index(
[("completed_at", pymongo.DESCENDING)], background=background
)
for i in indexes:
self.collection.create_index(i, background=background)
def reset(self):
self.collection.delete_many({})
self.db.counter.delete_one({"_id": "taskid"})
self.db.counter.insert_one({"_id": "taskid", "c": 0})
self.build_indexes()
| 26.909091
| 84
| 0.642736
|
import pymongo
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = "Kiran Mathew"
__credits__ = "Anubhav Jain"
__email__ = "kmathew@lbl.gov"
logger = get_logger(__name__)
class LammpsCalcDb(CalcDb):
def __init__(
self,
host="localhost",
port=27017,
database="lammps",
collection="tasks",
user=None,
password=None,
**kwargs
):
super().__init__(host, port, database, collection, user, password, **kwargs)
def build_indexes(self, indexes=None, background=True):
indexes = indexes or []
self.collection.create_index("task_id", unique=True, background=background)
self.collection.create_index(
[("completed_at", pymongo.DESCENDING)], background=background
)
for i in indexes:
self.collection.create_index(i, background=background)
def reset(self):
self.collection.delete_many({})
self.db.counter.delete_one({"_id": "taskid"})
self.db.counter.insert_one({"_id": "taskid", "c": 0})
self.build_indexes()
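A minimal usage sketch for the class above; the MongoDB connection details are hypothetical local defaults:

from atomate.lammps.database import LammpsCalcDb

# Connect to a hypothetical local MongoDB instance and prepare the tasks collection.
db = LammpsCalcDb(host="localhost", port=27017, database="lammps", collection="tasks")
db.build_indexes()   # unique index on task_id plus a descending index on completed_at
db.reset()           # clear all tasks and reset the task-id counter document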
| true
| true
|
7907cbe7ab34e79a8c97739c577f7313d2fcda1c
| 21,110
|
py
|
Python
|
recbole/model/knowledge_aware_recommender/kgnnls.py
|
xingkongxiaxia/xx
|
a75e3894adfd05f5167ca76c48d1bf8626ee8588
|
[
"MIT"
] | 4
|
2021-04-23T07:47:53.000Z
|
2022-02-01T13:48:33.000Z
|
recbole/model/knowledge_aware_recommender/kgnnls.py
|
xingkongxiaxia/RecBole
|
ce51d75406592d6bc25bb803f773f0788496fd97
|
[
"MIT"
] | null | null | null |
recbole/model/knowledge_aware_recommender/kgnnls.py
|
xingkongxiaxia/RecBole
|
ce51d75406592d6bc25bb803f773f0788496fd97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : cx.tian@outlook.com
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights, and it is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
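# Worked example of the key scheme above (hypothetical sizes): with n_entities = 2500,
# len(str(2500)) = 4 digits, so offset = 10 ** 4 = 10000. The pair (user_id=7, item_id=42)
# is stored under key 7 * 10000 + 42 = 70042; since every item/entity id is smaller than
# the offset, distinct (user, item) pairs can never collide.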
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
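# Note on the sampling above: every entity gets exactly neighbor_sample_size neighbors, so
# np.random.choice samples without replacement when enough neighbors exist and with
# replacement otherwise; entities absent from the KG fall back to self-loops with relation
# id 0, which keeps adj_entity and adj_relation rectangular.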
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
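            # map_ below applies lookup_interaction_table element-wise, replacing every
            # user-entity key with its stored label (0.5 for pairs missing from the table).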
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
| 46.091703
| 120
| 0.597726
|
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator']
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight']
self.ls_weight = config['ls_weight']
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size))
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1)
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1)
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2)
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size))
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size))
elif self.aggregator_class == 'concat':
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2))
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
entity_labels = []
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1)
user_entity_concat = users * self.offset + \
entities_per_iter
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask)
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
reset_masks = reset_masks[:-1]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size])
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1)
user_relation_scores_normalized = self.softmax(
user_relation_scores)
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
                                                        dim=2)
                output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
| true
| true
|
7907cc9bf16ce33e7051101f1275c55a5b458738
| 2,292
|
py
|
Python
|
monitoring/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 1
|
2018-06-29T17:53:28.000Z
|
2018-06-29T17:53:28.000Z
|
monitoring/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:16:57.000Z
|
2021-06-25T15:16:57.000Z
|
monitoring/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 1
|
2021-06-30T11:44:03.000Z
|
2021-06-30T11:44:03.000Z
|
config = {
"interfaces": {
"google.monitoring.v3.NotificationChannelService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"ListNotificationChannelDescriptors": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannelDescriptor": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"ListNotificationChannels": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"UpdateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"DeleteNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| 38.847458
| 67
| 0.442845
|
config = {
"interfaces": {
"google.monitoring.v3.NotificationChannelService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"ListNotificationChannelDescriptors": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannelDescriptor": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"ListNotificationChannels": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"UpdateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"DeleteNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| true
| true
|
7907cd972452380f69e7a4e249088458c43baec0
| 5,533
|
py
|
Python
|
data/cnews_loader_bert.py
|
a414351664/Bert-THUCNews
|
4dad6900eb9ace8b4e4b3c33e97df9851796a442
|
[
"MIT"
] | 24
|
2019-01-22T11:03:57.000Z
|
2021-09-15T03:06:11.000Z
|
data/cnews_loader_bert.py
|
pengwei-iie/Bert-THUCNews
|
a20749225091533b530f0e539bfaacbd3524fe99
|
[
"MIT"
] | 2
|
2019-05-15T11:03:36.000Z
|
2019-06-29T14:36:33.000Z
|
data/cnews_loader_bert.py
|
pengwei-iie/Bert-THUCNews
|
a20749225091533b530f0e539bfaacbd3524fe99
|
[
"MIT"
] | 16
|
2019-01-22T11:03:57.000Z
|
2021-04-18T15:29:30.000Z
|
# coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
import tensorflow as tf
if sys.version_info[0] > 2:
is_py3 = True
else:
# reload(sys)
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
"""如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码"""
if not is_py3:
return word.encode(encoding)
else:
return word
def native_content(content):
if not is_py3:
return content.decode('utf-8')
else:
return content
def open_file(filename, mode='r'):
"""
    Common file opener, works under both Python 2 and Python 3.
mode: 'r' or 'w' for read or write
"""
if is_py3:
return open(filename, mode, encoding='utf-8', errors='ignore')
else:
return open(filename, mode)
def read_file(filename):
"""读取文件数据"""
contents, labels = [], []
with open_file(filename) as f:
for line in f:
# while True:
# line = f.readline()
try:
label, content = line.strip().split('\t')
contents.append(content)
if content:
# contents.append(list(native_content(content)))
labels.append(native_content(label))
except:
pass
# if not line:
# break
return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
"""根据训练集构建词汇表,存储, x, y"""
data_train, _ = read_file(train_dir)
all_data = []
for content in data_train:
all_data.extend(content)
counter = Counter(all_data)
count_pairs = counter.most_common(vocab_size - 1)
words, _ = list(zip(*count_pairs))
    # add a <PAD> token so that all texts can be padded to the same length
words = ['<PAD>'] + list(words)
open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
"""读取词汇表"""
# words = open_file(vocab_dir).read().strip().split('\n')
with open_file(vocab_dir) as fp:
        # under Python 2, convert every value to unicode
words = [native_content(_.strip()) for _ in fp.readlines()]
word_to_id = dict(zip(words, range(len(words))))
return words, word_to_id
def read_category():
"""读取分类目录,固定"""
categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
categories = [native_content(x) for x in categories]
cat_to_id = dict(zip(categories, range(len(categories))))
return categories, cat_to_id
def to_words(content, words):
"""将id表示的内容转换为文字"""
return ''.join(words[x] for x in content)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
"""将文件转换为id表示"""
contents, labels = read_file(filename)
# np.save('./train_x.npy', contents)
# np.savetxt('./train_x.txt', contents, fmt='%s')
data_id, label_id = [], []
for i in range(len(contents)):
# data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
label_id.append(cat_to_id[labels[i]])
    # use keras pad_sequences to pad the texts to a fixed length
# x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # convert the labels to a one-hot representation
return contents, y_pad
def batch_iter(x, y, batch_size=64):
"""生成批次数据"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
    # np.random.shuffle reorders the original array in place and returns nothing,
    # while np.random.permutation leaves the original array unchanged and returns a new shuffled copy.
indices = np.random.permutation(np.arange(data_len))
x_shuffle = np.array(x)[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
# yield x[start_id:end_id], y[start_id:end_id]
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
def attention(inputs, attention_size, l2_reg_lambda):
"""
Attention mechanism layer.
:param inputs: outputs of RNN/Bi-RNN layer (not final state)
:param attention_size: linear size of attention weights
:return: outputs of the passed RNN/Bi-RNN reduced with attention vector
"""
# In case of Bi-RNN input we need to concatenate outputs of its forward and backward parts
if isinstance(inputs, tuple):
inputs = tf.concat(2, inputs)
sequence_length = inputs.get_shape()[1].value # the length of sequences processed in the antecedent RNN layer
hidden_size = inputs.get_shape()[2].value # hidden size of the RNN layer
    # Attention mechanism: W and b apply a nonlinear transformation to the RNN outputs, and the result is dot-multiplied with u
W_omega = tf.get_variable("W_omega", initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))
b_omega = tf.get_variable("b_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.get_variable("u_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))
vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
exps = tf.reshape(tf.exp(vu), [-1, sequence_length])
alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])
# Output of Bi-RNN is reduced with attention vector
output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)
#if l2_reg_lambda > 0:
# l2_loss += tf.nn.l2_loss(W_omega)
# l2_loss += tf.nn.l2_loss(b_omega)
# l2_loss += tf.nn.l2_loss(u_omega)
# tf.add_to_collection('losses', l2_loss)
return output
| 32.739645
| 114
| 0.646665
|
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
import tensorflow as tf
if sys.version_info[0] > 2:
is_py3 = True
else:
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
if not is_py3:
return word.encode(encoding)
else:
return word
def native_content(content):
if not is_py3:
return content.decode('utf-8')
else:
return content
def open_file(filename, mode='r'):
if is_py3:
return open(filename, mode, encoding='utf-8', errors='ignore')
else:
return open(filename, mode)
def read_file(filename):
contents, labels = [], []
with open_file(filename) as f:
for line in f:
try:
label, content = line.strip().split('\t')
contents.append(content)
if content:
labels.append(native_content(label))
except:
pass
return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
data_train, _ = read_file(train_dir)
all_data = []
for content in data_train:
all_data.extend(content)
counter = Counter(all_data)
count_pairs = counter.most_common(vocab_size - 1)
words, _ = list(zip(*count_pairs))
words = ['<PAD>'] + list(words)
open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
with open_file(vocab_dir) as fp:
words = [native_content(_.strip()) for _ in fp.readlines()]
word_to_id = dict(zip(words, range(len(words))))
return words, word_to_id
def read_category():
categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
categories = [native_content(x) for x in categories]
cat_to_id = dict(zip(categories, range(len(categories))))
return categories, cat_to_id
def to_words(content, words):
return ''.join(words[x] for x in content)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
contents, labels = read_file(filename)
data_id, label_id = [], []
for i in range(len(contents)):
label_id.append(cat_to_id[labels[i]])
y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))
return contents, y_pad
def batch_iter(x, y, batch_size=64):
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
indices = np.random.permutation(np.arange(data_len))
x_shuffle = np.array(x)[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
def attention(inputs, attention_size, l2_reg_lambda):
if isinstance(inputs, tuple):
inputs = tf.concat(2, inputs)
sequence_length = inputs.get_shape()[1].value
hidden_size = inputs.get_shape()[2].value
W_omega = tf.get_variable("W_omega", initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))
b_omega = tf.get_variable("b_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.get_variable("u_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))
vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
exps = tf.reshape(tf.exp(vu), [-1, sequence_length])
alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])
output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)
return output
| true
| true
|
7907cdad81d346362d6a01b224b8cd2d83f39bf2
| 677
|
py
|
Python
|
setup.py
|
pharmbio/robot-imager
|
2256cea4cf7b28d0f575769d3675c97299ede10d
|
[
"MIT"
] | null | null | null |
setup.py
|
pharmbio/robot-imager
|
2256cea4cf7b28d0f575769d3675c97299ede10d
|
[
"MIT"
] | null | null | null |
setup.py
|
pharmbio/robot-imager
|
2256cea4cf7b28d0f575769d3675c97299ede10d
|
[
"MIT"
] | null | null | null |
from setuptools import setup
requirements = '''
flask
'''
name='imager'
console_scripts = f'''
pf-moves={name}.moves_gui:main
pf-flash={name}.flash:main
imager={name}.cli:main
'''
packages=f'''
{name}
{name}.utils
'''
setup(
name=name,
packages=packages.split(),
version='0.1',
description='IMX imaging using the PreciseFlex robot arm and LiCONiC fridge',
url='https://github.com/pharmbio/robot-imager',
author='Dan Rosén',
author_email='dan.rosen@farmbio.uu.se',
python_requires='>=3.10',
license='MIT',
install_requires=requirements.split(),
entry_points={'console_scripts': console_scripts.split()}
)
| 20.515152
| 81
| 0.660266
|
from setuptools import setup
requirements = '''
flask
'''
name='imager'
console_scripts = f'''
pf-moves={name}.moves_gui:main
pf-flash={name}.flash:main
imager={name}.cli:main
'''
packages=f'''
{name}
{name}.utils
'''
setup(
name=name,
packages=packages.split(),
version='0.1',
description='IMX imaging using the PreciseFlex robot arm and LiCONiC fridge',
url='https://github.com/pharmbio/robot-imager',
author='Dan Rosén',
author_email='dan.rosen@farmbio.uu.se',
python_requires='>=3.10',
license='MIT',
install_requires=requirements.split(),
entry_points={'console_scripts': console_scripts.split()}
)
| true
| true
|
7907ce85299f0f04d1bb20affad859f9baaa78bc
| 35,432
|
py
|
Python
|
train.py
|
alexeypechorin/tibetan-transductive
|
e2356d5c0a7cbc2f2359d9cf5b6b18729fecd8de
|
[
"MIT"
] | 1
|
2019-12-08T05:26:20.000Z
|
2019-12-08T05:26:20.000Z
|
train.py
|
alexeypechorin/tibetan-transductive
|
e2356d5c0a7cbc2f2359d9cf5b6b18729fecd8de
|
[
"MIT"
] | null | null | null |
train.py
|
alexeypechorin/tibetan-transductive
|
e2356d5c0a7cbc2f2359d9cf5b6b18729fecd8de
|
[
"MIT"
] | 1
|
2020-09-03T14:51:53.000Z
|
2020-09-03T14:51:53.000Z
|
import os
import click
import numpy as np
from tqdm import tqdm
from models.model_loader import load_model
from torchvision.transforms import Compose
from dataset.data_transform import Resize, Rotation, ElasticAndSine, ColorGradGausNoise, AddWidth, Normalize, ToGray, OnlyElastic, OnlySine, ColorGrad, ColorGausNoise
from dataset.text_data import TextDataset, TextDatasetRandomFont
from dataset.collate_fn import text_collate
from utils.data_visualization import TbSummary
from lr_policy import StepLR, DannLR
import pickle as pkl
import glob
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from warpctc_pytorch import CTCLoss
from test import test
from models.new_vat import VATLoss, VATLossSign, LabeledATLoss, LabeledAtAndUnlabeledTestVatLoss, VATonRnnSign, VATonRnnCnnSign, VATonCnnSign
from dataset.dataset_metadata import SynthDataInfo
@click.command()
@click.option('--base-data-dir', type=str,
default=os.path.expandvars ('../Data/'),
help='Path to base data directory (all other data paths are relative to this one).')
@click.option('--train-data-path', type=str,
default=os.path.expandvars ('Synthetic/Prepared/data_train.txt'),
help='Path to training dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--train-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing training images (relative to base-data-dir)')
@click.option('--orig-eval-data-path', type=str,
default=os.path.expandvars(
'Test/Prepared/im2line.txt'),
help='Path to original test dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--orig-eval-base-dir', type=str,
default=os.path.expandvars(
'Test/Prepared/LineImages'),
help='Path to directory containing original test images (relative to base-data-dir)')
@click.option('--synth-eval-data-path', type=str,
default=os.path.expandvars ('Synthetic/Prepared/data_val.txt'),
help='Path to synthetic evaluation dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--synth-eval-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing synthetic evaluation images (relative to base-data-dir)')
@click.option('--lexicon-path', type=str,
default=os.path.expandvars('char_to_class.pkl'),
help='Path to alphabet lexicon (letter to id), relative to base-data-dir.')
@click.option('--seq-proj', type=str, default="10x20", help='Projection of sequence')
@click.option('--backend', type=str, default="resnet18", help='Backend network to use (default is resnet18)')
@click.option('--snapshot', type=str, default=None, help='Path to pre-trained weights')
@click.option('--input-height', type=int, default=64, help='Height of input images to network')
@click.option('--base-lr', type=float, default=1e-4, help='Base learning rate.') # was e-3
#@click.option('--lr-decay', type=float, default=1e-4, help='Base learning rate') # was 0.0001
@click.option('--elastic-alpha', type=float, default=34, help='Elastic augmentation parameter alpha.')
@click.option('--elastic-sigma', type=float, default=3, help='Elastic augmentation parameter sigma.')
@click.option('--step-size', type=int, default=500, help='Step size for step lr change.')
@click.option('--max-iter', type=int, default=6000, help='Max iterations for training')
@click.option('--batch-size', type=int, default=8, help='Batch size for training')
@click.option('--output-dir', type=str,
default='../Output/exp1',
help='Path to save output snapshot')
@click.option('--test-iter', type=int, default=1000, help='Number of iterations between test evaluation.')
@click.option('--show-iter', type=int, default=1000, help='Number of iterations between showing images in tensorboard.')
@click.option('--test-init', type=bool, default=False, help='Whether to test right after network initialization')
@click.option('--use-gpu', type=bool, default=True, help='Whether to use the gpu')
@click.option('--use-no-font-repeat-data', type=bool, default=True, help='Parameter to remove (always true) - whether to use random training data.')
@click.option('--do-vat', type=bool, default=False, help='Whether to do VAT on synthetic training data')
@click.option('--do-at', type=bool, default=False, help='Whether to do AT on synthetic training data')
@click.option('--vat-ratio', type=float, default=1, help='Ratio of vat on train data loss vs base loss')
@click.option('--test-vat-ratio', type=float, default=1, help='Ratio on vat on test data loss vs base loss')
@click.option('--vat-epsilon', type=float, default=2.5, help='VAT on train hyperparameter - epsilon')
@click.option('--vat-ip', type=int, default=1, help='VAT on train hyperparameter - number of power iterations')
@click.option('--vat-xi', type=float, default=10., help='VAT on train hyperparameter - xi')
@click.option('--vat-sign', type=bool, default=False, help='VAT on train hyperparameter - whether to do sign on vat loss')
@click.option('--do-remove-augs', type=bool, default=False, help='Whether to remove some of the augmentations (for ablation study)')
@click.option('--aug-to-remove', type=str,
default='',
help="with augmentation to remover out of ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']")
@click.option('--do-beam-search', type=bool, default=False, help='whether to do beam search inference in evaluation')
@click.option('--dropout-conv', type=bool, default=False, help='Whether to do dropout between convolution and rnn.')
@click.option('--dropout-rnn', type=bool, default=False, help='Whether to do dropout in rnn.')
@click.option('--dropout-output', type=bool, default=False, help='Whether to do dropout after rnn.')
@click.option('--do-ema', type=bool, default=False, help='Whether to do exponential moving average on weights')
@click.option('--do-gray', type=bool, default=False, help='Whether to use grayscale instead of RGB')
@click.option('--do-test-vat', type=bool, default=False, help='Whether to do VAT loss on original test data')
@click.option('--do-test-entropy', type=bool, default=False, help='Whether to do entropy loss on original test data')
@click.option('--do-test-vat-cnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for cnn part')
@click.option('--do-test-vat-rnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for rnn part')
@click.option('--ada-after-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the rnn part')
@click.option('--ada-before-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the cnn part')
@click.option('--do-ada-lr', type=bool, default=False, help='Whether to use the learning-rate rule suited to adversarial domain adaptation (from the article)')
@click.option('--ada-ratio', type=float, default=1, help='Ratio of ADA loss vs base loss')
@click.option('--rnn-hidden-size', type=int, default=128, help='Size of rnn hidden layer')
@click.option('--do-lr-step', type=bool, default=False, help='Whether to use a step learning-rate schedule')
@click.option('--dataset-name', type=str, default='tibetan', help='Dataset name, currently wiener or tibetan')
def main(base_data_dir, train_data_path, train_base_dir,
orig_eval_data_path, orig_eval_base_dir,
synth_eval_data_path, synth_eval_base_dir,
lexicon_path, seq_proj, backend, snapshot, input_height, base_lr, elastic_alpha, elastic_sigma,
step_size, max_iter,
batch_size, output_dir, test_iter, show_iter, test_init, use_gpu, use_no_font_repeat_data,
do_vat, do_at, vat_ratio, test_vat_ratio, vat_epsilon, vat_ip, vat_xi, vat_sign,
do_remove_augs, aug_to_remove, do_beam_search,
dropout_conv, dropout_rnn, dropout_output, do_ema, do_gray, do_test_vat, do_test_entropy, do_test_vat_cnn,
do_test_vat_rnn,
ada_after_rnn, ada_before_rnn, do_ada_lr, ada_ratio, rnn_hidden_size,
do_lr_step,
dataset_name
):
if not do_lr_step and not do_ada_lr:
raise NotImplementedError('learning rate should be either step or ada.')
train_data_path = os.path.join(base_data_dir, train_data_path)
train_base_dir = os.path.join(base_data_dir, train_base_dir)
synth_eval_data_path = os.path.join(base_data_dir, synth_eval_data_path)
synth_eval_base_dir = os.path.join(base_data_dir, synth_eval_base_dir)
orig_eval_data_path = os.path.join(base_data_dir, orig_eval_data_path)
orig_eval_base_dir = os.path.join(base_data_dir, orig_eval_base_dir)
lexicon_path = os.path.join(base_data_dir, lexicon_path)
all_parameters = locals()
cuda = use_gpu
#print(train_base_dir)
if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
tb_writer = TbSummary(output_dir)
output_dir = os.path.join(output_dir, 'model')
os.makedirs(output_dir, exist_ok=True)
with open(lexicon_path, 'rb') as f:
lexicon = pkl.load(f)
#print(sorted(lexicon.items(), key=operator.itemgetter(1)))
with open(os.path.join(output_dir, 'params.txt'),'w') as f:
f.writelines(str(all_parameters))
print(all_parameters)
print('new vat')
sin_magnitude = 4
rotate_max_angle = 2
dataset_info = SynthDataInfo(None, None, None, dataset_name.lower())
train_fonts = dataset_info.font_names
all_args = locals()
allowed_removals = ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']
if do_remove_augs and aug_to_remove not in allowed_removals:
raise Exception('augmentation removal value is not allowed.')
if do_remove_augs:
rand_trans = []
if aug_to_remove == 'elastic':
print('doing sine transform :)')
rand_trans.append(OnlySine(sin_magnitude=sin_magnitude))
elif aug_to_remove in ['sine', 'sine_rotate']:
print('doing elastic transform :)')
rand_trans.append(OnlyElastic(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma))
if aug_to_remove not in ['elastic', 'sine', 'sine_rotate']:
print('doing elastic transform :)')
print('doing sine transform :)')
rand_trans.append(ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude))
if aug_to_remove not in ['rotation', 'sine_rotate']:
print('doing rotation transform :)')
rand_trans.append(Rotation(angle=rotate_max_angle, fill_value=255))
if aug_to_remove not in ['color_aug', 'color_gaus', 'color_sine']:
print('doing color_aug transform :)')
rand_trans.append(ColorGradGausNoise())
elif aug_to_remove == 'color_gaus':
print('doing color_sine transform :)')
rand_trans.append(ColorGrad())
elif aug_to_remove == 'color_sine':
print('doing color_gaus transform :)')
rand_trans.append(ColorGausNoise())
else:
print('doing all transforms :)')
rand_trans = [
ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude),
Rotation(angle=rotate_max_angle, fill_value=255),
ColorGradGausNoise()]
if do_gray:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()]
else:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
Normalize()]
transform_random = Compose(rand_trans)
if do_gray:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()
])
else:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
Normalize()
])
if use_no_font_repeat_data:
print('creating dataset')
train_data = TextDatasetRandomFont(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
print('finished creating dataset')
else:
print('train data path:\n{}'.format(train_data_path))
print('train_base_dir:\n{}'.format(train_base_dir))
train_data = TextDataset(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
synth_eval_data = TextDataset(data_path=synth_eval_data_path, lexicon=lexicon,
base_path=synth_eval_base_dir, transform=transform_random, fonts=train_fonts)
orig_eval_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
orig_vat_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if ada_after_rnn or ada_before_rnn:
orig_ada_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
#else:
# train_data = TestDataset(transform=transform, abc=abc).set_mode("train")
# synth_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
# orig_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
seq_proj = [int(x) for x in seq_proj.split('x')]
net = load_model(lexicon=train_data.get_lexicon(), seq_proj=seq_proj, backend=backend,
snapshot=snapshot, cuda=cuda, do_beam_search=do_beam_search,
dropout_conv=dropout_conv,
dropout_rnn=dropout_rnn,
dropout_output=dropout_output,
do_ema=do_ema,
ada_after_rnn=ada_after_rnn, ada_before_rnn=ada_before_rnn,
rnn_hidden_size=rnn_hidden_size
)
optimizer = optim.Adam(net.parameters(), lr = base_lr, weight_decay=0.0001)
if do_ada_lr:
print('using ada lr')
lr_scheduler = DannLR(optimizer, max_iter=max_iter)
elif do_lr_step:
print('using step lr')
lr_scheduler = StepLR(optimizer, step_size=step_size, max_iter=max_iter)
loss_function = CTCLoss()
synth_avg_ed_best = float("inf")
orig_avg_ed_best = float("inf")
epoch_count = 0
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
collate_vat = lambda x: text_collate(x, do_mask=True)
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
if ada_after_rnn or ada_before_rnn:
collate_ada = lambda x: text_collate(x, do_mask=True)
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
loss_domain = torch.nn.NLLLoss()
while True:
collate = lambda x: text_collate(x, do_mask=(do_vat or ada_before_rnn or ada_after_rnn))
data_loader = DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate)
loss_mean_ctc = []
loss_mean_vat = []
loss_mean_at = []
loss_mean_comp = []
loss_mean_total = []
loss_mean_test_vat = []
loss_mean_test_pseudo = []
loss_mean_test_rand = []
loss_mean_ada_rnn_s = []
loss_mean_ada_rnn_t = []
loss_mean_ada_cnn_s = []
loss_mean_ada_cnn_t = []
iterator = tqdm(data_loader)
iter_count = 0
for iter_num, sample in enumerate(iterator):
total_iter = (epoch_count * len(data_loader)) + iter_num
if ((total_iter > 1) and total_iter % test_iter == 0) or (test_init and total_iter == 0):
# epoch_count != 0 and
print("Test phase")
net = net.eval()
if do_ema:
net.start_test()
synth_acc, synth_avg_ed, synth_avg_no_stop_ed, synth_avg_loss = test(net, synth_eval_data,
synth_eval_data.get_lexicon(),
cuda, visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer,
n_iter=total_iter,
initial_title='val_synth',
loss_function=loss_function,
output_path=os.path.join(
output_dir, 'results'),
do_beam_search=False)
orig_acc, orig_avg_ed, orig_avg_no_stop_ed, orig_avg_loss = test(net, orig_eval_data,
orig_eval_data.get_lexicon(), cuda,
visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer, n_iter=total_iter,
initial_title='test_orig',
loss_function=loss_function,
output_path=os.path.join(output_dir,
'results'),
do_beam_search=do_beam_search)
net = net.train()
#save periodic
if output_dir is not None and total_iter // 30000:
periodic_save = os.path.join(output_dir, 'periodic_save')
os.makedirs(periodic_save, exist_ok=True)
old_save = glob.glob(os.path.join(periodic_save,'*'))
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_" + str(total_iter)))
if orig_avg_no_stop_ed < orig_avg_ed_best:
orig_avg_ed_best = orig_avg_no_stop_ed
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_best"))
if synth_avg_no_stop_ed < synth_avg_ed_best:
synth_avg_ed_best = synth_avg_no_stop_ed
if do_ema:
net.end_test()
print("synth: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(synth_avg_ed_best,
synth_avg_ed,
synth_avg_no_stop_ed,
synth_acc))
print("orig: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(orig_avg_ed_best,
orig_avg_ed,
orig_avg_no_stop_ed,
orig_acc))
tb_writer.get_writer().add_scalars('data/test',
{'synth_ed_total': synth_avg_ed,
'synth_ed_no_stop': synth_avg_no_stop_ed,
'synth_avg_loss': synth_avg_loss,
'orig_ed_total': orig_avg_ed,
'orig_ed_no_stop': orig_avg_no_stop_ed,
'orig_avg_loss': orig_avg_loss
}, total_iter)
if len(loss_mean_ctc) > 0:
train_dict = {'mean_ctc_loss': np.mean(loss_mean_ctc)}
if do_vat:
train_dict = {**train_dict, **{'mean_vat_loss':np.mean(loss_mean_vat)}}
if do_at:
train_dict = {**train_dict, **{'mean_at_loss':np.mean(loss_mean_at)}}
if do_test_vat:
train_dict = {**train_dict, **{'mean_test_vat_loss': np.mean(loss_mean_test_vat)}}
if do_test_vat_rnn and do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_crnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_rnn:
train_dict = {**train_dict, **{'mean_test_vat_rnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_cnn_loss': np.mean(loss_mean_test_vat)}}
if ada_after_rnn:
train_dict = {**train_dict,
**{'mean_ada_rnn_s_loss': np.mean(loss_mean_ada_rnn_s),
'mean_ada_rnn_t_loss': np.mean(loss_mean_ada_rnn_t)}}
if ada_before_rnn:
train_dict = {**train_dict,
**{'mean_ada_cnn_s_loss': np.mean(loss_mean_ada_cnn_s),
'mean_ada_cnn_t_loss': np.mean(loss_mean_ada_cnn_t)}}
print(train_dict)
tb_writer.get_writer().add_scalars('data/train',
train_dict,
total_iter)
'''
# for multi-gpu support
if sample["img"].size(0) % len(gpu.split(',')) != 0:
continue
'''
optimizer.zero_grad()
imgs = Variable(sample["img"])
#print("images sizes are:")
#print(sample["img"].shape)
if do_vat or ada_after_rnn or ada_before_rnn:
mask = sample['mask']
labels_flatten = Variable(sample["seq"]).view(-1)
label_lens = Variable(sample["seq_len"].int())
#print("image sequence length is:")
#print(sample["im_seq_len"])
#print("label sequence length is:")
#print(sample["seq_len"].view(1,-1))
img_seq_lens = sample["im_seq_len"]
if cuda:
imgs = imgs.cuda()
if do_vat or ada_after_rnn or ada_before_rnn:
mask = mask.cuda()
if do_ada_lr:
ada_p = float(iter_count) / max_iter
lr_scheduler.update(ada_p)
if ada_before_rnn or ada_after_rnn:
if not do_ada_lr:
ada_p = float(iter_count) / max_iter
ada_alpha = 2. / (1. + np.exp(-10. * ada_p)) - 1
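                # DANN-style schedule (assumed from the cited article): ada_alpha ramps smoothly
                # from 0 towards 1 as ada_p goes from 0 to 1 over the course of training.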
if cur_ada >= ada_len:
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
ada_batch = next(ada_iter)
cur_ada += 1
ada_imgs = Variable(ada_batch["img"])
ada_img_seq_lens = ada_batch["im_seq_len"]
ada_mask = ada_batch['mask'].byte()
if cuda:
ada_imgs = ada_imgs.cuda()
_, ada_cnn, ada_rnn = net(ada_imgs, ada_img_seq_lens,
ada_alpha=ada_alpha, mask=ada_mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.zeros(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_t = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_t = loss_domain(ada_rnn, domain_label)
if do_test_vat and do_at:
# test part!
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
test_vat_batch = next(vat_iter)
cur_vat += 1
test_vat_mask = test_vat_batch['mask']
test_vat_imgs = Variable(test_vat_batch["img"])
test_vat_img_seq_lens = test_vat_batch["im_seq_len"]
if cuda:
test_vat_imgs = test_vat_imgs.cuda()
test_vat_mask = test_vat_mask.cuda()
# train part
at_test_vat_loss = LabeledAtAndUnlabeledTestVatLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss, test_vat_loss = at_test_vat_loss(model=net, train_x=imgs, train_labels_flatten=labels_flatten,
train_img_seq_lens=img_seq_lens, train_label_lens=label_lens, batch_size=batch_size,
test_x=test_vat_imgs, test_seq_len=test_vat_img_seq_lens, test_mask=test_vat_mask)
elif do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
vat_batch = next(vat_iter)
cur_vat += 1
vat_mask = vat_batch['mask']
vat_imgs = Variable(vat_batch["img"])
vat_img_seq_lens = vat_batch["im_seq_len"]
if cuda:
vat_imgs = vat_imgs.cuda()
vat_mask = vat_mask.cuda()
if do_test_vat:
if do_test_vat_rnn or do_test_vat_cnn:
raise "can only do one of do_test_vat | (do_test_vat_rnn, do_test_vat_cnn)"
if vat_sign == True:
test_vat_loss = VATLossSign(do_test_entropy=do_test_entropy, xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
else:
test_vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn and do_test_vat_cnn:
test_vat_loss = VATonRnnCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn:
test_vat_loss = VATonRnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_cnn:
test_vat_loss = VATonCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
if do_test_vat_cnn and do_test_vat_rnn:
test_vat_loss, cnn_lds, rnn_lds = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_test_vat:
test_vat_loss = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_vat:
vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
vat_loss = vat_loss(net, imgs, img_seq_lens, mask)
elif do_at:
at_loss = LabeledATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss = at_loss(net, imgs, labels_flatten, img_seq_lens, label_lens, batch_size)
if ada_after_rnn or ada_before_rnn:
preds, ada_cnn, ada_rnn = net(imgs, img_seq_lens, ada_alpha=ada_alpha, mask=mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.ones(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_s = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_s = loss_domain(ada_rnn, domain_label)
else:
preds = net(imgs, img_seq_lens)
'''
if output_dir is not None:
if (show_iter is not None and iter_num != 0 and iter_num % show_iter == 0):
print_data_visuals(net, tb_writer, train_data.get_lexicon(), sample["img"], labels_flatten, label_lens,
preds, ((epoch_count * len(data_loader)) + iter_num))
'''
loss_ctc = loss_function(preds, labels_flatten,
Variable(torch.IntTensor(np.array(img_seq_lens))), label_lens) / batch_size
if loss_ctc.data[0] in [float("inf"), -float("inf")]:
print("warnning: loss should not be inf.")
continue
total_loss = loss_ctc
if do_vat:
#mask = sample['mask']
#if cuda:
# mask = mask.cuda()
#vat_loss = virtual_adversarial_loss(net, imgs, img_seq_lens, mask, is_training=True, do_entropy=False, epsilon=vat_epsilon, num_power_iterations=1,
# xi=1e-6, average_loss=True)
total_loss = total_loss + vat_ratio * vat_loss.cpu()
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
total_loss = total_loss + test_vat_ratio * test_vat_loss.cpu()
if ada_before_rnn:
total_loss = total_loss + ada_ratio * err_ada_cnn_s.cpu() + ada_ratio * err_ada_cnn_t.cpu()
if ada_after_rnn:
total_loss = total_loss + ada_ratio * err_ada_rnn_s.cpu() + ada_ratio * err_ada_rnn_t.cpu()
total_loss.backward()
nn.utils.clip_grad_norm(net.parameters(), 10.0)
if -400 < loss_ctc.data[0] < 400:
loss_mean_ctc.append(loss_ctc.data[0])
if -1000 < total_loss.data[0] < 1000:
loss_mean_total.append(total_loss.data[0])
if len(loss_mean_total) > 100:
loss_mean_total = loss_mean_total[-100:]
status = "epoch: {0:5d}; iter_num: {1:5d}; lr: {2:.2E}; loss_mean: {3:.3f}; loss: {4:.3f}".format(epoch_count,
lr_scheduler.last_iter,
lr_scheduler.get_lr(),
np.mean(loss_mean_total),
loss_ctc.data[0])
if ada_after_rnn:
loss_mean_ada_rnn_s.append(err_ada_rnn_s.data[0])
loss_mean_ada_rnn_t.append(err_ada_rnn_t.data[0])
status += "; ladatrnns: {0:.3f}; ladatrnnt: {1:.3f}".format(
err_ada_rnn_s.data[0], err_ada_rnn_t.data[0]
)
if ada_before_rnn:
loss_mean_ada_cnn_s.append(err_ada_cnn_s.data[0])
loss_mean_ada_cnn_t.append(err_ada_cnn_t.data[0])
status += "; ladatcnns: {0:.3f}; ladatcnnt: {1:.3f}".format(
err_ada_cnn_s.data[0], err_ada_cnn_t.data[0]
)
if do_vat:
loss_mean_vat.append(vat_loss.data[0])
status += "; lvat: {0:.3f}".format(
vat_loss.data[0]
)
if do_at:
loss_mean_at.append(at_loss.data[0])
status += "; lat: {0:.3f}".format(
at_loss.data[0]
)
if do_test_vat:
loss_mean_test_vat.append(test_vat_loss.data[0])
status += "; l_tvat: {0:.3f}".format(
test_vat_loss.data[0]
)
if do_test_vat_rnn or do_test_vat_cnn:
loss_mean_test_vat.append(test_vat_loss.data[0])
if do_test_vat_rnn and do_test_vat_cnn:
status += "; l_tvatc: {}".format(
cnn_lds.data[0]
)
status += "; l_tvatr: {}".format(
rnn_lds.data[0]
)
else:
status += "; l_tvat: {}".format(
test_vat_loss.data[0]
)
iterator.set_description(status)
optimizer.step()
if do_lr_step:
lr_scheduler.step()
if do_ema:
net.udate_ema()
iter_count += 1
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_last"))
epoch_count += 1
return
if __name__ == '__main__':
main()
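# --- Editor's note: hedged sketch, not part of the original file --------------
# The VATLoss* classes used above all follow the virtual adversarial training
# recipe: find the input perturbation (via power iteration) that most changes
# the model output, then penalise the divergence between clean and perturbed
# predictions. Below is a minimal, self-contained sketch of that idea for a
# plain classifier; the hyperparameter names (xi, eps, ip) mirror this script,
# but the code is an illustrative assumption, not the project's implementation
# (which additionally handles masks and sequence lengths).
import torch
import torch.nn.functional as F

def vat_penalty_sketch(model, x, xi=1e-6, eps=2.5, ip=1):
    # Clean predictions are treated as a fixed target distribution.
    with torch.no_grad():
        clean = F.softmax(model(x), dim=1)
    d = torch.randn_like(x)
    for _ in range(ip):
        # Power iteration: take a tiny step along d and follow the gradient
        # of the divergence to approximate the most sensitive direction.
        d = xi * F.normalize(d.view(d.size(0), -1), dim=1).view_as(x)
        d.requires_grad_()
        adv_logp = F.log_softmax(model(x + d), dim=1)
        F.kl_div(adv_logp, clean, reduction="batchmean").backward()
        d = d.grad.detach()
        model.zero_grad()
    # Final adversarial perturbation of norm eps and the resulting penalty.
    r_adv = eps * F.normalize(d.view(d.size(0), -1), dim=1).view_as(x)
    adv_logp = F.log_softmax(model(x + r_adv), dim=1)
    return F.kl_div(adv_logp, clean, reduction="batchmean")
# Hypothetical usage: total_loss = loss_ctc + vat_ratio * vat_penalty_sketch(net, imgs)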
| 55.190031
| 166
| 0.561526
|
import os
import click
import numpy as np
from tqdm import tqdm
from models.model_loader import load_model
from torchvision.transforms import Compose
from dataset.data_transform import Resize, Rotation, ElasticAndSine, ColorGradGausNoise, AddWidth, Normalize, ToGray, OnlyElastic, OnlySine, ColorGrad, ColorGausNoise
from dataset.text_data import TextDataset, TextDatasetRandomFont
from dataset.collate_fn import text_collate
from utils.data_visualization import TbSummary
from lr_policy import StepLR, DannLR
import pickle as pkl
import glob
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from warpctc_pytorch import CTCLoss
from test import test
from models.new_vat import VATLoss, VATLossSign, LabeledATLoss, LabeledAtAndUnlabeledTestVatLoss, VATonRnnSign, VATonRnnCnnSign, VATonCnnSign
from dataset.dataset_metadata import SynthDataInfo
@click.command()
@click.option('--base-data-dir', type=str,
              default=os.path.expandvars('../Data/'),
help='Path to base data directory (all other data paths are relative to this one).')
@click.option('--train-data-path', type=str,
              default=os.path.expandvars('Synthetic/Prepared/data_train.txt'),
help='Path to training dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--train-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing training images (relative to base-data-dir)')
@click.option('--orig-eval-data-path', type=str,
default=os.path.expandvars(
'Test/Prepared/im2line.txt'),
help='Path to original test dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--orig-eval-base-dir', type=str,
default=os.path.expandvars(
'Test/Prepared/LineImages'),
help='Path to directory containing original test images (relative to base-data-dir)')
@click.option('--synth-eval-data-path', type=str,
              default=os.path.expandvars('Synthetic/Prepared/data_val.txt'),
help='Path to synthetic evaluation dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--synth-eval-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing synthetic evaluation images (relative to base-data-dir)')
@click.option('--lexicon-path', type=str,
default=os.path.expandvars('char_to_class.pkl'),
help='Path to alphabet lexicon (letter to id), relative to base-data-dir.')
@click.option('--seq-proj', type=str, default="10x20", help='Projection of sequence')
@click.option('--backend', type=str, default="resnet18", help='Backend network to use (default is resnet18)')
@click.option('--snapshot', type=str, default=None, help='Path to pre-trained weights')
@click.option('--input-height', type=int, default=64, help='Height of input images to network')
@click.option('--base-lr', type=float, default=1e-4, help='Base learning rate.')
@click.option('--elastic-alpha', type=float, default=34, help='Elastic augmentation parameter alpha.')
@click.option('--elastic-sigma', type=float, default=3, help='Elastic augmentation parameter sigma.')
@click.option('--step-size', type=int, default=500, help='Step size for step lr change.')
@click.option('--max-iter', type=int, default=6000, help='Max iterations for training')
@click.option('--batch-size', type=int, default=8, help='Batch size for training')
@click.option('--output-dir', type=str,
default='../Output/exp1',
help='Path to save output snapshot')
@click.option('--test-iter', type=int, default=1000, help='Number of iterations between test evaluation.')
@click.option('--show-iter', type=int, default=1000, help='Number of iterations between showing images in tensorboard.')
@click.option('--test-init', type=bool, default=False, help='Whether to test after network initialization')
@click.option('--use-gpu', type=bool, default=True, help='Whether to use the gpu')
@click.option('--use-no-font-repeat-data', type=bool, default=True, help='Parameter to remove (always true) - whether to use random training data.')
@click.option('--do-vat', type=bool, default=False, help='Whether to do VAT on synthetic training data')
@click.option('--do-at', type=bool, default=False, help='Whether to do AT on synthetic training data')
@click.option('--vat-ratio', type=float, default=1, help='Ratio of vat on train data loss vs base loss')
@click.option('--test-vat-ratio', type=float, default=1, help='Ratio of vat on test data loss vs base loss')
@click.option('--vat-epsilon', type=float, default=2.5, help='VAT on train hyperparameter - epsilon')
@click.option('--vat-ip', type=int, default=1, help='VAT on train hyperparameter - number of power iterations')
@click.option('--vat-xi', type=float, default=10., help='VAT on train hyperparameter - xi')
@click.option('--vat-sign', type=bool, default=False, help='VAT on train hyperparameter - whether to do sign on vat loss')
@click.option('--do-remove-augs', type=bool, default=False, help='Whether to remove some of the augmentations (for ablation study)')
@click.option('--aug-to-remove', type=str,
default='',
help="with augmentation to remover out of ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']")
@click.option('--do-beam-search', type=bool, default=False, help='whether to do beam search inference in evaluation')
@click.option('--dropout-conv', type=bool, default=False, help='Whether to do dropout between convolution and rnn.')
@click.option('--dropout-rnn', type=bool, default=False, help='Whether to do dropout in rnn.')
@click.option('--dropout-output', type=bool, default=False, help='Whether to do dropout after rnn.')
@click.option('--do-ema', type=bool, default=False, help='Whether to do exponential moving average on weights')
@click.option('--do-gray', type=bool, default=False, help='Whether to use grayscale instead of rgb')
@click.option('--do-test-vat', type=bool, default=False, help='Whether to do VAT loss on original test data')
@click.option('--do-test-entropy', type=bool, default=False, help='Whether to do entropy loss on original test data')
@click.option('--do-test-vat-cnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for cnn part')
@click.option('--do-test-vat-rnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for rnn part')
@click.option('--ada-after-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on rnn part')
@click.option('--ada-before-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on cnn part')
@click.option('--do-ada-lr', type=bool, default=False, help='Whether to do lr rule suitable for adversarial domain adaptation (from article)')
@click.option('--ada-ratio', type=float, default=1, help='Ratio of ADA loss vs base loss')
@click.option('--rnn-hidden-size', type=int, default=128, help='Size of rnn hidden layer')
@click.option('--do-lr-step', type=bool, default=False, help='Whether to use a step learning-rate schedule (see --step-size)')
@click.option('--dataset-name', type=str, default='tibetan', help='Dataset name, currently wiener or tibetan')
def main(base_data_dir, train_data_path, train_base_dir,
orig_eval_data_path, orig_eval_base_dir,
synth_eval_data_path, synth_eval_base_dir,
lexicon_path, seq_proj, backend, snapshot, input_height, base_lr, elastic_alpha, elastic_sigma,
step_size, max_iter,
batch_size, output_dir, test_iter, show_iter, test_init, use_gpu, use_no_font_repeat_data,
do_vat, do_at, vat_ratio, test_vat_ratio, vat_epsilon, vat_ip, vat_xi, vat_sign,
do_remove_augs, aug_to_remove, do_beam_search,
dropout_conv, dropout_rnn, dropout_output, do_ema, do_gray, do_test_vat, do_test_entropy, do_test_vat_cnn,
do_test_vat_rnn,
ada_after_rnn, ada_before_rnn, do_ada_lr, ada_ratio, rnn_hidden_size,
do_lr_step,
dataset_name
):
if not do_lr_step and not do_ada_lr:
raise NotImplementedError('learning rate should be either step or ada.')
train_data_path = os.path.join(base_data_dir, train_data_path)
train_base_dir = os.path.join(base_data_dir, train_base_dir)
synth_eval_data_path = os.path.join(base_data_dir, synth_eval_data_path)
synth_eval_base_dir = os.path.join(base_data_dir, synth_eval_base_dir)
orig_eval_data_path = os.path.join(base_data_dir, orig_eval_data_path)
orig_eval_base_dir = os.path.join(base_data_dir, orig_eval_base_dir)
lexicon_path = os.path.join(base_data_dir, lexicon_path)
all_parameters = locals()
cuda = use_gpu
if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
tb_writer = TbSummary(output_dir)
output_dir = os.path.join(output_dir, 'model')
os.makedirs(output_dir, exist_ok=True)
with open(lexicon_path, 'rb') as f:
lexicon = pkl.load(f)
with open(os.path.join(output_dir, 'params.txt'),'w') as f:
f.writelines(str(all_parameters))
print(all_parameters)
print('new vat')
sin_magnitude = 4
rotate_max_angle = 2
dataset_info = SynthDataInfo(None, None, None, dataset_name.lower())
train_fonts = dataset_info.font_names
all_args = locals()
allowed_removals = ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']
if do_remove_augs and aug_to_remove not in allowed_removals:
raise Exception('augmentation removal value is not allowed.')
if do_remove_augs:
rand_trans = []
if aug_to_remove == 'elastic':
print('doing sine transform :)')
rand_trans.append(OnlySine(sin_magnitude=sin_magnitude))
elif aug_to_remove in ['sine', 'sine_rotate']:
print('doing elastic transform :)')
rand_trans.append(OnlyElastic(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma))
if aug_to_remove not in ['elastic', 'sine', 'sine_rotate']:
print('doing elastic transform :)')
print('doing sine transform :)')
rand_trans.append(ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude))
if aug_to_remove not in ['rotation', 'sine_rotate']:
print('doing rotation transform :)')
rand_trans.append(Rotation(angle=rotate_max_angle, fill_value=255))
if aug_to_remove not in ['color_aug', 'color_gaus', 'color_sine']:
print('doing color_aug transform :)')
rand_trans.append(ColorGradGausNoise())
elif aug_to_remove == 'color_gaus':
print('doing color_sine transform :)')
rand_trans.append(ColorGrad())
elif aug_to_remove == 'color_sine':
print('doing color_gaus transform :)')
rand_trans.append(ColorGausNoise())
else:
print('doing all transforms :)')
rand_trans = [
ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude),
Rotation(angle=rotate_max_angle, fill_value=255),
ColorGradGausNoise()]
if do_gray:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()]
else:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
Normalize()]
transform_random = Compose(rand_trans)
if do_gray:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()
])
else:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
Normalize()
])
if use_no_font_repeat_data:
print('creating dataset')
train_data = TextDatasetRandomFont(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
print('finished creating dataset')
else:
print('train data path:\n{}'.format(train_data_path))
print('train_base_dir:\n{}'.format(train_base_dir))
train_data = TextDataset(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
synth_eval_data = TextDataset(data_path=synth_eval_data_path, lexicon=lexicon,
base_path=synth_eval_base_dir, transform=transform_random, fonts=train_fonts)
orig_eval_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
orig_vat_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if ada_after_rnn or ada_before_rnn:
orig_ada_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
seq_proj = [int(x) for x in seq_proj.split('x')]
net = load_model(lexicon=train_data.get_lexicon(), seq_proj=seq_proj, backend=backend,
snapshot=snapshot, cuda=cuda, do_beam_search=do_beam_search,
dropout_conv=dropout_conv,
dropout_rnn=dropout_rnn,
dropout_output=dropout_output,
do_ema=do_ema,
ada_after_rnn=ada_after_rnn, ada_before_rnn=ada_before_rnn,
rnn_hidden_size=rnn_hidden_size
)
optimizer = optim.Adam(net.parameters(), lr = base_lr, weight_decay=0.0001)
if do_ada_lr:
print('using ada lr')
lr_scheduler = DannLR(optimizer, max_iter=max_iter)
elif do_lr_step:
print('using step lr')
lr_scheduler = StepLR(optimizer, step_size=step_size, max_iter=max_iter)
loss_function = CTCLoss()
synth_avg_ed_best = float("inf")
orig_avg_ed_best = float("inf")
epoch_count = 0
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
collate_vat = lambda x: text_collate(x, do_mask=True)
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
if ada_after_rnn or ada_before_rnn:
collate_ada = lambda x: text_collate(x, do_mask=True)
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
loss_domain = torch.nn.NLLLoss()
while True:
collate = lambda x: text_collate(x, do_mask=(do_vat or ada_before_rnn or ada_after_rnn))
data_loader = DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate)
loss_mean_ctc = []
loss_mean_vat = []
loss_mean_at = []
loss_mean_comp = []
loss_mean_total = []
loss_mean_test_vat = []
loss_mean_test_pseudo = []
loss_mean_test_rand = []
loss_mean_ada_rnn_s = []
loss_mean_ada_rnn_t = []
loss_mean_ada_cnn_s = []
loss_mean_ada_cnn_t = []
iterator = tqdm(data_loader)
iter_count = 0
for iter_num, sample in enumerate(iterator):
total_iter = (epoch_count * len(data_loader)) + iter_num
if ((total_iter > 1) and total_iter % test_iter == 0) or (test_init and total_iter == 0):
print("Test phase")
net = net.eval()
if do_ema:
net.start_test()
synth_acc, synth_avg_ed, synth_avg_no_stop_ed, synth_avg_loss = test(net, synth_eval_data,
synth_eval_data.get_lexicon(),
cuda, visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer,
n_iter=total_iter,
initial_title='val_synth',
loss_function=loss_function,
output_path=os.path.join(
output_dir, 'results'),
do_beam_search=False)
orig_acc, orig_avg_ed, orig_avg_no_stop_ed, orig_avg_loss = test(net, orig_eval_data,
orig_eval_data.get_lexicon(), cuda,
visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer, n_iter=total_iter,
initial_title='test_orig',
loss_function=loss_function,
output_path=os.path.join(output_dir,
'results'),
do_beam_search=do_beam_search)
net = net.train()
if output_dir is not None and total_iter // 30000:
periodic_save = os.path.join(output_dir, 'periodic_save')
os.makedirs(periodic_save, exist_ok=True)
old_save = glob.glob(os.path.join(periodic_save,'*'))
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_" + str(total_iter)))
if orig_avg_no_stop_ed < orig_avg_ed_best:
orig_avg_ed_best = orig_avg_no_stop_ed
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_best"))
if synth_avg_no_stop_ed < synth_avg_ed_best:
synth_avg_ed_best = synth_avg_no_stop_ed
if do_ema:
net.end_test()
print("synth: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(synth_avg_ed_best,
synth_avg_ed,
synth_avg_no_stop_ed,
synth_acc))
print("orig: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(orig_avg_ed_best,
orig_avg_ed,
orig_avg_no_stop_ed,
orig_acc))
tb_writer.get_writer().add_scalars('data/test',
{'synth_ed_total': synth_avg_ed,
'synth_ed_no_stop': synth_avg_no_stop_ed,
'synth_avg_loss': synth_avg_loss,
'orig_ed_total': orig_avg_ed,
'orig_ed_no_stop': orig_avg_no_stop_ed,
'orig_avg_loss': orig_avg_loss
}, total_iter)
if len(loss_mean_ctc) > 0:
train_dict = {'mean_ctc_loss': np.mean(loss_mean_ctc)}
if do_vat:
train_dict = {**train_dict, **{'mean_vat_loss':np.mean(loss_mean_vat)}}
if do_at:
train_dict = {**train_dict, **{'mean_at_loss':np.mean(loss_mean_at)}}
if do_test_vat:
train_dict = {**train_dict, **{'mean_test_vat_loss': np.mean(loss_mean_test_vat)}}
if do_test_vat_rnn and do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_crnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_rnn:
train_dict = {**train_dict, **{'mean_test_vat_rnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_cnn_loss': np.mean(loss_mean_test_vat)}}
if ada_after_rnn:
train_dict = {**train_dict,
**{'mean_ada_rnn_s_loss': np.mean(loss_mean_ada_rnn_s),
'mean_ada_rnn_t_loss': np.mean(loss_mean_ada_rnn_t)}}
if ada_before_rnn:
train_dict = {**train_dict,
**{'mean_ada_cnn_s_loss': np.mean(loss_mean_ada_cnn_s),
'mean_ada_cnn_t_loss': np.mean(loss_mean_ada_cnn_t)}}
print(train_dict)
tb_writer.get_writer().add_scalars('data/train',
train_dict,
total_iter)
optimizer.zero_grad()
imgs = Variable(sample["img"])
if do_vat or ada_after_rnn or ada_before_rnn:
mask = sample['mask']
labels_flatten = Variable(sample["seq"]).view(-1)
label_lens = Variable(sample["seq_len"].int())
img_seq_lens = sample["im_seq_len"]
if cuda:
imgs = imgs.cuda()
if do_vat or ada_after_rnn or ada_before_rnn:
mask = mask.cuda()
if do_ada_lr:
ada_p = float(iter_count) / max_iter
lr_scheduler.update(ada_p)
if ada_before_rnn or ada_after_rnn:
if not do_ada_lr:
ada_p = float(iter_count) / max_iter
ada_alpha = 2. / (1. + np.exp(-10. * ada_p)) - 1
if cur_ada >= ada_len:
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
ada_batch = next(ada_iter)
cur_ada += 1
ada_imgs = Variable(ada_batch["img"])
ada_img_seq_lens = ada_batch["im_seq_len"]
ada_mask = ada_batch['mask'].byte()
if cuda:
ada_imgs = ada_imgs.cuda()
_, ada_cnn, ada_rnn = net(ada_imgs, ada_img_seq_lens,
ada_alpha=ada_alpha, mask=ada_mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.zeros(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_t = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_t = loss_domain(ada_rnn, domain_label)
if do_test_vat and do_at:
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
test_vat_batch = next(vat_iter)
cur_vat += 1
test_vat_mask = test_vat_batch['mask']
test_vat_imgs = Variable(test_vat_batch["img"])
test_vat_img_seq_lens = test_vat_batch["im_seq_len"]
if cuda:
test_vat_imgs = test_vat_imgs.cuda()
test_vat_mask = test_vat_mask.cuda()
at_test_vat_loss = LabeledAtAndUnlabeledTestVatLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss, test_vat_loss = at_test_vat_loss(model=net, train_x=imgs, train_labels_flatten=labels_flatten,
train_img_seq_lens=img_seq_lens, train_label_lens=label_lens, batch_size=batch_size,
test_x=test_vat_imgs, test_seq_len=test_vat_img_seq_lens, test_mask=test_vat_mask)
elif do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
vat_batch = next(vat_iter)
cur_vat += 1
vat_mask = vat_batch['mask']
vat_imgs = Variable(vat_batch["img"])
vat_img_seq_lens = vat_batch["im_seq_len"]
if cuda:
vat_imgs = vat_imgs.cuda()
vat_mask = vat_mask.cuda()
if do_test_vat:
if do_test_vat_rnn or do_test_vat_cnn:
raise "can only do one of do_test_vat | (do_test_vat_rnn, do_test_vat_cnn)"
if vat_sign == True:
test_vat_loss = VATLossSign(do_test_entropy=do_test_entropy, xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
else:
test_vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn and do_test_vat_cnn:
test_vat_loss = VATonRnnCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn:
test_vat_loss = VATonRnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_cnn:
test_vat_loss = VATonCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
if do_test_vat_cnn and do_test_vat_rnn:
test_vat_loss, cnn_lds, rnn_lds = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_test_vat:
test_vat_loss = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_vat:
vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
vat_loss = vat_loss(net, imgs, img_seq_lens, mask)
elif do_at:
at_loss = LabeledATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss = at_loss(net, imgs, labels_flatten, img_seq_lens, label_lens, batch_size)
if ada_after_rnn or ada_before_rnn:
preds, ada_cnn, ada_rnn = net(imgs, img_seq_lens, ada_alpha=ada_alpha, mask=mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.ones(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_s = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_s = loss_domain(ada_rnn, domain_label)
else:
preds = net(imgs, img_seq_lens)
loss_ctc = loss_function(preds, labels_flatten,
Variable(torch.IntTensor(np.array(img_seq_lens))), label_lens) / batch_size
if loss_ctc.data[0] in [float("inf"), -float("inf")]:
print("warnning: loss should not be inf.")
continue
total_loss = loss_ctc
if do_vat:
total_loss = total_loss + vat_ratio * vat_loss.cpu()
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
total_loss = total_loss + test_vat_ratio * test_vat_loss.cpu()
if ada_before_rnn:
total_loss = total_loss + ada_ratio * err_ada_cnn_s.cpu() + ada_ratio * err_ada_cnn_t.cpu()
if ada_after_rnn:
total_loss = total_loss + ada_ratio * err_ada_rnn_s.cpu() + ada_ratio * err_ada_rnn_t.cpu()
total_loss.backward()
nn.utils.clip_grad_norm(net.parameters(), 10.0)
if -400 < loss_ctc.data[0] < 400:
loss_mean_ctc.append(loss_ctc.data[0])
if -1000 < total_loss.data[0] < 1000:
loss_mean_total.append(total_loss.data[0])
if len(loss_mean_total) > 100:
loss_mean_total = loss_mean_total[-100:]
status = "epoch: {0:5d}; iter_num: {1:5d}; lr: {2:.2E}; loss_mean: {3:.3f}; loss: {4:.3f}".format(epoch_count,
lr_scheduler.last_iter,
lr_scheduler.get_lr(),
np.mean(loss_mean_total),
loss_ctc.data[0])
if ada_after_rnn:
loss_mean_ada_rnn_s.append(err_ada_rnn_s.data[0])
loss_mean_ada_rnn_t.append(err_ada_rnn_t.data[0])
status += "; ladatrnns: {0:.3f}; ladatrnnt: {1:.3f}".format(
err_ada_rnn_s.data[0], err_ada_rnn_t.data[0]
)
if ada_before_rnn:
loss_mean_ada_cnn_s.append(err_ada_cnn_s.data[0])
loss_mean_ada_cnn_t.append(err_ada_cnn_t.data[0])
status += "; ladatcnns: {0:.3f}; ladatcnnt: {1:.3f}".format(
err_ada_cnn_s.data[0], err_ada_cnn_t.data[0]
)
if do_vat:
loss_mean_vat.append(vat_loss.data[0])
status += "; lvat: {0:.3f}".format(
vat_loss.data[0]
)
if do_at:
loss_mean_at.append(at_loss.data[0])
status += "; lat: {0:.3f}".format(
at_loss.data[0]
)
if do_test_vat:
loss_mean_test_vat.append(test_vat_loss.data[0])
status += "; l_tvat: {0:.3f}".format(
test_vat_loss.data[0]
)
if do_test_vat_rnn or do_test_vat_cnn:
loss_mean_test_vat.append(test_vat_loss.data[0])
if do_test_vat_rnn and do_test_vat_cnn:
status += "; l_tvatc: {}".format(
cnn_lds.data[0]
)
status += "; l_tvatr: {}".format(
rnn_lds.data[0]
)
else:
status += "; l_tvat: {}".format(
test_vat_loss.data[0]
)
iterator.set_description(status)
optimizer.step()
if do_lr_step:
lr_scheduler.step()
if do_ema:
net.udate_ema()
iter_count += 1
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_last"))
epoch_count += 1
return
if __name__ == '__main__':
main()
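# --- Editor's note: hedged sketch, not part of the original file --------------
# The adversarial domain-adaptation weight computed inside the training loop
# above follows the usual DANN schedule; it is factored out here only to make
# the formula explicit. iter_count and max_iter are the same quantities used
# in main().
import numpy as np

def dann_alpha_sketch(iter_count, max_iter):
    p = float(iter_count) / max_iter
    return 2.0 / (1.0 + np.exp(-10.0 * p)) - 1.0
# Example: dann_alpha_sketch(0, 6000) == 0.0; the weight approaches 1.0 late in training.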
| true
| true
|
7907cff8bda5b4bf85fd1cfe56569ea160b3bb19
| 1,565
|
py
|
Python
|
python/friesian/example/wnd/csv_to_parquet.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | 3
|
2021-07-14T01:28:47.000Z
|
2022-03-02T01:16:32.000Z
|
python/friesian/example/wnd/csv_to_parquet.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | null | null | null |
python/friesian/example/wnd/csv_to_parquet.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from argparse import ArgumentParser
from pyspark.sql import SparkSession
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--input', type=str, required=True, help="The path to the csv file to be processed.")
parser.add_argument('--output', type=str, default=".", help="The path to the folder to save the parquet data.")
args = parser.parse_args()
spark = SparkSession.builder.getOrCreate()
input = args.input
output = args.output
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
df = spark.read.schema(schema).option('sep', '\t').csv(input)
df.write.parquet(output, mode="overwrite")
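# --- Editor's note: hedged sketch, not part of the original script ------------
# The schema above encodes a Criteo-style layout: label in _c0, 13 integer
# feature columns _c1.._c13, 26 categorical string columns _c14.._c39, read
# from a tab-separated file. A quick sanity check of the written parquet could
# look like the commented lines below (kept as comments so nothing runs on
# import; `spark` and `output` are the variables defined above).
# check_df = spark.read.parquet(output)
# check_df.printSchema()   # IntegerType for _c0.._c13, StringType for _c14.._c39
# print(check_df.count())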
| 36.395349
| 114
| 0.723323
|
from argparse import ArgumentParser
from pyspark.sql import SparkSession
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--input', type=str, required=True, help="The path to the csv file to be processed.")
parser.add_argument('--output', type=str, default=".", help="The path to the folder to save the parquet data.")
args = parser.parse_args()
spark = SparkSession.builder.getOrCreate()
input = args.input
output = args.output
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
df = spark.read.schema(schema).option('sep', '\t').csv(input)
df.write.parquet(output, mode="overwrite")
| true
| true
|
7907d0c90f7aa7e834df6c7dc70100df4279026e
| 2,986
|
py
|
Python
|
NBABet/Telegram.py
|
davideganna/NBA_Bet
|
dba00542b8ed63a5a7290f25209270b32d18fb86
|
[
"MIT"
] | 4
|
2021-08-02T07:49:51.000Z
|
2021-12-14T18:49:27.000Z
|
NBABet/Telegram.py
|
davideganna/NBA_Bet
|
dba00542b8ed63a5a7290f25209270b32d18fb86
|
[
"MIT"
] | 1
|
2021-08-03T14:55:13.000Z
|
2021-08-03T14:55:13.000Z
|
NBABet/Telegram.py
|
davideganna/NBA_Bet
|
dba00542b8ed63a5a7290f25209270b32d18fb86
|
[
"MIT"
] | null | null | null |
# --------------------- Telegram.py --------------------------------- #
# Allows the integration with Telegram Bot.
# ------------------------------------------------------------------- #
from numpy.core.fromnumeric import around, std
import requests
import Elo
from Models import Models
import Helper
import pandas as pd
import numpy as np
class TelegramBot():
"""
Allows integration with the Telegram Bot.
"""
def __init__(self):
self.url = 'https://api.telegram.org/'
with open('secrets/telegram_secrets') as f:
lines = f.readlines()
self.bot_token = lines[0].strip()
self.chat_id = lines[1].strip()
def send_message(self, d:dict):
df = pd.read_csv('past_data/2021_2022/split_stats_per_game.csv')
df = Helper.add_features_to_df(df)
n = 3
train_df = pd.read_csv('past_data/average_seasons/average_NSeasons_prod.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
clf = Models.build_RF_classifier(std_df)
text = "🏀 Tonight's Games: Home vs. Away 🏀\n\n"
for home, away in d.items():
last_N_games_away = df.loc[df['Team_away'] == away].tail(n)
last_N_games_home = df.loc[df['Team_home'] == home].tail(n)
to_predict = pd.concat(
[
last_N_games_away[Models.away_features].mean(),
last_N_games_home[Models.home_features].mean()
],
axis=0)[Models.features]
prob_home_rf, prob_away_rf = clf.predict_proba(scaler.transform(to_predict.values.reshape(1,-1)))[0]
prob_away_elo, prob_home_elo = Elo.get_probas(away, home)
if ((prob_home_rf > 0.5) and (prob_home_elo > 0.5)):
prob_home = str(around((prob_home_rf + prob_home_elo)/2, decimals=3))
odds_home = str(around(1/float(prob_home), decimals=2))
if float(prob_home) >= 0.6:
text = text + home + '(' + prob_home + ' --> ' + odds_home + ') vs. ' + away + '\n\
RF Prob.: ' + str(around(prob_home_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_home_elo, decimals=3)) + '\n\n'
if ((prob_away_rf > 0.5) and (prob_away_elo > 0.5)):
prob_away = str(around((prob_away_rf + prob_away_elo)/2, decimals=3))
odds_away = str(around(1/float(prob_away), decimals=2))
if float(prob_away) >= 0.6:
text = text + home + ' vs. ' + away + '(' + prob_away + ' --> ' + odds_away + ')' + '\n\
RF Prob.: ' + str(around(prob_away_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_away_elo, decimals=3)) + '\n\n'
query = self.url + self.bot_token + '/sendMessage?' + self.chat_id + '&text=' + text
requests.request("POST", query)
| 43.275362
| 112
| 0.540522
|
from numpy.core.fromnumeric import around, std
import requests
import Elo
from Models import Models
import Helper
import pandas as pd
import numpy as np
class TelegramBot():
def __init__(self):
self.url = 'https://api.telegram.org/'
with open('secrets/telegram_secrets') as f:
lines = f.readlines()
self.bot_token = lines[0].strip()
self.chat_id = lines[1].strip()
def send_message(self, d:dict):
df = pd.read_csv('past_data/2021_2022/split_stats_per_game.csv')
df = Helper.add_features_to_df(df)
n = 3
train_df = pd.read_csv('past_data/average_seasons/average_NSeasons_prod.csv')
std_df, scaler = Helper.standardize_DataFrame(train_df)
clf = Models.build_RF_classifier(std_df)
text = "🏀 Tonight's Games: Home vs. Away 🏀\n\n"
for home, away in d.items():
last_N_games_away = df.loc[df['Team_away'] == away].tail(n)
last_N_games_home = df.loc[df['Team_home'] == home].tail(n)
to_predict = pd.concat(
[
last_N_games_away[Models.away_features].mean(),
last_N_games_home[Models.home_features].mean()
],
axis=0)[Models.features]
prob_home_rf, prob_away_rf = clf.predict_proba(scaler.transform(to_predict.values.reshape(1,-1)))[0]
prob_away_elo, prob_home_elo = Elo.get_probas(away, home)
if ((prob_home_rf > 0.5) and (prob_home_elo > 0.5)):
prob_home = str(around((prob_home_rf + prob_home_elo)/2, decimals=3))
odds_home = str(around(1/float(prob_home), decimals=2))
if float(prob_home) >= 0.6:
text = text + home + '(' + prob_home + ' --> ' + odds_home + ') vs. ' + away + '\n\
RF Prob.: ' + str(around(prob_home_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_home_elo, decimals=3)) + '\n\n'
if ((prob_away_rf > 0.5) and (prob_away_elo > 0.5)):
prob_away = str(around((prob_away_rf + prob_away_elo)/2, decimals=3))
odds_away = str(around(1/float(prob_away), decimals=2))
if float(prob_away) >= 0.6:
text = text + home + ' vs. ' + away + '(' + prob_away + ' --> ' + odds_away + ')' + '\n\
RF Prob.: ' + str(around(prob_away_rf, decimals=3)) + '\n\
Elo Prob.: ' + str(around(prob_away_elo, decimals=3)) + '\n\n'
query = self.url + self.bot_token + '/sendMessage?' + self.chat_id + '&text=' + text
requests.request("POST", query)
| true
| true
|
7907d0cb7b597721da3ae026d431521a2fa0335b
| 3,946
|
py
|
Python
|
td/oauth.py
|
southpaw27/td-ameritrade-python-api
|
ddb2a48b7cc2ffe00c31b4a4cef55dce39c7a442
|
[
"MIT"
] | 610
|
2019-11-08T04:56:28.000Z
|
2022-03-29T18:17:01.000Z
|
td/oauth.py
|
southpaw27/td-ameritrade-python-api
|
ddb2a48b7cc2ffe00c31b4a4cef55dce39c7a442
|
[
"MIT"
] | 177
|
2019-12-22T18:03:48.000Z
|
2022-03-12T20:37:40.000Z
|
td/oauth.py
|
southpaw27/td-ameritrade-python-api
|
ddb2a48b7cc2ffe00c31b4a4cef55dce39c7a442
|
[
"MIT"
] | 248
|
2019-11-08T04:56:38.000Z
|
2022-03-29T20:09:22.000Z
|
import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
# Define the templates folder.
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')
# Create the App.
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
return render_template("index.html")
@app.route("/login")
def demo():
"""Step 1: User Authorization.
    Redirect the user/resource owner to the OAuth provider (here, TD Ameritrade)
    using a URL with a few key OAuth parameters.
"""
# Build the authorization URL.
auth_tuple = app.config['auth_client'].authorization_url()
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
@app.route("/login/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
# Grab the Refresh Token.
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = flask_client
app.config['call_close'] = close_after
app.run(
ssl_context=(certs_pem, certs_key),
host='localhost',
port=5000,
debug=True
)
if __name__ == "__main__":
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
# Define the Secret Key.
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Define the App Configurations.
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
# Run the App.
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
# flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials)
# flask_td_app.run()
# This allows us to use a plain HTTP callback
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# # app.run(ssl_context="adhoc")
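# --- Editor's note: hedged sketch, not part of the original module ------------
# Example of driving the same flow programmatically through run(), mirroring
# the __main__ block above; config keys and file paths are assumptions and the
# lines are left commented so nothing executes on import.
# from configparser import ConfigParser
# config = ConfigParser()
# config.read('config/config.ini')
# run(
#     flask_client=FlaskTDAuth(
#         client_id=config.get('main', 'client_id'),
#         redirect_uri=config.get('main', 'redirect_uri'),
#         credentials_file=pathlib.Path(config.get('main', 'json_path')),
#     ),
#     close_after=True,  # redirect /login/callback to /shutdown once tokens are stored
# )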
| 28.594203
| 109
| 0.696655
|
import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
return render_template("index.html")
@app.route("/login")
def demo():
auth_tuple = app.config['auth_client'].authorization_url()
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
@app.route("/login/callback", methods=["GET"])
def callback():
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = flask_client
app.config['call_close'] = close_after
app.run(
ssl_context=(certs_pem, certs_key),
host='localhost',
port=5000,
debug=True
)
if __name__ == "__main__":
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
| true
| true
|
7907d0f4268a7e165b3ca17d49d01c36511c2cad
| 2,207
|
py
|
Python
|
web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py
|
duanbing/fedlearner
|
5cce3c1fe09abe66879274a0ad3dc8e2f25a322d
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py
|
duanbing/fedlearner
|
5cce3c1fe09abe66879274a0ad3dc8e2f25a322d
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py
|
duanbing/fedlearner
|
5cce3c1fe09abe66879274a0ad3dc8e2f25a322d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from flatten_dict import flatten
from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot
from fedlearner_webconsole.workflow_template.template_validaor \
import YamlTemplate
class _YamlTemplate(YamlTemplate):
# Which placeholders in the template should be interpreted
idpattern = r'Slot_[a-z0-9_]*'
def substitute(self, mapping):
return super()._substitute(mapping,
fixed_placeholder=None,
ignore_invalid=True)
def format_yaml(yaml, **kwargs):
"""Formats a yaml template.
Example usage:
format_yaml('{"abc": ${x.y}}', x={'y': 123})
output should be '{"abc": 123}'
"""
template = _YamlTemplate(yaml)
try:
return template.substitute(flatten(kwargs or {},
reducer='dot'))
except KeyError as e:
raise RuntimeError(
'Unknown placeholder: {}'.format(e.args[0])) from e
def generate_yaml_template(base_yaml, slots_proto):
"""
Args:
base_yaml: A string representation of one type job's base yaml.
slots_proto: A proto map object representation of modification
template's operable smallest units.
Returns:
string: A yaml_template
"""
slots = {}
for key in slots_proto:
if slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT:
slots[key] = slots_proto[key].default
else:
slots[key] = f'${{{slots_proto[key].reference}}}'
return format_yaml(base_yaml, **slots)
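# --- Editor's note: hedged sketch, not part of the original module ------------
# Illustration of the Slot_ placeholder substitution performed above; the yaml
# snippet and slot names are made up for the example and left commented out.
# format_yaml('{"replicas": ${Slot_replicas}, "image": "${Slot_image}"}',
#             Slot_replicas=2, Slot_image='fedlearner:latest')
# # -> '{"replicas": 2, "image": "fedlearner:latest"}'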
| 34.484375
| 74
| 0.663344
|
from flatten_dict import flatten
from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot
from fedlearner_webconsole.workflow_template.template_validaor \
import YamlTemplate
class _YamlTemplate(YamlTemplate):
idpattern = r'Slot_[a-z0-9_]*'
def substitute(self, mapping):
return super()._substitute(mapping,
fixed_placeholder=None,
ignore_invalid=True)
def format_yaml(yaml, **kwargs):
template = _YamlTemplate(yaml)
try:
return template.substitute(flatten(kwargs or {},
reducer='dot'))
except KeyError as e:
raise RuntimeError(
'Unknown placeholder: {}'.format(e.args[0])) from e
def generate_yaml_template(base_yaml, slots_proto):
slots = {}
for key in slots_proto:
if slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT:
slots[key] = slots_proto[key].default
else:
slots[key] = f'${{{slots_proto[key].reference}}}'
return format_yaml(base_yaml, **slots)
| true
| true
|
7907d127cf12169a5fef2281a97299f28b322d70
| 13,055
|
py
|
Python
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Quotes API For Digital Portals
The quotes API combines endpoints for retrieving security end-of-day, delayed, and realtime prices with performance key figures and basic reference data on the security and market level. The API supports over 20 different price types for each quote and comes with basic search endpoints based on security identifiers and instrument names. Market coverage is included in the *Sample Use Cases* section below. The Digital Portal use case is focused on high-performance applications that are * serving millions of end-users, * accessible by client browsers via the internet, * supporting subscriptions for streamed updates out-of-the-box, * typically combining a wide variety of *for Digital Portals*-APIs into a highly use-case specific solution for customers, * integrated into complex infrastructures such as existing frontend frameworks, authentication services. All APIs labelled *for Digital Portals* have been designed for direct use by client web applications and feature extreme low latency: The average response time across all endpoints is 30 ms whereas 99% of all requests are answered in close to under 300ms. See the Time Series API for Digital Portals for direct access to price histories, and the News API for Digital Portals for searching and fetching related news. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20013_data import InlineResponse20013Data
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response200_meta import InlineResponse200Meta
globals()['InlineResponse20013Data'] = InlineResponse20013Data
globals()['InlineResponse200Meta'] = InlineResponse200Meta
class InlineResponse20013(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([InlineResponse20013Data],), # noqa: E501
'meta': (InlineResponse200Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 48.712687
| 1,302
| 0.603294
|
import re
import sys
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20013_data import InlineResponse20013Data
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response200_meta import InlineResponse200Meta
globals()['InlineResponse20013Data'] = InlineResponse20013Data
globals()['InlineResponse200Meta'] = InlineResponse200Meta
class InlineResponse20013(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'data': ([InlineResponse20013Data],),
'meta': (InlineResponse200Meta,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data',
'meta': 'meta',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true
| true
|
7907d2520b97c6db4ca765d799011e4657476b9e
| 62
|
py
|
Python
|
medical_prescription/chat/validators/__init__.py
|
ristovao/2017.2-Receituario-Medico
|
5387eb80dfb354e948abe64f7d8bbe087fc4f136
|
[
"MIT"
] | 11
|
2017-09-19T00:29:40.000Z
|
2018-04-05T23:52:39.000Z
|
medical_prescription/chat/validators/__init__.py
|
ristovao/2017.2-Receituario-Medico
|
5387eb80dfb354e948abe64f7d8bbe087fc4f136
|
[
"MIT"
] | 271
|
2017-09-09T00:07:28.000Z
|
2017-12-07T05:00:45.000Z
|
medical_prescription/chat/validators/__init__.py
|
ristovao/2017.2-Receituario-Medico
|
5387eb80dfb354e948abe64f7d8bbe087fc4f136
|
[
"MIT"
] | 26
|
2017-08-31T20:48:49.000Z
|
2018-03-21T15:11:27.000Z
|
# Local Django
from .messagevalidator import MessageValidator
| 20.666667
| 46
| 0.854839
|
from .messagevalidator import MessageValidator
| true
| true
|
7907d27ba6ed261852f88877a4e315e999f4610d
| 1,191
|
py
|
Python
|
twitter_app/iris_classifier.py
|
Struth-Rourke/twitter_flask_app
|
f73ad147f216ad77f8010ef6c02da4784dbfa9c8
|
[
"MIT"
] | null | null | null |
twitter_app/iris_classifier.py
|
Struth-Rourke/twitter_flask_app
|
f73ad147f216ad77f8010ef6c02da4784dbfa9c8
|
[
"MIT"
] | 3
|
2021-09-08T02:05:54.000Z
|
2022-03-12T00:36:59.000Z
|
twitter_app/iris_classifier.py
|
Struth-Rourke/twitter_flask_app
|
f73ad147f216ad77f8010ef6c02da4784dbfa9c8
|
[
"MIT"
] | null | null | null |
# twitter_app/iris_classifier.py
import os
import pickle
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
MODEL_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "models", "latest_model.pkl")
def train_and_save_model():
print("TRAINING THE MODEL...")
X, y = load_iris(return_X_y=True)
#print(type(X), X.shape) #> <class 'numpy.ndarray'> (150, 4)
#print(type(y), y.shape) #> <class 'numpy.ndarray'> (150,)
classifier = LogisticRegression() # for example
classifier.fit(X, y)
print("SAVING THE MODEL...")
with open(MODEL_FILEPATH, "wb") as model_file:
pickle.dump(classifier, model_file)
return classifier
def load_model():
print("LOADING THE MODEL...")
with open(MODEL_FILEPATH, "rb") as model_file:
saved_model = pickle.load(model_file)
return saved_model
if __name__ == "__main__":
#train_and_save_model()
clf = load_model()
print("CLASSIFIER:", clf)
X, y = load_iris(return_X_y=True) # just to have some data to use when predicting
inputs = X[:2, :]
print(type(inputs), inputs)
result = clf.predict(inputs)
print("RESULT:", result)
| 27.697674
| 92
| 0.677582
|
import os
import pickle
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
MODEL_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "models", "latest_model.pkl")
def train_and_save_model():
print("TRAINING THE MODEL...")
X, y = load_iris(return_X_y=True)
    classifier = LogisticRegression()
    classifier.fit(X, y)
print("SAVING THE MODEL...")
with open(MODEL_FILEPATH, "wb") as model_file:
pickle.dump(classifier, model_file)
return classifier
def load_model():
print("LOADING THE MODEL...")
with open(MODEL_FILEPATH, "rb") as model_file:
saved_model = pickle.load(model_file)
return saved_model
if __name__ == "__main__":
clf = load_model()
print("CLASSIFIER:", clf)
X, y = load_iris(return_X_y=True)
inputs = X[:2, :]
print(type(inputs), inputs)
result = clf.predict(inputs)
print("RESULT:", result)
| true
| true
|
7907d4cc1d7a5cfd49346651d20eeb4152d5b9ce
| 195
|
py
|
Python
|
convertextract/parsers/tsv_parser.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 12
|
2016-10-20T16:17:04.000Z
|
2022-03-10T06:36:59.000Z
|
convertextract/parsers/tsv_parser.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 3
|
2018-01-12T00:41:26.000Z
|
2020-08-12T05:04:45.000Z
|
convertextract/parsers/tsv_parser.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 3
|
2020-08-18T21:47:03.000Z
|
2022-02-03T06:32:46.000Z
|
import csv
from convertextract.parsers.csv_parser import Parser as BaseParser
class Parser(BaseParser):
"""Extract text from tab separated values files (.tsv).
"""
delimiter = '\t'
| 21.666667
| 66
| 0.717949
|
import csv
from convertextract.parsers.csv_parser import Parser as BaseParser
class Parser(BaseParser):
delimiter = '\t'
| true
| true
|
7907d57b99b40944d3cd9e2c239492ab9355ef78
| 1,161
|
py
|
Python
|
crawler/test_code/test_selenium.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
crawler/test_code/test_selenium.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
crawler/test_code/test_selenium.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.6
from selenium import webdriver
import time
browser = webdriver.Chrome(executable_path='/home/coslate/anaconda3/bin/chromedriver')
#url = 'https://stats.nba.com/leaders'
url = 'http://stats.nba.com/teams/traditional/#!?sort=W_PCT&dir=-1'
browser.get(url)
time.sleep(5)
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/div[1]/div[1]/div/div/label/select/option[3]').click()
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/div[1]/div[2]/div/div/label/select/option[2]').click()
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[3]/div/div/select/option[1]').click()
#table = browser.find_element_by_class_name('nba-stat-table__overflow')
table = browser.find_elements_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]/div[1]/table/tbody')
line1 = browser.find_element_by_xpath('//tr[@index="0"]')
print(line1.text)
print("All the window handles : ")
print(browser.window_handles)  # list all window handles
print("The current window handle : ")
print(browser.current_window_handle)  # show the current window handle
browser.close()
| 44.653846
| 130
| 0.750215
|
from selenium import webdriver
import time
browser = webdriver.Chrome(executable_path='/home/coslate/anaconda3/bin/chromedriver')
url = 'http://stats.nba.com/teams/traditional/#!?sort=W_PCT&dir=-1'
browser.get(url)
time.sleep(5)
table = browser.find_elements_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]/div[1]/table/tbody')
line1 = browser.find_element_by_xpath('//tr[@index="0"]')
print(line1.text)
print("All the window handles : ")
print(browser.window_handles)
print("The current window handle : ")
print(browser.current_window_handle)
browser.close()
| true
| true
|
7907d5d8da7a9d75d151f7561ae03dee7c281322
| 10,916
|
py
|
Python
|
algorithm/python/topological_sort.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
algorithm/python/topological_sort.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
algorithm/python/topological_sort.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
#---------------------------------------------------------------
# ALGORITHM DEMO : TOPOLOGICAL SORT
#---------------------------------------------------------------
# Topological Sort is an algorithm that finds an "ordering" of the nodes in an "order dependency" (directed acyclic) graph
# Concept
# https://blog.techbridge.cc/2020/05/10/leetcode-topological-sort/
# https://alrightchiu.github.io/SecondRound/graph-li-yong-dfsxun-zhao-dagde-topological-sorttuo-pu-pai-xu.html
# V0
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
# step 1) maintain a stack, save "ordering" nodes in it (and return in final step)
# step 2) init visited as [False]*self.V (all nodes are NOT visited yet)
# step 3) iterate over all vertices in graph, if not visited, then run topologicalSortUtil
# step 4) return result (stack)
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
    # helper to build the graph
def addEdge(self, u, v):
self.graph[u].append(v)
def topologicalSortUtil(self, v, visited, stack):
visited[v] = True
### NOTE this !!! (self.graph[v])
for k in self.graph[v]:
if visited[k] == False:
self.topologicalSortUtil(k, visited, stack)
# stack.insert(0,v) # instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1])
"""
### NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v)
"""
stack.insert(0,v)
def topologicalSort(self):
visited = [False] * self.V
stack = []
### NOTE this !!! (range(self.V))
for v in range(self.V):
            # call topologicalSortUtil only if visited[v] == False (the vertex is not visited yet)
if visited[v] == False:
self.topologicalSortUtil(v, visited, stack)
# return the result in inverse order
return stack[::-1]
### TEST
{"A": 0, "B":1, "C":2, "D": 3}
v = 4
g = Graph(v)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(2, 3)
g.addEdge(3, 1)
print (g.graph)
# ans should be TableB, TableD, TableC, TableA.
r = g.topologicalSort()
print (r)
# V0'
from collections import defaultdict
class Graph:
def __init__(self, v):
self.graph = defaultdict(list)
self.v = v
def addEdge(self, a, b):
self.graph[a].append(b)
def topologicalSortUtil(self, x, visited, stack):
# V1
if visited[x]:
return
for k in self.graph[x]:
self.topologicalSortUtil(k, visited, stack)
visited[x] = True
stack.insert(0, x)
# V2
# visited[v] = True
# ### NOTE this !!! (self.graph[v])
# for k in self.graph[v]:
# if visited[k] == False:
# self.topologicalSortUtil(k, visited, stack)
# # stack.insert(0,v) # instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1])
# """
# ### NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v)
# """
# stack.insert(0,v)
def topologicalSort(self):
visited = [False] * self.v
stack = []
for x in range(self.v):
if not visited[x]:
self.topologicalSortUtil(x, visited, stack)
print ("stack = " + str(stack))
return stack[::-1]
# V0''
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
from collections import defaultdict
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
# for testing (build graph)
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# V1
# https://www.geeksforgeeks.org/topological-sorting/
# Python program to print topological sorting of a DAG
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list) # dictionary containing adjacency List
self.V = vertices # No. of vertices
# function to add an edge to graph
def addEdge(self, u, v):
self.graph[u].append(v)
# A recursive function used by topologicalSort
def topologicalSortUtil(self, v, visited, stack):
# Mark the current node as visited.
visited[v] = True
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Push current vertex to stack which stores result
#stack.append(v)
stack.insert(0,v)
# The function to do Topological Sort. It uses recursive
# topologicalSortUtil()
def topologicalSort(self):
# Mark all the vertices as not visited
visited = [False]*self.V
stack = []
# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Print contents of the stack
print(stack[::-1]) # return list in reverse order
# TEST
# Driver Code
# g = Graph(6)
# g.addEdge(5, 2)
# g.addEdge(5, 0)
# g.addEdge(4, 0)
# g.addEdge(4, 1)
# g.addEdge(2, 3)
# g.addEdge(3, 1)
#
# print ("Following is a Topological Sort of the given graph")
#
# # Function Call
# g.topologicalSort()
# V1
# https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py
"""Topological Sort."""
# a
# / \
# b c
# / \
# d e
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
class Graph:
def topological_sort(self, start, visited, sort):
"""Perform topological sort on a directed acyclic graph."""
current = start
# add current to visited
visited.append(current)
neighbors = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
                sort = self.topological_sort(neighbor, visited, sort)
# if all neighbors visited add current to sort
sort.append(current)
# if all vertices haven't been visited select a new one to visit
if len(visited) != len(vertices):
for vertice in vertices:
if vertice not in visited:
                sort = self.topological_sort(vertice, visited, sort)
# return sort
return sort
# TEST
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
# sort = topological_sort("a", [], [])
# print(sort)
# V1'
# http://www.runoob.com/python3/python-topological-sorting.html
class Graph:
from collections import defaultdict
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# TEST
# g= Graph(6)
# g.addEdge(5, 2);
# g.addEdge(5, 0);
# g.addEdge(4, 0);
# g.addEdge(4, 1);
# g.addEdge(2, 3);
# g.addEdge(3, 1);
# print ("output of Topological Sort ")
# g.topologicalSort()
# [5, 4, 2, 3, 1, 0]
# V2
# https://zhuanlan.zhihu.com/p/69858335
def topoSort(graph):
in_degrees = dict((u,0) for u in graph) # init (value with 0)
num = len(in_degrees)
for u in graph:
for v in graph[u]:
in_degrees[v] += 1
Q = [u for u in in_degrees if in_degrees[u] == 0]
Seq = []
while Q:
u = Q.pop()
Seq.append(u)
for v in graph[u]:
in_degrees[v] -= 1
if in_degrees[v] == 0:
Q.append(v)
if len(Seq) == num:
return Seq
else:
return None
# TEST
# G = {
# 'a':'bf',
# 'b':'cdf',
# 'c':'d',
# 'd':'ef',
# 'e':'f',
# 'f':''
# }
# print(topoSort(G))
# ['a', 'b', 'c', 'd', 'e', 'f']
# V3
# https://www.educative.io/courses/grokking-the-coding-interview/m25rBmwLV00
from collections import deque
def topological_sort(vertices, edges):
sortedOrder = []
if vertices <= 0:
return sortedOrder
# a. Initialize the graph
inDegree = {i: 0 for i in range(vertices)} # count of incoming edges
graph = {i: [] for i in range(vertices)} # adjacency list graph
# b. Build the graph
for edge in edges:
parent, child = edge[0], edge[1]
graph[parent].append(child) # put the child into it's parent's list
inDegree[child] += 1 # increment child's inDegree
# c. Find all sources i.e., all vertices with 0 in-degrees
sources = deque()
for key in inDegree:
if inDegree[key] == 0:
sources.append(key)
# d. For each source, add it to the sortedOrder and subtract one from all of its children's in-degrees
# if a child's in-degree becomes zero, add it to the sources queue
while sources:
vertex = sources.popleft()
sortedOrder.append(vertex)
for child in graph[vertex]: # get the node's children to decrement their in-degrees
inDegree[child] -= 1
if inDegree[child] == 0:
sources.append(child)
# topological sort is not possible as the graph has a cycle
if len(sortedOrder) != vertices:
return []
return sortedOrder
# TEST
# def main():
# print("Topological sort: " +
# str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]])))
# print("Topological sort: " +
# str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]])))
# print("Topological sort: " +
# str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))
#main()
| 30.322222
| 146
| 0.563576
|
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self, u, v):
self.graph[u].append(v)
def topologicalSortUtil(self, v, visited, stack):
visited[v] = True
        for k in self.graph[v]:
            if visited[k] == False:
                self.topologicalSortUtil(k, visited, stack)
        stack.insert(0,v)
    def topologicalSort(self):
        visited = [False] * self.V
        stack = []
        for v in range(self.V):
            if visited[v] == False:
                self.topologicalSortUtil(v, visited, stack)
        return stack[::-1]
{"A": 0, "B":1, "C":2, "D": 3}
v = 4
g = Graph(v)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(2, 3)
g.addEdge(3, 1)
print (g.graph)
r = g.topologicalSort()
print (r)
from collections import defaultdict
class Graph:
def __init__(self, v):
self.graph = defaultdict(list)
self.v = v
def addEdge(self, a, b):
self.graph[a].append(b)
def topologicalSortUtil(self, x, visited, stack):
# V1
if visited[x]:
return
for k in self.graph[x]:
self.topologicalSortUtil(k, visited, stack)
visited[x] = True
stack.insert(0, x)
# V2
# visited[v] = True
# ### NOTE this !!! (self.graph[v])
# for k in self.graph[v]:
# if visited[k] == False:
# self.topologicalSortUtil(k, visited, stack)
# # stack.insert(0,v) # instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1])
# """
# ### NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v)
# """
# stack.insert(0,v)
def topologicalSort(self):
visited = [False] * self.v
stack = []
for x in range(self.v):
if not visited[x]:
self.topologicalSortUtil(x, visited, stack)
print ("stack = " + str(stack))
return stack[::-1]
# V0''
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
from collections import defaultdict
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
# for testing (build graph)
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# V1
# https://www.geeksforgeeks.org/topological-sorting/
# Python program to print topological sorting of a DAG
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list) # dictionary containing adjacency List
self.V = vertices # No. of vertices
# function to add an edge to graph
def addEdge(self, u, v):
self.graph[u].append(v)
# A recursive function used by topologicalSort
def topologicalSortUtil(self, v, visited, stack):
# Mark the current node as visited.
visited[v] = True
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Push current vertex to stack which stores result
#stack.append(v)
stack.insert(0,v)
# The function to do Topological Sort. It uses recursive
# topologicalSortUtil()
def topologicalSort(self):
# Mark all the vertices as not visited
visited = [False]*self.V
stack = []
# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Print contents of the stack
print(stack[::-1]) # return list in reverse order
# TEST
# Driver Code
# g = Graph(6)
# g.addEdge(5, 2)
# g.addEdge(5, 0)
# g.addEdge(4, 0)
# g.addEdge(4, 1)
# g.addEdge(2, 3)
# g.addEdge(3, 1)
#
# print ("Following is a Topological Sort of the given graph")
#
# # Function Call
# g.topologicalSort()
# V1
# https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py
# a
# / \
# b c
# / \
# d e
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
class Graph:
def topological_sort(self, start, visited, sort):
current = start
# add current to visited
visited.append(current)
neighbors = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
                sort = self.topological_sort(neighbor, visited, sort)
# if all neighbors visited add current to sort
sort.append(current)
# if all vertices haven't been visited select a new one to visit
if len(visited) != len(vertices):
for vertice in vertices:
if vertice not in visited:
                sort = self.topological_sort(vertice, visited, sort)
return sort
# http://www.runoob.com/python3/python-topological-sorting.html
class Graph:
from collections import defaultdict
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# TEST
# g= Graph(6)
# g.addEdge(5, 2);
# g.addEdge(5, 0);
# g.addEdge(4, 0);
# g.addEdge(4, 1);
# g.addEdge(2, 3);
# g.addEdge(3, 1);
# print ("output of Topological Sort ")
# g.topologicalSort()
# [5, 4, 2, 3, 1, 0]
# V2
# https://zhuanlan.zhihu.com/p/69858335
def topoSort(graph):
in_degrees = dict((u,0) for u in graph) # init (value with 0)
num = len(in_degrees)
for u in graph:
for v in graph[u]:
in_degrees[v] += 1
Q = [u for u in in_degrees if in_degrees[u] == 0]
Seq = []
while Q:
u = Q.pop()
Seq.append(u)
for v in graph[u]:
in_degrees[v] -= 1
if in_degrees[v] == 0:
Q.append(v)
if len(Seq) == num:
return Seq
else:
return None
# TEST
# G = {
# 'a':'bf',
# 'b':'cdf',
# 'c':'d',
# 'd':'ef',
# 'e':'f',
# 'f':''
# }
# print(topoSort(G))
# ['a', 'b', 'c', 'd', 'e', 'f']
# V3
# https://www.educative.io/courses/grokking-the-coding-interview/m25rBmwLV00
from collections import deque
def topological_sort(vertices, edges):
sortedOrder = []
if vertices <= 0:
return sortedOrder
# a. Initialize the graph
inDegree = {i: 0 for i in range(vertices)} # count of incoming edges
graph = {i: [] for i in range(vertices)} # adjacency list graph
# b. Build the graph
for edge in edges:
parent, child = edge[0], edge[1]
graph[parent].append(child) # put the child into it's parent's list
inDegree[child] += 1 # increment child's inDegree
sources = deque()
for key in inDegree:
if inDegree[key] == 0:
sources.append(key)
# if a child's in-degree becomes zero, add it to the sources queue
while sources:
vertex = sources.popleft()
sortedOrder.append(vertex)
for child in graph[vertex]:
inDegree[child] -= 1
if inDegree[child] == 0:
sources.append(child)
# topological sort is not possible as the graph has a cycle
if len(sortedOrder) != vertices:
return []
return sortedOrder
# TEST
# def main():
# print("Topological sort: " +
# str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]])))
# print("Topological sort: " +
# str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]])))
# print("Topological sort: " +
# str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))
#main()
| true
| true
|
7907d6df00104e5dbb8f2efc2f845186d93d01d2
| 912
|
py
|
Python
|
examples/data.py
|
zkx741481546/keract
|
6f25711e54f7f8b5387fff8f79ad35a0a1113d33
|
[
"MIT"
] | null | null | null |
examples/data.py
|
zkx741481546/keract
|
6f25711e54f7f8b5387fff8f79ad35a0a1113d33
|
[
"MIT"
] | null | null | null |
examples/data.py
|
zkx741481546/keract
|
6f25711e54f7f8b5387fff8f79ad35a0a1113d33
|
[
"MIT"
] | 1
|
2019-03-22T17:10:38.000Z
|
2019-03-22T17:10:38.000Z
|
import keras
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
num_classes = 10
def get_mnist_data():
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
| 31.448276
| 70
| 0.710526
|
import keras
from keras.datasets import mnist
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
num_classes = 10
def get_mnist_data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
| true
| true
|
7907d6e31c23a931fa12193e8b0d2f539025e7e2
| 1,356
|
py
|
Python
|
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.instance",
manifest={
"TextSentimentPredictionInstance",
},
)
class TextSentimentPredictionInstance(proto.Message):
r"""Prediction input format for Text Sentiment.
Attributes:
content (str):
The text snippet to make the predictions on.
mime_type (str):
The MIME type of the text snippet. The
supported MIME types are listed below.
- text/plain
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.12
| 74
| 0.676254
|
import proto
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.instance",
manifest={
"TextSentimentPredictionInstance",
},
)
class TextSentimentPredictionInstance(proto.Message):
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| true
| true
|
7907d6ec77fd61d93ef1f5aa9370235babd38542
| 2,998
|
py
|
Python
|
checktheplug/data/ServerDao.py
|
maximx1/checktheplug
|
585068666a93cee0c6e8dd80c92511d6cee5ca04
|
[
"MIT"
] | null | null | null |
checktheplug/data/ServerDao.py
|
maximx1/checktheplug
|
585068666a93cee0c6e8dd80c92511d6cee5ca04
|
[
"MIT"
] | 26
|
2015-02-04T15:09:54.000Z
|
2015-03-22T02:44:14.000Z
|
checktheplug/data/ServerDao.py
|
maximx1/checktheplug
|
585068666a93cee0c6e8dd80c92511d6cee5ca04
|
[
"MIT"
] | null | null | null |
import sqlite3
from checktheplug.models.Server import Server
"""
Operations to manage accessing the server database.
"""
class ServerDao:
"""
Sets up the object with the sql connection.
"""
def __init__(self, settings):
self.conn = sqlite3.connect(settings.database)
"""
Add Server to the database.
"""
def add(self, new_server):
if new_server:
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("INSERT INTO servers(host, url) values(?, ?)", (new_server.host, new_server.url))
return(Server(cur.lastrowid, new_server.host, new_server.url), None)
except sqlite3.IntegrityError as er:
return (None, "There was a db issue: " + str(er))
else:
return (None, "No server passed in")
"""
Find all the servers for a particular app.
"""
def find_by_app_id(self, app_id):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = ?", (app_id,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], app_id), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
Find x number of available servers or all that are available.
"""
def find_available_servers(self, quantity):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = null limit = ?", (quantity,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
Retrieve all servers.
"""
def retrieve_all_servers(self):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers")
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
Tie an app to a number of servers.
"""
def tie_app_to_servers(self, app_id, available_servers):
try:
with self.conn:
cur = self.conn.cursor()
                # one "?" placeholder per server id being tied to the app
                server_id_string = ', '.join("?" * len(available_servers))
                cur.execute("update servers set app_id = ? where id in ({0})".format(server_id_string), tuple([app_id] + available_servers))
return (None, "ok")
except Exception as er:
return (None, "There was a db issue: " + str(er))
| 37.012346
| 140
| 0.53936
|
import sqlite3
from checktheplug.models.Server import Server
class ServerDao:
def __init__(self, settings):
self.conn = sqlite3.connect(settings.database)
def add(self, new_server):
if new_server:
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("INSERT INTO servers(host, url) values(?, ?)", (new_server.host, new_server.url))
return(Server(cur.lastrowid, new_server.host, new_server.url), None)
except sqlite3.IntegrityError as er:
return (None, "There was a db issue: " + str(er))
else:
return (None, "No server passed in")
def find_by_app_id(self, app_id):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = ?", (app_id,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], app_id), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
def find_available_servers(self, quantity):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = null limit = ?", (quantity,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
def retrieve_all_servers(self):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers")
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
def tie_app_to_servers(self, app_id, available_servers):
try:
with self.conn:
cur = self.conn.cursor()
                server_id_string = ', '.join("?" * len(available_servers))
                cur.execute("update servers set app_id = ? where id in ({0})".format(server_id_string), tuple([app_id] + available_servers))
return (None, "ok")
except Exception as er:
return (None, "There was a db issue: " + str(er))
| true
| true
|
7907d718e907edc5a762d32039926501bb9d4317
| 14,299
|
py
|
Python
|
airbyte-integrations/connectors/source-slack/source_slack/source.py
|
rclmenezes/airbyte
|
84ba3e79b3d223954fc2d997df02ff35c9d39840
|
[
"MIT"
] | 1
|
2021-08-06T10:21:40.000Z
|
2021-08-06T10:21:40.000Z
|
airbyte-integrations/connectors/source-slack/source_slack/source.py
|
rclmenezes/airbyte
|
84ba3e79b3d223954fc2d997df02ff35c9d39840
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-slack/source_slack/source.py
|
rclmenezes/airbyte
|
84ba3e79b3d223954fc2d997df02ff35c9d39840
|
[
"MIT"
] | 1
|
2021-05-31T00:08:34.000Z
|
2021-05-31T00:08:34.000Z
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
url_base = "https://slack.com/api/"
primary_key = "id"
page_size = 100
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
# Slack uses a cursor-based pagination strategy.
# Extract the cursor from the response if it exists and return it in a format that can be used to update request parameters
json_response = response.json()
next_cursor = json_response.get("response_metadata", {}).get("next_cursor")
if next_cursor:
return {"cursor": next_cursor}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = {"limit": self.page_size}
if next_page_token:
params.update(**next_page_token)
return params
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[MutableMapping]:
json_response = response.json()
yield from json_response.get(self.data_field, [])
def backoff_time(self, response: requests.Response) -> Optional[float]:
        # This method is called if we run into the rate limit. Slack puts the retry time in the `Retry-After` response header, so
        # we return that value. If the response is anything other than a 429 (e.g. 5XX), fall back on the default retry behavior.
# https://api.slack.com/docs/rate-limits#web
if response.status_code == 429:
return int(response.headers.get("Retry-After", 0))
@property
@abstractmethod
def data_field(self) -> str:
"""The name of the field in the response which contains the data"""
class Channels(SlackStream):
data_field = "channels"
def path(self, **kwargs) -> str:
return "conversations.list"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["types"] = "public_channel"
return params
class ChannelMembers(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "conversations.members"
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params["channel"] = stream_slice["channel_id"]
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for member_id in super().parse_response(response, **kwargs):
# Slack just returns raw IDs as a string, so we want to put them in a "join table" format
yield {"member_id": member_id, "channel_id": stream_slice["channel_id"]}
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel_record in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel_id": channel_record["id"]}
class Users(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "users.list"
# Incremental Streams
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
"""
Yields a list of the beginning and ending timestamps of each day between the start date and now.
The return value is a pendulum.period
"""
now = pendulum.now()
# Each stream_slice contains the beginning and ending timestamp for a 24 hour period
while start_date <= now:
end_date = start_date + interval
yield pendulum.period(start_date, end_date)
start_date = end_date
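# Illustrative sketch (hypothetical dates): chunk_date_range(pendulum.datetime(2021, 1, 1))
# yields consecutive one-day periods such as 2021-01-01..2021-01-02, 2021-01-02..2021-01-03,
# and so on, with the final period starting at or just before pendulum.now().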
class IncrementalMessageStream(SlackStream, ABC):
data_field = "messages"
cursor_field = "float_ts"
primary_key = ["channel_id", "ts"]
def __init__(self, default_start_date: DateTime, **kwargs):
self._start_ts = default_start_date.timestamp()
super().__init__(**kwargs)
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params.update(**stream_slice)
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for record in super().parse_response(response, **kwargs):
record[self.primary_key[0]] = stream_slice.get("channel", "")
record[self.cursor_field] = float(record[self.primary_key[1]])
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current_stream_state = current_stream_state or {}
current_stream_state[self.cursor_field] = max(
latest_record[self.cursor_field], current_stream_state.get(self.cursor_field, self._start_ts)
)
return current_stream_state
class ChannelMessages(IncrementalMessageStream):
def path(self, **kwargs) -> str:
return "conversations.history"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
stream_state = stream_state or {}
start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
for period in chunk_date_range(start_date):
yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}
def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
# Channel is provided when reading threads
if "channel" in stream_slice:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
else:
# if channel is not provided, then get channels and read accordingly
channels = Channels(authenticator=self.authenticator)
for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
stream_slice["channel"] = channel_record["id"]
yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
def __init__(self, lookback_window: Mapping[str, int], **kwargs):
self.messages_lookback_window = lookback_window
super().__init__(**kwargs)
def path(self, **kwargs) -> str:
return "conversations.replies"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
"""
The logic for incrementally syncing threads is not very obvious, so buckle up.
To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
way to guarantee that a thread deep in the past didn't receive a new message.
A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
the logic simple to reason about.
Good luck.
"""
stream_state = stream_state or {}
channels_stream = Channels(authenticator=self.authenticator)
if self.cursor_field in stream_state:
# Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date
# found in the state minus 7 days to pick up any new messages in threads.
# If there is state always use lookback
messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
else:
# If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date
messages_start_date = pendulum.from_timestamp(self._start_ts)
messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
self.logger.info(f"Syncing replies {message_chunk}")
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
message_chunk["channel"] = channel["id"]
for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
yield {"channel": channel["id"], self.cursor_field: message[self.primary_key]}
class JoinChannelsStream(HttpStream):
"""
This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
Its responses should only be logged for debugging reasons, not read as records.
"""
url_base = "https://slack.com/api/"
http_method = "POST"
primary_key = "id"
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
return [{"message": f"Successfully joined channel: {stream_slice['channel_name']}"}]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None # No pagination
def path(self, **kwargs) -> str:
return "conversations.join"
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel": channel["id"], "channel_name": channel["name"]}
def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
return {"channel": stream_slice["channel"]}
class SourceSlack(AbstractSource):
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
slack_client = WebClient(token=config["api_token"])
users = slack_client.users_list(limit=1).get("members", [])
if len(users) > 0:
return True, None
else:
return False, "There are no users in the given Slack instance"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
authenticator = TokenAuthenticator(config["api_token"])
default_start_date = pendulum.parse(config["start_date"])
threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
streams = [
Channels(authenticator=authenticator),
ChannelMembers(authenticator=authenticator),
ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
Threads(authenticator=authenticator, default_start_date=default_start_date, lookback_window=threads_lookback_window),
Users(authenticator=authenticator),
]
# To sync data from channels, the bot backed by this token needs to join all those channels. This operation is idempotent.
if config["join_channels"]:
logger = AirbyteLogger()
logger.info("joining Slack channels")
join_channels_stream = JoinChannelsStream(authenticator=authenticator)
for stream_slice in join_channels_stream.stream_slices():
for message in join_channels_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
logger.info(message["message"])
return streams
| 47.822742
| 150
| 0.699979
|
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
url_base = "https://slack.com/api/"
primary_key = "id"
page_size = 100
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
json_response = response.json()
next_cursor = json_response.get("response_metadata", {}).get("next_cursor")
if next_cursor:
return {"cursor": next_cursor}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = {"limit": self.page_size}
if next_page_token:
params.update(**next_page_token)
return params
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[MutableMapping]:
json_response = response.json()
yield from json_response.get(self.data_field, [])
def backoff_time(self, response: requests.Response) -> Optional[float]:
if response.status_code == 429:
return int(response.headers.get("Retry-After", 0))
@property
@abstractmethod
    def data_field(self) -> str:
        ...
class Channels(SlackStream):
data_field = "channels"
def path(self, **kwargs) -> str:
return "conversations.list"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["types"] = "public_channel"
return params
class ChannelMembers(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "conversations.members"
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params["channel"] = stream_slice["channel_id"]
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for member_id in super().parse_response(response, **kwargs):
yield {"member_id": member_id, "channel_id": stream_slice["channel_id"]}
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel_record in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel_id": channel_record["id"]}
class Users(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "users.list"
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
now = pendulum.now()
while start_date <= now:
end_date = start_date + interval
yield pendulum.period(start_date, end_date)
start_date = end_date
class IncrementalMessageStream(SlackStream, ABC):
data_field = "messages"
cursor_field = "float_ts"
primary_key = ["channel_id", "ts"]
def __init__(self, default_start_date: DateTime, **kwargs):
self._start_ts = default_start_date.timestamp()
super().__init__(**kwargs)
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params.update(**stream_slice)
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for record in super().parse_response(response, **kwargs):
record[self.primary_key[0]] = stream_slice.get("channel", "")
record[self.cursor_field] = float(record[self.primary_key[1]])
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current_stream_state = current_stream_state or {}
current_stream_state[self.cursor_field] = max(
latest_record[self.cursor_field], current_stream_state.get(self.cursor_field, self._start_ts)
)
return current_stream_state
class ChannelMessages(IncrementalMessageStream):
def path(self, **kwargs) -> str:
return "conversations.history"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
stream_state = stream_state or {}
start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
for period in chunk_date_range(start_date):
yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}
def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
if "channel" in stream_slice:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
else:
channels = Channels(authenticator=self.authenticator)
for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
stream_slice["channel"] = channel_record["id"]
yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
def __init__(self, lookback_window: Mapping[str, int], **kwargs):
self.messages_lookback_window = lookback_window
super().__init__(**kwargs)
def path(self, **kwargs) -> str:
return "conversations.replies"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
stream_state = stream_state or {}
channels_stream = Channels(authenticator=self.authenticator)
if self.cursor_field in stream_state:
messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
else:
messages_start_date = pendulum.from_timestamp(self._start_ts)
messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
self.logger.info(f"Syncing replies {message_chunk}")
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
message_chunk["channel"] = channel["id"]
for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
yield {"channel": channel["id"], self.cursor_field: message[self.primary_key]}
class JoinChannelsStream(HttpStream):
url_base = "https://slack.com/api/"
http_method = "POST"
primary_key = "id"
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
return [{"message": f"Successfully joined channel: {stream_slice['channel_name']}"}]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def path(self, **kwargs) -> str:
return "conversations.join"
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel": channel["id"], "channel_name": channel["name"]}
def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
return {"channel": stream_slice["channel"]}
class SourceSlack(AbstractSource):
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
slack_client = WebClient(token=config["api_token"])
users = slack_client.users_list(limit=1).get("members", [])
if len(users) > 0:
return True, None
else:
return False, "There are no users in the given Slack instance"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
authenticator = TokenAuthenticator(config["api_token"])
default_start_date = pendulum.parse(config["start_date"])
threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
streams = [
Channels(authenticator=authenticator),
ChannelMembers(authenticator=authenticator),
ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
Threads(authenticator=authenticator, default_start_date=default_start_date, lookback_window=threads_lookback_window),
Users(authenticator=authenticator),
]
if config["join_channels"]:
logger = AirbyteLogger()
logger.info("joining Slack channels")
join_channels_stream = JoinChannelsStream(authenticator=authenticator)
for stream_slice in join_channels_stream.stream_slices():
for message in join_channels_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
logger.info(message["message"])
return streams
| true
| true
|
7907d7bda5f235bc65679ea4efce65cf68e3b788
| 30,892
|
py
|
Python
|
examples/applications/plot_cyclical_feature_engineering.py
|
patrickctrf/scikit-learn
|
d6735f4851d828984a0517de954b9b88c74919fe
|
[
"BSD-3-Clause"
] | 1
|
2021-02-09T18:15:01.000Z
|
2021-02-09T18:15:01.000Z
|
examples/applications/plot_cyclical_feature_engineering.py
|
patrickctrf/scikit-learn
|
d6735f4851d828984a0517de954b9b88c74919fe
|
[
"BSD-3-Clause"
] | null | null | null |
examples/applications/plot_cyclical_feature_engineering.py
|
patrickctrf/scikit-learn
|
d6735f4851d828984a0517de954b9b88c74919fe
|
[
"BSD-3-Clause"
] | null | null | null |
"""
================================
Time-related feature engineering
================================
This notebook introduces different strategies to leverage time-related features
for a bike sharing demand regression task that is highly dependent on business
cycles (days, weeks, months) and yearly season cycles.
In the process, we introduce how to perform periodic feature engineering using
the :class:`sklearn.preprocessing.SplineTransformer` class and its
`extrapolation="periodic"` option.
"""
# %%
# Data exploration on the Bike Sharing Demand dataset
# ---------------------------------------------------
#
# We start by loading the data from the OpenML repository.
from sklearn.datasets import fetch_openml
bike_sharing = fetch_openml("Bike_Sharing_Demand", version=2, as_frame=True)
df = bike_sharing.frame
# %%
# To get a quick understanding of the periodic patterns of the data, let us
# have a look at the average demand per hour during a week.
#
# Note that the week starts on a Sunday, during the weekend. We can clearly
# distinguish the commute patterns in the mornings and evenings of the work days
# and the leisure use of the bikes on the weekends with a more spread peak
# demand around the middle of the days:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 4))
average_week_demand = df.groupby(["weekday", "hour"]).mean()["count"]
average_week_demand.plot(ax=ax)
_ = ax.set(
title="Average hourly bike demand during the week",
xticks=[i * 24 for i in range(7)],
xticklabels=["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
xlabel="Time of the week",
ylabel="Number of bike rentals",
)
# %%
#
# The target of the prediction problem is the absolute count of bike rentals on
# an hourly basis:
df["count"].max()
# %% [markdown]
#
# Let us rescale the target variable (number of hourly bike rentals) to predict
# a relative demand so that the mean absolute error is more easily interpreted
# as a fraction of the maximum demand.
#
# .. note::
#
#     The fit methods of the models used in this notebook all minimize the
#     mean squared error to estimate the conditional mean, instead of the mean
#     absolute error that would fit an estimator of the conditional median.
#
#     When reporting performance measures on the test set in the discussion, we
# instead choose to focus on the mean absolute error that is more
# intuitive than the (root) mean squared error. Note however that the best
# models for one metric are also the best for the other in this study.
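#
#     (As a hypothetical aside, an estimator of the conditional median could be
#     obtained with `HistGradientBoostingRegressor(loss="absolute_error")` in
#     recent scikit-learn versions; this is not explored in this notebook.)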
y = df["count"] / 1000
# %%
fig, ax = plt.subplots(figsize=(12, 4))
y.hist(bins=30, ax=ax)
_ = ax.set(
xlabel="Fraction of rented fleet demand",
ylabel="Number of hours",
)
# %%
# The input feature data frame is a time annotated hourly log of variables
# describing the weather conditions. It includes both numerical and categorical
# variables. Note that the time information has already been expanded into
# several complementary columns.
#
X = df.drop("count", axis="columns")
X
# %%
# .. note::
#
# If the time information was only present as a date or datetime column, we
# could have expanded it into hour-in-the-day, day-in-the-week,
# day-in-the-month, month-in-the-year using pandas:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components
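#
#     For instance (a hypothetical sketch assuming a `datetime` column, which
#     this dataset does not need):
#
#         X["hour"] = X["datetime"].dt.hour
#         X["weekday"] = X["datetime"].dt.dayofweek
#         X["month"] = X["datetime"].dt.month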
#
# We now introspect the distribution of the categorical variables, starting
# with `"weather"`:
#
X["weather"].value_counts()
# %%
# Since there are only 3 `"heavy_rain"` events, we cannot use this category to
# train machine learning models with cross validation. Instead, we simplify the
# representation by collapsing those into the `"rain"` category.
#
X["weather"].replace(to_replace="heavy_rain", value="rain", inplace=True)
# %%
X["weather"].value_counts()
# %%
# As expected, the `"season"` variable is well balanced:
#
X["season"].value_counts()
# %%
# Time-based cross-validation
# ---------------------------
#
# Since the dataset is a time-ordered event log (hourly demand), we will use a
# time-sensitive cross-validation splitter to evaluate our demand forecasting
# model as realistically as possible. We use a gap of 2 days between the train
# and test side of the splits. We also limit the training set size to make the
# performance of the CV folds more stable.
#
# 1000 test datapoints should be enough to quantify the performance of the
# model. This represents a bit less than a month and a half of contiguous test
# data:
from sklearn.model_selection import TimeSeriesSplit
ts_cv = TimeSeriesSplit(
n_splits=5,
gap=48,
max_train_size=10000,
test_size=1000,
)
# %%
# Let us manually inspect the various splits to check that the
# `TimeSeriesSplit` works as we expect, starting with the first split:
all_splits = list(ts_cv.split(X, y))
train_0, test_0 = all_splits[0]
# %%
X.iloc[test_0]
# %%
X.iloc[train_0]
# %%
# We now inspect the last split:
train_4, test_4 = all_splits[4]
# %%
X.iloc[test_4]
# %%
X.iloc[train_4]
# %%
# All is well. We are now ready to do some predictive modeling!
#
# Gradient Boosting
# -----------------
#
# Gradient Boosting Regression with decision trees is often flexible enough to
# efficiently handle heterogeneous tabular data with a mix of categorical and
# numerical features as long as the number of samples is large enough.
#
# Here, we do minimal ordinal encoding for the categorical variables and then
# let the model know that it should treat those as categorical variables by
# using a dedicated tree splitting rule. Since we use an ordinal encoder, we
# pass the list of categorical values explicitly to use a logical order when
# encoding the categories as integers instead of the lexicographical order. This
# also has the added benefit of preventing any issue with unknown categories
# when using cross-validation.
#
# The numerical variables need no preprocessing and, for the sake of simplicity,
# we only try the default hyper-parameters for this model:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_validate
categorical_columns = [
"weather",
"season",
"holiday",
"workingday",
]
categories = [
["clear", "misty", "rain"],
["spring", "summer", "fall", "winter"],
["False", "True"],
["False", "True"],
]
ordinal_encoder = OrdinalEncoder(categories=categories)
gbrt_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", ordinal_encoder, categorical_columns),
],
remainder="passthrough",
),
HistGradientBoostingRegressor(
categorical_features=range(4),
),
)
# %%
#
# Let's evaluate our gradient boosting model with the mean absolute error of the
# relative demand averaged across our 5 time-based cross-validation splits:
def evaluate(model, X, y, cv):
cv_results = cross_validate(
model,
X,
y,
cv=ts_cv,
scoring=["neg_mean_absolute_error", "neg_root_mean_squared_error"],
)
mae = -cv_results["test_neg_mean_absolute_error"]
rmse = -cv_results["test_neg_root_mean_squared_error"]
print(
f"Mean Absolute Error: {mae.mean():.3f} +/- {mae.std():.3f}\n"
f"Root Mean Squared Error: {rmse.mean():.3f} +/- {rmse.std():.3f}"
)
evaluate(gbrt_pipeline, X, y, cv=ts_cv)
# %%
# This model has an average error around 4 to 5% of the maximum demand. This is
# quite good for a first trial without any hyper-parameter tuning! We just had
# to make the categorical variables explicit. Note that the time related
# features are passed as is, i.e. without processing them. But this is not much
# of a problem for tree-based models as they can learn a non-monotonic
# relationship between ordinal input features and the target.
#
# This is not the case for linear regression models, as we will see in the
# following.
#
# Naive linear regression
# -----------------------
#
# As usual for linear models, categorical variables need to be one-hot encoded.
# For consistency, we scale the numerical features to the same 0-1 range using
# :class:`sklearn.preprocessing.MinMaxScaler`, although in this case it does not
# impact the results much because they are already on comparable scales:
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import RidgeCV
import numpy as np
one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
alphas = np.logspace(-6, 6, 25)
naive_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(naive_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance is not good: the average error is around 14% of the maximum
# demand. This is more than three times higher than the average error of the
# gradient boosting model. We can suspect that the naive original encoding of
# the periodic time-related features might prevent the linear regression model
# from properly leveraging the time information: linear regression does not model
# non-monotonic relationships between the input features and the target.
# Non-linear terms have to be engineered in the input.
#
# For example, the raw numerical encoding of the `"hour"` feature prevents the
# linear model from recognizing that an increase in the hour value in the
# morning from 6 to 8 should have a strong positive impact on the number of bike
# rentals, while an increase of similar magnitude in the evening from 18 to 20
# should have a strong negative impact on the predicted number of bike rentals.
#
# Time-steps as categories
# ------------------------
#
# Since the time features are encoded in a discrete manner using integers (24
# unique values in the "hour" feature), we could decide to treat those as
# categorical variables and ignore any assumption implied by the ordering of
# the hour values using a one-hot encoding.
#
# Using one-hot encoding for the time features gives the linear model a lot
# more flexibility as we introduce one additional feature per discrete time
# level.
one_hot_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_linear_pipeline, X, y, cv=ts_cv)
# %%
# The average error rate of this model is 10%, which is much better than using
# the original ordinal encoding of the time feature, confirming our intuition
# that the linear regression model benefits from the added flexibility to not
# treat time progression in a monotonic manner.
#
# However, this introduces a very large number of new features. If the time of
# the day was represented in minutes since the start of the day instead of
# hours, one-hot encoding would have introduced 1440 features instead of 24.
# This could cause some significant overfitting. To avoid this we could use
# :class:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number
# of levels of fine-grained ordinal or numerical variables while still
# benefitting from the non-monotonic expressivity advantages of one-hot
# encoding.
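#
# A hypothetical sketch of that alternative (not used in this notebook), binning
# a minute-of-day column back into 24 one-hot encoded levels:
#
#     from sklearn.preprocessing import KBinsDiscretizer
#
#     minutes_binner = KBinsDiscretizer(
#         n_bins=24, encode="onehot-dense", strategy="uniform"
#     )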
#
# Finally, we also observe that one-hot encoding completely ignores the
# ordering of the hour levels while this could be an interesting inductive bias
# to preserve to some level. In the following we try to explore smooth,
# non-monotonic encoding that locally preserves the relative ordering of time
# features.
#
# Trigonometric features
# ----------------------
#
# As a first attempt, we can try to encode each of those periodic features
# using a sine and cosine transform with the matching period.
#
# Each ordinal time feature is transformed into 2 features that together encode
# equivalent information in a non-monotonic way, and more importantly without
# any jump between the first and the last value of the periodic range.
from sklearn.preprocessing import FunctionTransformer
def sin_transformer(period):
return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi))
def cos_transformer(period):
return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi))
# %%
#
# Let us visualize the effect of this feature expansion on some synthetic hour
# data with a bit of extrapolation beyond hour=23:
import pandas as pd
hour_df = pd.DataFrame(
np.arange(26).reshape(-1, 1),
columns=["hour"],
)
hour_df["hour_sin"] = sin_transformer(24).fit_transform(hour_df)["hour"]
hour_df["hour_cos"] = cos_transformer(24).fit_transform(hour_df)["hour"]
hour_df.plot(x="hour")
_ = plt.title("Trigonometric encoding for the 'hour' feature")
# %%
#
# Let's use a 2D scatter plot with the hours encoded as colors to better see
# how this representation maps the 24 hours of the day to a 2D space, akin to
# some sort of 24 hour version of an analog clock. Note that the "25th" hour is
# mapped back to the 1st hour because of the periodic nature of the sine/cosine
# representation.
fig, ax = plt.subplots(figsize=(7, 5))
sp = ax.scatter(hour_df["hour_sin"], hour_df["hour_cos"], c=hour_df["hour"])
ax.set(
xlabel="sin(hour)",
ylabel="cos(hour)",
)
_ = fig.colorbar(sp)
# %%
#
# We can now build a feature extraction pipeline using this strategy:
cyclic_cossin_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("month_sin", sin_transformer(12), ["month"]),
("month_cos", cos_transformer(12), ["month"]),
("weekday_sin", sin_transformer(7), ["weekday"]),
("weekday_cos", cos_transformer(7), ["weekday"]),
("hour_sin", sin_transformer(24), ["hour"]),
("hour_cos", cos_transformer(24), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_cossin_linear_pipeline = make_pipeline(
cyclic_cossin_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_cossin_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance of our linear regression model with this simple feature
# engineering is a bit better than using the original ordinal time features but
# worse than using the one-hot encoded time features. We will further analyze
# possible reasons for this disappointing outcome at the end of this notebook.
#
# Periodic spline features
# ------------------------
#
# We can try an alternative encoding of the periodic time-related features
# using spline transformations with a large enough number of splines, and as a
# result a larger number of expanded features:
from sklearn.preprocessing import SplineTransformer
def periodic_spline_transformer(period, n_splines=None, degree=3):
if n_splines is None:
n_splines = period
n_knots = n_splines + 1 # periodic and include_bias is True
return SplineTransformer(
degree=degree,
n_knots=n_knots,
knots=np.linspace(0, period, n_knots).reshape(n_knots, 1),
extrapolation="periodic",
include_bias=True,
)
# %%
#
# Again, let us visualize the effect of this feature expansion on some
# synthetic hour data with a bit of extrapolation beyond hour=23:
hour_df = pd.DataFrame(
np.linspace(0, 26, 1000).reshape(-1, 1),
columns=["hour"],
)
splines = periodic_spline_transformer(24, n_splines=12).fit_transform(hour_df)
splines_df = pd.DataFrame(
splines,
columns=[f"spline_{i}" for i in range(splines.shape[1])],
)
pd.concat([hour_df, splines_df], axis="columns").plot(x="hour", cmap=plt.cm.tab20b)
_ = plt.title("Periodic spline-based encoding for the 'hour' feature")
# %%
# Thanks to the use of the `extrapolation="periodic"` parameter, we observe
# that the feature encoding stays smooth when extrapolating beyond midnight.
#
# We can now build a predictive pipeline using this alternative periodic
# feature engineering strategy.
#
# It is possible to use fewer splines than discrete levels for those ordinal
# values. This makes spline-based encoding more efficient than one-hot encoding
# while preserving most of the expressivity:
cyclic_spline_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("cyclic_month", periodic_spline_transformer(12, n_splines=6), ["month"]),
("cyclic_weekday", periodic_spline_transformer(7, n_splines=3), ["weekday"]),
("cyclic_hour", periodic_spline_transformer(24, n_splines=12), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_spline_linear_pipeline = make_pipeline(
cyclic_spline_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_linear_pipeline, X, y, cv=ts_cv)
# %%
# Spline features make it possible for the linear model to successfully
# leverage the periodic time-related features and reduce the error from ~14% to
# ~10% of the maximum demand, which is similar to what we observed with the
# one-hot encoded features.
#
# Qualitative analysis of the impact of features on linear models predictions
# ---------------------------------------------------------------------------
#
# Here, we want to visualize the impact of the feature engineering choices on
# the time related shape of the predictions.
#
# To do so we consider an arbitrary time-based split to compare the predictions
# on a range of held out data points.
naive_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
naive_linear_predictions = naive_linear_pipeline.predict(X.iloc[test_0])
one_hot_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_linear_predictions = one_hot_linear_pipeline.predict(X.iloc[test_0])
cyclic_cossin_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_cossin_linear_predictions = cyclic_cossin_linear_pipeline.predict(X.iloc[test_0])
cyclic_spline_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_linear_predictions = cyclic_spline_linear_pipeline.predict(X.iloc[test_0])
# %%
# We visualize those predictions by zooming on the last 96 hours (4 days) of
# the test set to get some qualitative insights:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by linear models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(naive_linear_predictions[last_hours], "x-", label="Ordinal time features")
ax.plot(
cyclic_cossin_linear_predictions[last_hours],
"x-",
label="Trigonometric time features",
)
ax.plot(
cyclic_spline_linear_predictions[last_hours],
"x-",
label="Spline-based time features",
)
ax.plot(
one_hot_linear_predictions[last_hours],
"x-",
label="One-hot time features",
)
_ = ax.legend()
# %%
# We can draw the following conclusions from the above plot:
#
# - the **raw ordinal time-related features** are problematic because they do
# not capture the natural periodicity: we observe a big jump in the
# predictions at the end of each day when the hour feature goes from 23 back
# to 0. We can expect similar artifacts at the end of each week or each year.
#
# - as expected, the **trigonometric features** (sine and cosine) do not have
# these discontinuities at midnight but the linear regression model fails to
# leverage those features to properly model intra-day variations.
#   Using trigonometric features for higher harmonics or additional
#   trigonometric features for the natural period with different phases could
#   potentially fix this problem (see the sketch after this list).
#
# - the **periodic spline-based features** fix those two problems at once: they
# give more expressivity to the linear model by making it possible to focus
# on specific hours thanks to the use of 12 splines. Furthermore the
# `extrapolation="periodic"` option enforces a smooth representation between
# `hour=23` and `hour=0`.
#
# - the **one-hot encoded features** behave similarly to the periodic
# spline-based features but are more spiky: for instance they can better
#   model the morning peak during the week days since this peak lasts less
# than an hour. However, we will see in the following that what can be an
# advantage for linear models is not necessarily one for more expressive
# models.
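#
# As a hypothetical illustration of the higher-harmonics idea mentioned in the
# list above (not evaluated in this notebook), extra sine/cosine features for
# the `"hour"` column could be built by reusing `sin_transformer` and
# `cos_transformer` with divided periods:
harmonic_hour_transformer = ColumnTransformer(
    transformers=[
        ("hour_sin_h1", sin_transformer(24), ["hour"]),
        ("hour_cos_h1", cos_transformer(24), ["hour"]),
        ("hour_sin_h2", sin_transformer(12), ["hour"]),  # second harmonic
        ("hour_cos_h2", cos_transformer(12), ["hour"]),
    ],
    remainder="drop",
)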
# %%
# We can also compare the number of features extracted by each feature
# engineering pipeline:
naive_linear_pipeline[:-1].transform(X).shape
# %%
one_hot_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_cossin_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_spline_linear_pipeline[:-1].transform(X).shape
# %%
# This confirms that the one-hot encoding and the spline encoding strategies
# create a lot more features for the time representation than the alternatives,
# which in turn gives the downstream linear model more flexibility (degrees of
# freedom) to avoid underfitting.
#
# Finally, we observe that none of the linear models can approximate the true
# bike rentals demand, especially for the peaks that can be very sharp at rush
# hours during the working days but much flatter during the week-ends: the most
# accurate linear models based on splines or one-hot encoding tend to forecast
# peaks of commuting-related bike rentals even on the week-ends and
# under-estimate the commuting-related events during the working days.
#
# These systematic prediction errors reveal a form of under-fitting and can be
# explained by the lack of non-additive modeling of the interactions between
# features (in this case "workingday" and features derived from "hours"). This
# issue will be addressed in the following section.
# %%
# Modeling pairwise interactions with splines and polynomial features
# -------------------------------------------------------------------
#
# Linear models alone cannot model interaction effects between input features.
# It does not help that some features are marginally non-linear as is the case
# with features constructed by `SplineTransformer` (or one-hot encoding or
# binning).
#
# However, it is possible to use the `PolynomialFeatures` class on coarse
# grained splined encoded hours to model the "workingday"/"hours" interaction
# explicitly without introducing too many new variables:
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import FeatureUnion
hour_workday_interaction = make_pipeline(
ColumnTransformer(
[
("cyclic_hour", periodic_spline_transformer(24, n_splines=8), ["hour"]),
("workingday", FunctionTransformer(lambda x: x == "True"), ["workingday"]),
]
),
PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),
)
# %%
# Those features are then combined with the ones already computed in the
# previous spline-based pipeline. We can observe a nice performance improvement
# by modeling this pairwise interaction explicitly:
cyclic_spline_interactions_pipeline = make_pipeline(
FeatureUnion(
[
("marginal", cyclic_spline_transformer),
("interactions", hour_workday_interaction),
]
),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_interactions_pipeline, X, y, cv=ts_cv)
# %%
# Modeling non-linear feature interactions with kernels
# -----------------------------------------------------
#
# The previous analysis highlighted the need to model the interactions between
# `"workingday"` and `"hours"`. Another example of such a non-linear
# interaction that we would like to model could be the impact of the rain that
# might not be the same during the working days and the week-ends and holidays
# for instance.
#
# To model all such interactions, we could use a polynomial expansion on all
# marginal features at once, after their spline-based expansion. However, this
# would create a quadratic number of features, which can cause overfitting and
# computational tractability issues.
#
# Alternatively we can use the Nyström method to compute an approximate
# polynomial kernel expansion. Let us try the latter:
from sklearn.kernel_approximation import Nystroem
cyclic_spline_poly_pipeline = make_pipeline(
cyclic_spline_transformer,
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_poly_pipeline, X, y, cv=ts_cv)
# %%
#
# We observe that this model can almost rival the performance of the gradient
# boosted trees with an average error around 6% of the maximum demand.
#
# Note that while the final step of this pipeline is a linear regression model,
# the intermediate steps such as the spline feature extraction and the Nyström
# kernel approximation are highly non-linear. As a result the compound pipeline
# is much more expressive than a simple linear regression model with raw features.
#
# For the sake of completeness, we also evaluate the combination of one-hot
# encoding and kernel approximation:
one_hot_poly_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder="passthrough",
),
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_poly_pipeline, X, y, cv=ts_cv)
# %%
# While one-hot features were competitive with spline-based features when using
# linear models, this is no longer the case when using a low-rank approximation
# of a non-linear kernel: this can be explained by the fact that spline
# features are smoother and allow the kernel approximation to find a more
# expressive decision function.
#
# Let us now have a qualitative look at the predictions of the kernel models
# and of the gradient boosted trees that should be able to better model
# non-linear interactions between features:
gbrt_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
gbrt_predictions = gbrt_pipeline.predict(X.iloc[test_0])
one_hot_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_poly_predictions = one_hot_poly_pipeline.predict(X.iloc[test_0])
cyclic_spline_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_poly_predictions = cyclic_spline_poly_pipeline.predict(X.iloc[test_0])
# %%
# Again we zoom on the last 4 days of the test set:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by non-linear regression models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(
gbrt_predictions[last_hours],
"x-",
label="Gradient Boosted Trees",
)
ax.plot(
one_hot_poly_predictions[last_hours],
"x-",
label="One-hot + polynomial kernel",
)
ax.plot(
cyclic_spline_poly_predictions[last_hours],
"x-",
label="Splines + polynomial kernel",
)
_ = ax.legend()
# %%
# First, note that trees can naturally model non-linear feature interactions
# since, by default, decision trees are allowed to grow beyond a depth of 2
# levels.
#
# Here we can observe that the combinations of spline features and non-linear
# kernels works quite well and can almost rival the accuracy of the gradient
# boosting regression trees.
#
# On the contrary, one-hot time features do not perform that well with the low
# rank kernel model. In particular they significantly over-estimate the low
# demand hours more than the competing models.
#
# We also observe that none of the models can successfully predict some of the
# peak rentals at the rush hours during the working days. It is possible that
# access to additional features would be required to further improve the
# accuracy of the predictions. For instance, it could be useful to have access
# to the geographical distribution of the fleet at any point in time or the
# fraction of bikes that are immobilized because they need servicing.
#
# Let us finally get a more quantitative look at the prediction errors of those
# three models using the true vs predicted demand scatter plots:
fig, axes = plt.subplots(ncols=3, figsize=(12, 4), sharey=True)
fig.suptitle("Non-linear regression models")
predictions = [
one_hot_poly_predictions,
cyclic_spline_poly_predictions,
gbrt_predictions,
]
labels = [
"One hot + polynomial kernel",
"Splines + polynomial kernel",
"Gradient Boosted Trees",
]
for ax, pred, label in zip(axes, predictions, labels):
ax.scatter(y.iloc[test_0].values, pred, alpha=0.3, label=label)
ax.plot([0, 1], [0, 1], "--", label="Perfect model")
ax.set(
xlim=(0, 1),
ylim=(0, 1),
xlabel="True demand",
ylabel="Predicted demand",
)
ax.legend()
# %%
# This visualization confirms the conclusions we drew from the previous plot.
#
# All models under-estimate the high demand events (working days rush hours),
# but gradient boosting a bit less so. The low demand events are well predicted
# on average by gradient boosting while the one-hot polynomial regression
# pipeline seems to systematically over-estimate demand in that regime. Overall
# the predictions of the gradient boosted trees are closer to the diagonal than
# for the kernel models.
#
# Concluding remarks
# ------------------
#
# We note that we could have obtained slightly better results for kernel models
# by using more components (higher rank kernel approximation) at the cost of
# longer fit and prediction durations. For large values of `n_components`, the
# performance of the one-hot features would even match the spline features.
#
# The `Nystroem` + `RidgeCV` regressor could also have been replaced by
# :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers
# and we would have obtained quite similar results.
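#
# A hypothetical sketch of such a variant (not evaluated here):
#
#     from sklearn.neural_network import MLPRegressor
#
#     mlp_pipeline = make_pipeline(
#         cyclic_spline_transformer,
#         MLPRegressor(hidden_layer_sizes=(64, 64), early_stopping=True),
#     )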
#
# The dataset we used in this case study is sampled on an hourly basis. However,
# cyclic spline-based features could model time-within-day or time-within-week
# very efficiently with finer-grained time resolutions (for instance with
# measurements taken every minute instead of every hour) without introducing
# more features. One-hot encoding time representations would not offer this
# flexibility.
#
# Finally, in this notebook we used `RidgeCV` because it is very efficient from
# a computational point of view. However it models the target variable as a
# Gaussian random variable with constant variance. For positive regression
# problems, it is likely that using a Poisson or Gamma distribution would make
# more sense. This could be achieved by using
# `GridSearchCV(TweedieRegressor(power=2), param_grid={"alpha": alphas})`
# instead of `RidgeCV`.
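#
# A hypothetical sketch of that alternative (not evaluated here):
#
#     from sklearn.linear_model import TweedieRegressor
#     from sklearn.model_selection import GridSearchCV
#
#     tweedie_search = GridSearchCV(
#         TweedieRegressor(power=2), param_grid={"alpha": alphas}
#     )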
| 37.354293 | 97 | 0.733685 |
| true | true |
7907d7df6df81490140dd917609f28f495547590 | 2,532 | py | Python | mars/tensor/arithmetic/hypot.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | ["Apache-2.0"] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/tensor/arithmetic/hypot.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | ["Apache-2.0"] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/tensor/arithmetic/hypot.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | ["Apache-2.0"] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorBinOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode='binary_and')
class TensorHypot(TensorBinOp):
_op_type_ = OperandDef.HYPOT
_func_name = 'hypot'
@infer_dtype(np.hypot)
def hypot(x1, x2, out=None, where=None, **kwargs):
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
z : Tensor
The hypotenuse of the triangle(s).
Examples
--------
>>> import mars.tensor as mt
>>> mt.hypot(3*mt.ones((3, 3)), 4*mt.ones((3, 3))).execute()
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> mt.hypot(3*mt.ones((3, 3)), [4]).execute()
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
op = TensorHypot(**kwargs)
return op(x1, x2, out=out, where=where)
| 32.461538 | 79 | 0.646919 |
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorBinOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode='binary_and')
class TensorHypot(TensorBinOp):
_op_type_ = OperandDef.HYPOT
_func_name = 'hypot'
@infer_dtype(np.hypot)
def hypot(x1, x2, out=None, where=None, **kwargs):
op = TensorHypot(**kwargs)
return op(x1, x2, out=out, where=where)
| true | true |
7907d84162afccf8d4510a0f463af3a1ee4581ef | 9,083 | py | Python | boldui/__init__.py | Wazzaps/boldui | 447a392946b9e8e78e0f7a358d11247a6a55ea4e | ["MIT"] | 3 | 2022-01-04T15:22:50.000Z | 2022-01-08T18:18:20.000Z | boldui/__init__.py | Wazzaps/boldui | 447a392946b9e8e78e0f7a358d11247a6a55ea4e | ["MIT"] | null | null | null | boldui/__init__.py | Wazzaps/boldui | 447a392946b9e8e78e0f7a358d11247a6a55ea4e | ["MIT"] | 1 | 2022-01-28T00:27:05.000Z | 2022-01-28T00:27:05.000Z |
#!/usr/bin/env python3
from __future__ import annotations
import contextlib
import json
import os
import socket
import struct
import boldui.hotrefresh
from simplexp import Expr, var, Oplist
from typing import List
class Actions:
UPDATE_SCENE = 0
HANDLER_REPLY = 1
SET_VAR = 2
WATCH_ACK = 3
def stringify_op(obj, indent=0):
result = ''
if isinstance(obj, list):
result += '['
if len(obj) != 0:
result += '\n'
for op in obj:
result += ' ' * (indent + 2) + stringify_op(op, indent + 2) + ',\n'
if len(obj) != 0:
result += ' ' * indent
result += ']'
return result
elif isinstance(obj, dict) and 'type' in obj:
if obj['type'] in ('clear', 'rect', 'rrect', 'reply', 'setVar', 'evtHnd', 'watch', 'ackWatch', 'if', 'text', 'save',
'restore', 'clipRect', 'image'):
result += 'Ops.' + obj['type'] + '('
if len(obj.keys()) != 1:
result += '\n'
for key in obj.keys():
if key == 'type':
continue
result += ' ' * (indent + 2) + f'{key}={stringify_op(obj[key], indent + 2)},\n'
result += ' ' * indent
result += ')'
return result
return repr(obj)
class Ops:
@staticmethod
def clear(color):
return {'type': 'clear', 'color': color}
@staticmethod
def rect(rect, color):
return {'type': 'rect', 'rect': rect, 'color': color}
@staticmethod
def rrect(rect, color, radius):
return {'type': 'rrect', 'rect': rect, 'color': color, 'radius': radius}
@staticmethod
def reply(ident: int, data: List[Expr | int | float | None]):
return {'type': 'reply', 'id': ident, 'data': data}
@staticmethod
def set_var(name: str, value: Expr):
return {'type': 'setVar', 'name': name, 'value': value}
@staticmethod
def event_handler(rect, events, handler, oplist):
return {
'type': 'evtHnd',
'rect': rect,
'events': events,
'handler': handler,
'oplist': oplist,
}
@staticmethod
def watch_var(id, cond, wait_for_roundtrip, handler):
return {
'type': 'watch',
'id': id,
'cond': cond,
'waitForRoundtrip': wait_for_roundtrip,
'handler': handler
}
@staticmethod
def ack_watch(id):
return {
'type': 'ackWatch',
'id': id,
}
@staticmethod
def text(text, x, y, font_size, color):
return {
'type': 'text',
'text': text,
'x': x,
'y': y,
'fontSize': font_size,
'color': color,
}
@staticmethod
def if_(cond, t, f):
return {'type': 'if', 'cond': cond, 'then': t, 'else': f}
@staticmethod
def save():
return {'type': 'save'}
@staticmethod
def restore():
return {'type': 'restore'}
@staticmethod
def clip_rect(rect):
return {'type': 'clipRect', 'rect': rect}
@staticmethod
def image(uri, rect):
return {'type': 'image', 'uri': uri, 'rect': rect}
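# A minimal illustration (hypothetical values, not part of the original module)
# of composing draw operations with the helpers above into the kind of list a
# scene or an oplist would typically contain:
#
#   example_ops = [
#       Ops.clear(0xFF202020),
#       Ops.rect(rect=(10, 10, 100, 50), color=0xFF4080FF),
#       Ops.text(text="hello", x=20, y=40, font_size=18, color=0xFFFFFFFF),
#   ]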
class ProtocolServer:
def __init__(self, address, reply_handler=None):
self.pending_vars = {}
self.address = address
self._scene = None
self._cached_scene = None
self.reply_handler = reply_handler
if os.path.exists(address):
os.remove(address)
SYSTEMD_SOCK_FD = 3
self.server = socket.fromfd(SYSTEMD_SOCK_FD, socket.AF_UNIX, socket.SOCK_STREAM)
self.socket = None
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
hotrefresh.init(self)
@property
def scene(self):
if self._cached_scene is None:
if callable(self._scene):
self._cached_scene = self._scene()
else:
self._cached_scene = self._scene
return self._cached_scene
@scene.setter
def scene(self, value):
self._scene = value
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
def refresh_scene(self):
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
@contextlib.contextmanager
def batch_update(self):
assert not self._is_batch
self._is_batch = True
self._batch_scene_updated = False
self._batch_vars = {}
yield
if self._batch_scene_updated:
self._send_scene()
elif self._batch_vars:
self._send_remote_var([(name, val) for name, val in self._batch_vars.items()])
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
def serve(self):
while True:
print('Waiting for connection...')
self.server.listen(1)
self.socket, addr = self.server.accept()
print('Client connected', addr)
self.socket.send(b"BoldUI\x00\x01")
# Read header
header = self.socket.recv(8)
if header != b"BoldUI\x00\x01":
print("Invalid header, disconnecting")
break
print("Handshake complete, sending initial scene")
if self.scene:
self._send_scene()
for var in self.pending_vars:
self.set_remote_var(var, self.pending_vars[var][0], self.pending_vars[var][1])
print(f'Server PID is {os.getpid()}')
while True:
packet = b''
packet_length = self.socket.recv(4)
if not packet_length:
break
packet_length = int.from_bytes(packet_length, 'big')
while len(packet) < packet_length:
packet += self.socket.recv(packet_length - len(packet))
if not packet:
break
self._handle_packet(packet)
print('Client disconnected')
break
def _send_packet(self, packet):
# print('Sending packet:', packet)
self.socket.send(len(packet).to_bytes(4, 'big') + packet)
def _handle_packet(self, packet):
action = int.from_bytes(packet[:4], 'big')
data = packet[4:]
if action == Actions.HANDLER_REPLY:
reply_count = int.from_bytes(data[:2], 'big')
data = data[2:]
with self.batch_update():
for i in range(reply_count):
reply_len = int.from_bytes(data[:2], 'big')
reply_id = int.from_bytes(data[2:6], 'big')
reply_data = data[6:6+reply_len]
data_array = []
while reply_data:
item_type = reply_data[0]
if item_type == 0:
data_array.append(int.from_bytes(reply_data[1:9], 'big', signed=True))
reply_data = reply_data[9:]
elif item_type == 1:
data_array.append(struct.unpack('>d', reply_data[1:9])[0])
reply_data = reply_data[9:]
else:
raise ValueError(f"Unknown item type {item_type}")
if self.reply_handler:
# print(f'Reply: {hex(reply_id)} : {data_array}')
self.reply_handler(reply_id, data_array)
else:
print('[app] Unknown packet type:', packet)
def _send_scene(self):
if self.socket:
combined_scene = self.scene
if self._batch_vars is not None:
for key, value in self._batch_vars.items():
combined_scene['vars'][key]['value'] = json.dumps(Oplist(Expr.to_dict(value)).to_list())
self._send_packet(Actions.UPDATE_SCENE.to_bytes(4, 'big') + json.dumps(self.scene).encode())
def set_remote_var(self, name, val_type, value):
self.pending_vars[name] = (val_type, value)
if self._is_batch:
self._batch_vars[name] = value
else:
self._send_remote_var([(name, value)])
def _send_remote_var(self, set_vars):
if self.socket:
parts = []
for name, value in set_vars:
value = Oplist(Expr.to_dict(value)).to_list()
parts.append(name.encode() + b'\x00' + json.dumps(value).encode())
self._send_packet(Actions.SET_VAR.to_bytes(4, 'big') + b'\x00'.join(parts))
def send_watch_ack(self, ack_id: int):
if self.socket:
self._send_packet(Actions.WATCH_ACK.to_bytes(4, 'big') + ack_id.to_bytes(8, 'big'))
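# Minimal client-side sketch (an assumption for illustration, not part of the
# original module): it mirrors the length-prefixed framing used by
# _send_packet()/_handle_packet() above. `sock` is assumed to be a connected
# socket that has already completed the 8-byte "BoldUI\x00\x01" handshake.
def read_packet(sock):
    # Read the 4-byte big-endian length prefix.
    length_bytes = sock.recv(4)
    if not length_bytes:
        return None  # peer closed the connection
    length = int.from_bytes(length_bytes, 'big')
    # Read the payload until the announced length has arrived.
    payload = b''
    while len(payload) < length:
        chunk = sock.recv(length - len(payload))
        if not chunk:
            return None
        payload += chunk
    # The first four bytes of every packet carry the action id (see Actions).
    return int.from_bytes(payload[:4], 'big'), payload[4:]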
| 31.106164
| 124
| 0.526148
|
from __future__ import annotations
import contextlib
import json
import os
import socket
import struct
import boldui.hotrefresh
from simplexp import Expr, var, Oplist
from typing import List
class Actions:
UPDATE_SCENE = 0
HANDLER_REPLY = 1
SET_VAR = 2
WATCH_ACK = 3
def stringify_op(obj, indent=0):
result = ''
if isinstance(obj, list):
result += '['
if len(obj) != 0:
result += '\n'
for op in obj:
result += ' ' * (indent + 2) + stringify_op(op, indent + 2) + ',\n'
if len(obj) != 0:
result += ' ' * indent
result += ']'
return result
elif isinstance(obj, dict) and 'type' in obj:
if obj['type'] in ('clear', 'rect', 'rrect', 'reply', 'setVar', 'evtHnd', 'watch', 'ackWatch', 'if', 'text', 'save',
'restore', 'clipRect', 'image'):
result += 'Ops.' + obj['type'] + '('
if len(obj.keys()) != 1:
result += '\n'
for key in obj.keys():
if key == 'type':
continue
result += ' ' * (indent + 2) + f'{key}={stringify_op(obj[key], indent + 2)},\n'
result += ' ' * indent
result += ')'
return result
return repr(obj)
class Ops:
@staticmethod
def clear(color):
return {'type': 'clear', 'color': color}
@staticmethod
def rect(rect, color):
return {'type': 'rect', 'rect': rect, 'color': color}
@staticmethod
def rrect(rect, color, radius):
return {'type': 'rrect', 'rect': rect, 'color': color, 'radius': radius}
@staticmethod
def reply(ident: int, data: List[Expr | int | float | None]):
return {'type': 'reply', 'id': ident, 'data': data}
@staticmethod
def set_var(name: str, value: Expr):
return {'type': 'setVar', 'name': name, 'value': value}
@staticmethod
def event_handler(rect, events, handler, oplist):
return {
'type': 'evtHnd',
'rect': rect,
'events': events,
'handler': handler,
'oplist': oplist,
}
@staticmethod
def watch_var(id, cond, wait_for_roundtrip, handler):
return {
'type': 'watch',
'id': id,
'cond': cond,
'waitForRoundtrip': wait_for_roundtrip,
'handler': handler
}
@staticmethod
def ack_watch(id):
return {
'type': 'ackWatch',
'id': id,
}
@staticmethod
def text(text, x, y, font_size, color):
return {
'type': 'text',
'text': text,
'x': x,
'y': y,
'fontSize': font_size,
'color': color,
}
@staticmethod
def if_(cond, t, f):
return {'type': 'if', 'cond': cond, 'then': t, 'else': f}
@staticmethod
def save():
return {'type': 'save'}
@staticmethod
def restore():
return {'type': 'restore'}
@staticmethod
def clip_rect(rect):
return {'type': 'clipRect', 'rect': rect}
@staticmethod
def image(uri, rect):
return {'type': 'image', 'uri': uri, 'rect': rect}
class ProtocolServer:
def __init__(self, address, reply_handler=None):
self.pending_vars = {}
self.address = address
self._scene = None
self._cached_scene = None
self.reply_handler = reply_handler
if os.path.exists(address):
os.remove(address)
SYSTEMD_SOCK_FD = 3
self.server = socket.fromfd(SYSTEMD_SOCK_FD, socket.AF_UNIX, socket.SOCK_STREAM)
self.socket = None
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
hotrefresh.init(self)
@property
def scene(self):
if self._cached_scene is None:
if callable(self._scene):
self._cached_scene = self._scene()
else:
self._cached_scene = self._scene
return self._cached_scene
@scene.setter
def scene(self, value):
self._scene = value
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
def refresh_scene(self):
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
@contextlib.contextmanager
def batch_update(self):
assert not self._is_batch
self._is_batch = True
self._batch_scene_updated = False
self._batch_vars = {}
yield
if self._batch_scene_updated:
self._send_scene()
elif self._batch_vars:
self._send_remote_var([(name, val) for name, val in self._batch_vars.items()])
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
def serve(self):
while True:
print('Waiting for connection...')
self.server.listen(1)
self.socket, addr = self.server.accept()
print('Client connected', addr)
self.socket.send(b"BoldUI\x00\x01")
header = self.socket.recv(8)
if header != b"BoldUI\x00\x01":
print("Invalid header, disconnecting")
break
print("Handshake complete, sending initial scene")
if self.scene:
self._send_scene()
for var in self.pending_vars:
self.set_remote_var(var, self.pending_vars[var][0], self.pending_vars[var][1])
print(f'Server PID is {os.getpid()}')
while True:
packet = b''
packet_length = self.socket.recv(4)
if not packet_length:
break
packet_length = int.from_bytes(packet_length, 'big')
while len(packet) < packet_length:
packet += self.socket.recv(packet_length - len(packet))
if not packet:
break
self._handle_packet(packet)
print('Client disconnected')
break
def _send_packet(self, packet):
self.socket.send(len(packet).to_bytes(4, 'big') + packet)
def _handle_packet(self, packet):
action = int.from_bytes(packet[:4], 'big')
data = packet[4:]
if action == Actions.HANDLER_REPLY:
reply_count = int.from_bytes(data[:2], 'big')
data = data[2:]
with self.batch_update():
for i in range(reply_count):
reply_len = int.from_bytes(data[:2], 'big')
reply_id = int.from_bytes(data[2:6], 'big')
reply_data = data[6:6+reply_len]
data_array = []
while reply_data:
item_type = reply_data[0]
if item_type == 0:
data_array.append(int.from_bytes(reply_data[1:9], 'big', signed=True))
reply_data = reply_data[9:]
elif item_type == 1:
data_array.append(struct.unpack('>d', reply_data[1:9])[0])
reply_data = reply_data[9:]
else:
raise ValueError(f"Unknown item type {item_type}")
if self.reply_handler:
self.reply_handler(reply_id, data_array)
else:
print('[app] Unknown packet type:', packet)
def _send_scene(self):
if self.socket:
combined_scene = self.scene
if self._batch_vars is not None:
for key, value in self._batch_vars.items():
combined_scene['vars'][key]['value'] = json.dumps(Oplist(Expr.to_dict(value)).to_list())
self._send_packet(Actions.UPDATE_SCENE.to_bytes(4, 'big') + json.dumps(self.scene).encode())
def set_remote_var(self, name, val_type, value):
self.pending_vars[name] = (val_type, value)
if self._is_batch:
self._batch_vars[name] = value
else:
self._send_remote_var([(name, value)])
def _send_remote_var(self, set_vars):
if self.socket:
parts = []
for name, value in set_vars:
value = Oplist(Expr.to_dict(value)).to_list()
parts.append(name.encode() + b'\x00' + json.dumps(value).encode())
self._send_packet(Actions.SET_VAR.to_bytes(4, 'big') + b'\x00'.join(parts))
def send_watch_ack(self, ack_id: int):
if self.socket:
self._send_packet(Actions.WATCH_ACK.to_bytes(4, 'big') + ack_id.to_bytes(8, 'big'))
| true
| true
|
7907d909b3eb1f58c7d792226124e4131a05139f
| 4,219
|
py
|
Python
|
Graphing/MeanActivityHorizontalBarChart.py
|
actuatech/fuel-tourism
|
60e6953cdcccf164e5cd03916a1c3b3c2b071a85
|
[
"MIT"
] | null | null | null |
Graphing/MeanActivityHorizontalBarChart.py
|
actuatech/fuel-tourism
|
60e6953cdcccf164e5cd03916a1c3b3c2b071a85
|
[
"MIT"
] | null | null | null |
Graphing/MeanActivityHorizontalBarChart.py
|
actuatech/fuel-tourism
|
60e6953cdcccf164e5cd03916a1c3b3c2b071a85
|
[
"MIT"
] | null | null | null |
import plotly.graph_objects as go
import pandas as pd
from .Colors import COLOR_DISCRETE_MAP
from Classification import CATEGORIES
def all_categories_grouping(row: pd.Series) -> str:
"""
    Merge Category, Fuel and Segment into a single string for unique categorization
"""
if row['Fuel'] == 'Battery Electric':
return row['Category'] + ' / ' + row['Fuel']
else:
try:
result = row['Fuel'] + ' / ' + row['Segment'] + ' / ' + row['Euro Standard']
except: # For Off Road type with no Segment nor Euro Standard
result = row['Fuel']
return result
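# Worked example (hypothetical row, not taken from the registration data):
# a petrol vehicle of segment 'Medium' with 'Euro 4' collapses to
# 'Petrol / Medium / Euro 4', whereas a 'Battery Electric' row keeps only
# Category / Fuel, e.g. 'Passenger Cars / Battery Electric'.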
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame.groupby, output_folder):
"""
    Horizontal bar chart representing the mean activity and other activity statistics per unique categorization
    :param stock_and_mileage_df: Dataframe of the vehicle registration list
:param output_folder: output folder name where to store resulting chart
:return: an html file containing the horizontal bar chart of the mean activity
"""
data = stock_and_mileage_df.copy()
# Delete off road data
data = data[data['Category'] != 'Off Road']
# Create single column classification
data['segmentation'] = data.apply(lambda row: all_categories_grouping(row), axis=1)
horizontal_plot = go.Figure()
# Add Activity statistics and stock traces
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers',
name='Activitat màxima', marker_color='rgb(288, 26, 28)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers',
name='Activitat mínima', marker_color='rgb(229, 196, 148)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers',
name="Desviació standard de l'activitat", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers',
name="Estoc", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers',
name="Lifetime cumulative activity mitja", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
    # For each category add the mean activity bar chart (to differentiate by the same colors as the Stock distribution pie chart)
for category in CATEGORIES:
horizontal_plot.add_trace(go.Bar(
y=data[data['Category'] == category]['segmentation'], x=data[data['Category'] == category]['Mean_Activity'],
orientation='h', marker_color=COLOR_DISCRETE_MAP[category],
name=f'Activitat mitjana {category}'
))
# Update plot information
horizontal_plot.update_layout(
title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra",
title_x=0.5,
height=4000,
width=1500,
template='plotly_white',
xaxis_title='Activitat mitja (km/any)',
yaxis_title='Tipologia de vehicle',
hovermode="y unified",
hoverlabel=dict(namelength=100),
xaxis_range=[0, stock_and_mileage_df['Max_Activity'].max()*1.05],
xaxis=dict(
tickmode='array',
tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000],
ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k'])
)
horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
horizontal_plot.show()
# Save plot to html file
filename = output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html"
horizontal_plot.write_html(filename)
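# Usage sketch (hypothetical call; the real caller lives elsewhere in the
# pipeline): pass the per-category stock/mileage dataframe and a folder path,
# e.g. activity_horizontal_bar_chart(stock_and_mileage_df, 'output/charts/'),
# and the interactive chart is written there as an html file.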
| 44.882979
| 120
| 0.605357
|
import plotly.graph_objects as go
import pandas as pd
from .Colors import COLOR_DISCRETE_MAP
from Classification import CATEGORIES
def all_categories_grouping(row: pd.Series) -> str:
if row['Fuel'] == 'Battery Electric':
return row['Category'] + ' / ' + row['Fuel']
else:
try:
result = row['Fuel'] + ' / ' + row['Segment'] + ' / ' + row['Euro Standard']
except:
result = row['Fuel']
return result
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame.groupby, output_folder):
data = stock_and_mileage_df.copy()
data = data[data['Category'] != 'Off Road']
data['segmentation'] = data.apply(lambda row: all_categories_grouping(row), axis=1)
horizontal_plot = go.Figure()
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers',
name='Activitat màxima', marker_color='rgb(288, 26, 28)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers',
name='Activitat mínima', marker_color='rgb(229, 196, 148)'
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers',
name="Desviació standard de l'activitat", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers',
name="Estoc", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers',
name="Lifetime cumulative activity mitja", marker=dict(
color='rgb(800, 800, 800)',
opacity=0)
))
    # For each category add the mean activity bar chart (to differentiate by the same colors as the Stock distribution pie chart)
for category in CATEGORIES:
horizontal_plot.add_trace(go.Bar(
y=data[data['Category'] == category]['segmentation'], x=data[data['Category'] == category]['Mean_Activity'],
orientation='h', marker_color=COLOR_DISCRETE_MAP[category],
name=f'Activitat mitjana {category}'
))
# Update plot information
horizontal_plot.update_layout(
title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra",
title_x=0.5,
height=4000,
width=1500,
template='plotly_white',
xaxis_title='Activitat mitja (km/any)',
yaxis_title='Tipologia de vehicle',
hovermode="y unified",
hoverlabel=dict(namelength=100),
xaxis_range=[0, stock_and_mileage_df['Max_Activity'].max()*1.05],
xaxis=dict(
tickmode='array',
tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000],
ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k'])
)
horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
horizontal_plot.show()
filename = output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html"
horizontal_plot.write_html(filename)
| true
| true
|
7907daba23b38f50e978b56a229f23898be066ee
| 645
|
py
|
Python
|
context.py
|
kumailkermalli16/rcwa
|
a946c3819e5e52ad9c92a8a73c48360749b06196
|
[
"MIT"
] | 11
|
2020-03-11T08:46:55.000Z
|
2021-04-14T04:43:43.000Z
|
context.py
|
FelixSCT/rcwa
|
82571bd35e6b01994ccbd0c58080f3c80dc65024
|
[
"MIT"
] | 20
|
2020-10-02T00:25:19.000Z
|
2021-04-15T03:08:16.000Z
|
context.py
|
FelixSCT/rcwa
|
82571bd35e6b01994ccbd0c58080f3c80dc65024
|
[
"MIT"
] | 5
|
2021-09-20T08:07:51.000Z
|
2022-03-30T08:34:30.000Z
|
"""
Adds the source files to the path for files in any subdirectory
TODO: check that we have not already added to our path.
"""
import os
import sys
fileLocation = os.path.dirname(os.path.abspath(__file__))
sourceLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/source/'))
nkLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/nkData/'))
netlistLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/netlist/'))
testLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/test/'))
sys.path.insert(0, sourceLocation)
sys.path.insert(0, nkLocation)
sys.path.insert(0, netlistLocation)
sys.path.insert(0, testLocation)
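# Typical use (sketch): scripts in subdirectories do `import context` before any
# RCWA imports, so that modules living in RCWA/source, RCWA/nkData,
# RCWA/netlist and RCWA/test resolve without installing the package.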
| 35.833333
| 78
| 0.765891
|
import os
import sys
fileLocation = os.path.dirname(os.path.abspath(__file__))
sourceLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/source/'))
nkLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/nkData/'))
netlistLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/netlist/'))
testLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/test/'))
sys.path.insert(0, sourceLocation)
sys.path.insert(0, nkLocation)
sys.path.insert(0, netlistLocation)
sys.path.insert(0, testLocation)
| true
| true
|
7907dac376e1994a1443e54cf3d587bc2aeb6ada
| 3,458
|
py
|
Python
|
mlprimitives/adapters/pandas.py
|
albact7/MLPrimitives
|
9dbcbe219315a9b79aae825a34f5108802d8a19d
|
[
"MIT"
] | 42
|
2018-07-31T07:33:45.000Z
|
2020-10-26T05:51:35.000Z
|
mlprimitives/adapters/pandas.py
|
albact7/MLPrimitives
|
9dbcbe219315a9b79aae825a34f5108802d8a19d
|
[
"MIT"
] | 177
|
2018-08-28T18:06:20.000Z
|
2020-11-17T18:41:22.000Z
|
mlprimitives/adapters/pandas.py
|
albact7/MLPrimitives
|
9dbcbe219315a9b79aae825a34f5108802d8a19d
|
[
"MIT"
] | 28
|
2018-07-18T13:47:59.000Z
|
2020-10-21T18:53:15.000Z
|
import warnings
from mlprimitives.utils import import_object
_RESAMPLE_AGGS = [
'mean',
'median',
'prod',
'quantile',
'std',
'sum',
'var',
]
def resample(df, rule, on=None, groupby=(), aggregation='mean',
reset_index=True, time_index=None):
"""pd.DataFrame.resample adapter.
Call the `df.resample` method on the given time_index
and afterwards call the indicated aggregation.
Optionally group the dataframe by the indicated columns before
performing the resampling.
    If the groupby option is used, the result is a multi-index dataframe.
Args:
df (pandas.DataFrame):
DataFrame to resample.
rule (str or int):
The offset string or object representing target conversion or an
integer value that will be interpreted as the number of seconds.
on (str or None):
Name of the column to use as the time index. If ``None`` is given, the
DataFrame index is used.
groupby (list):
Optional list of columns to group by.
aggregation (callable or str):
Function or name of the function to use for the aggregation. If a name is given, it
can either be one of the standard pandas aggregation functions or the fully qualified
name of a python function that will be imported and used.
reset_index (bool):
Whether to reset the index after aggregating
time_index (str or None):
Deprecated: This has been renamed to `on`.
Name of the column to use as the time index. If ``None`` is given, the
            DataFrame index is used.
Returns:
pandas.Dataframe:
resampled dataframe
"""
if on is None and time_index is not None:
message = (
            'resample `time_index` argument is deprecated and will be removed'
' in future versions of MLPrimitives. Please use `on` instead.'
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
on = time_index
if groupby:
df = df.groupby(groupby)
if isinstance(rule, int):
rule = '{}s'.format(rule)
dtir = df.resample(rule, on=on)
if not callable(aggregation) and aggregation not in _RESAMPLE_AGGS:
try:
aggregation = import_object(aggregation)
except (AttributeError, ImportError, ValueError):
pass
df = dtir.aggregate(aggregation)
for name in df.index.names:
if name in df:
del df[name]
if reset_index:
df.reset_index(inplace=True)
return df
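# Hypothetical illustration (not part of the MLPrimitives sources): resample a
# tiny time series into per-minute bins and take the mean of each bin. An
# integer rule is interpreted as a number of seconds, as described above.
def _resample_example():
    import pandas as pd
    data = pd.DataFrame({
        'timestamp': pd.date_range('2020-01-01', periods=6, freq='30s'),
        'value': [1, 2, 3, 4, 5, 6],
    })
    # Three one-minute bins are produced, each holding the mean of its two samples.
    return resample(data, rule=60, on='timestamp', aggregation='mean')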
def _join_names(names):
"""Join the names of a multi-level index with an underscore."""
levels = (str(name) for name in names if name != '')
return '_'.join(levels)
def unstack(df, level=-1, reset_index=True):
"""pd.DataFrame.unstack adapter.
Call the `df.unstack` method using the indicated level and afterwards
join the column names using an underscore.
Args:
df (pandas.DataFrame): DataFrame to unstack.
level (str, int or list): Level(s) of index to unstack, can pass level name
reset_index (bool): Whether to reset the index after unstacking
Returns:
pandas.Dataframe: unstacked dataframe
"""
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
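# Minimal sketch with assumed data (not shipped with MLPrimitives): flatten a
# two-level index with unstack() and the underscore-joined column names.
if __name__ == '__main__':
    import pandas as pd
    sample = pd.DataFrame({
        'entity': ['a', 'a', 'b', 'b'],
        'feature': ['x', 'y', 'x', 'y'],
        'value': [1, 2, 3, 4],
    }).set_index(['entity', 'feature'])
    # Columns after flattening: 'entity', 'value_x', 'value_y'.
    print(unstack(sample, level='feature'))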
| 30.333333
| 97
| 0.633314
|
import warnings
from mlprimitives.utils import import_object
_RESAMPLE_AGGS = [
'mean',
'median',
'prod',
'quantile',
'std',
'sum',
'var',
]
def resample(df, rule, on=None, groupby=(), aggregation='mean',
reset_index=True, time_index=None):
if on is None and time_index is not None:
message = (
            'resample `time_index` argument is deprecated and will be removed'
' in future versions of MLPrimitives. Please use `on` instead.'
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
on = time_index
if groupby:
df = df.groupby(groupby)
if isinstance(rule, int):
rule = '{}s'.format(rule)
dtir = df.resample(rule, on=on)
if not callable(aggregation) and aggregation not in _RESAMPLE_AGGS:
try:
aggregation = import_object(aggregation)
except (AttributeError, ImportError, ValueError):
pass
df = dtir.aggregate(aggregation)
for name in df.index.names:
if name in df:
del df[name]
if reset_index:
df.reset_index(inplace=True)
return df
def _join_names(names):
levels = (str(name) for name in names if name != '')
return '_'.join(levels)
def unstack(df, level=-1, reset_index=True):
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
| true
| true
|
7907db87a8072c9923356373293a2eb7c7a3e234
| 2,178
|
py
|
Python
|
pulsar/scripts/_configure_slurm.py
|
usegalaxy-eu/pulsar
|
4dcaf61cceded8f0a83801cf1e9847e62656809f
|
[
"Apache-2.0"
] | 1
|
2021-05-18T02:27:00.000Z
|
2021-05-18T02:27:00.000Z
|
pulsar/scripts/_configure_slurm.py
|
usegalaxy-eu/pulsar
|
4dcaf61cceded8f0a83801cf1e9847e62656809f
|
[
"Apache-2.0"
] | null | null | null |
pulsar/scripts/_configure_slurm.py
|
usegalaxy-eu/pulsar
|
4dcaf61cceded8f0a83801cf1e9847e62656809f
|
[
"Apache-2.0"
] | null | null | null |
"""
This file is also being used by the GalaxyCloudRunner (gcr) Docker image.
"""
from getpass import getuser
from multiprocessing import cpu_count
from socket import gethostname
from string import Template
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=$hostname
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
FastSchedule=1
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=$cpus State=UNKNOWN
PartitionName=debug Nodes=$hostname Default=YES MaxTime=INFINITE State=UP
'''
def main():
template_params = {"hostname": gethostname(),
"user": getuser(),
"cpus": cpu_count()}
config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)
open("/etc/slurm-llnl/slurm.conf", "w").write(config_contents)
if __name__ == "__main__":
main()
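# For illustration only (toy template, not the real config): string.Template
# replaces the $-placeholders, e.g.
# Template("NodeName=$hostname CPUs=$cpus").substitute(hostname="node1", cpus=4)
# yields 'NodeName=node1 CPUs=4'.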
| 22.453608
| 81
| 0.800735
|
from getpass import getuser
from multiprocessing import cpu_count
from socket import gethostname
from string import Template
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=$hostname
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
FastSchedule=1
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=$cpus State=UNKNOWN
PartitionName=debug Nodes=$hostname Default=YES MaxTime=INFINITE State=UP
'''
def main():
template_params = {"hostname": gethostname(),
"user": getuser(),
"cpus": cpu_count()}
config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)
open("/etc/slurm-llnl/slurm.conf", "w").write(config_contents)
if __name__ == "__main__":
main()
| true
| true
|
7907dbe959db150ecdba03325fe09e0918098f76
| 1,551
|
py
|
Python
|
src/transmittals/tests/test_templates.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | 2
|
2021-09-10T19:40:30.000Z
|
2022-01-31T07:15:51.000Z
|
src/transmittals/tests/test_templates.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | null | null | null |
src/transmittals/tests/test_templates.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | 1
|
2021-09-10T19:40:42.000Z
|
2021-09-10T19:40:42.000Z
|
from django.test import TestCase
from django.urls import reverse
from accounts.factories import UserFactory
from transmittals.factories import create_transmittal
ack_button = '<a id="action-ack-transmittal"'
class TransmittalActionTests(TestCase):
def setUp(self):
self.trs = create_transmittal()
self.doc = self.trs.document
self.category = self.doc.category
self.url = reverse(
"document_detail",
args=[
self.category.organisation.slug,
self.category.slug,
self.doc.document_key,
],
)
self.user = UserFactory(
name="User", password="pass", is_superuser=True, category=self.category
)
self.client.login(username=self.user.email, password="pass")
def test_internal_user_cannot_ack_transmittal(self):
self.assertIsNone(self.trs.ack_of_receipt_date)
self.assertFalse(self.user.is_external)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
def test_external_user_can_ack_transmittal(self):
self.user.is_external = True
self.user.save()
res = self.client.get(self.url)
self.assertContains(res, ack_button)
def test_transmittal_cannot_be_acked_twice(self):
self.user.is_external = True
self.trs.ack_receipt(self.user)
self.assertIsNotNone(self.trs.ack_of_receipt_date)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
| 30.411765
| 83
| 0.661509
|
from django.test import TestCase
from django.urls import reverse
from accounts.factories import UserFactory
from transmittals.factories import create_transmittal
ack_button = '<a id="action-ack-transmittal"'
class TransmittalActionTests(TestCase):
def setUp(self):
self.trs = create_transmittal()
self.doc = self.trs.document
self.category = self.doc.category
self.url = reverse(
"document_detail",
args=[
self.category.organisation.slug,
self.category.slug,
self.doc.document_key,
],
)
self.user = UserFactory(
name="User", password="pass", is_superuser=True, category=self.category
)
self.client.login(username=self.user.email, password="pass")
def test_internal_user_cannot_ack_transmittal(self):
self.assertIsNone(self.trs.ack_of_receipt_date)
self.assertFalse(self.user.is_external)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
def test_external_user_can_ack_transmittal(self):
self.user.is_external = True
self.user.save()
res = self.client.get(self.url)
self.assertContains(res, ack_button)
def test_transmittal_cannot_be_acked_twice(self):
self.user.is_external = True
self.trs.ack_receipt(self.user)
self.assertIsNotNone(self.trs.ack_of_receipt_date)
res = self.client.get(self.url)
self.assertNotContains(res, ack_button)
| true
| true
|
7907dcbcc0a65cc92db33cb1ee33822cd81fa136
| 2,223
|
py
|
Python
|
Server/Model/ModelUser.py
|
CorneliusTantius/TCON-API-V2
|
e9628df57291af10a824148e6a8edbb48e13c4e5
|
[
"MIT"
] | 1
|
2021-10-05T17:46:46.000Z
|
2021-10-05T17:46:46.000Z
|
Server/Model/ModelUser.py
|
CorneliusTantius/TCON-API-V2
|
e9628df57291af10a824148e6a8edbb48e13c4e5
|
[
"MIT"
] | null | null | null |
Server/Model/ModelUser.py
|
CorneliusTantius/TCON-API-V2
|
e9628df57291af10a824148e6a8edbb48e13c4e5
|
[
"MIT"
] | null | null | null |
### Package Import ###
from bson import ObjectId
from pydantic import BaseModel
from pydantic import fields
from pydantic.fields import Field
from typing import Optional
### AppCode Import ###
from Server.Model.POID import PyObjectId
###############################################################################
class User(BaseModel):
Id: PyObjectId = Field(default_factory=PyObjectId, alias='_id')
FirstName: str = Field(alias='FirstName')
LastName: str = Field(alias='LastName')
Email: str = Field(alias='Email')
PhoneNumber: str = Field(alias='PhoneNumber')
Password: str = Field(alias='Password')
About: Optional[str] = Field(alias = 'About')
ProfileUrl: Optional[str] = Field(alias='ProfileUrl')
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
schema_extra = {
"example": {
"FirstName": "Jane",
"LastName": "Doe",
"Email": "jdoe@example.com",
"PhoneNumber": "6285588974456",
"Password": "jdoee"
}
}
###############################################################################
class UserUpdateModel(BaseModel):
FirstName: Optional[str] = Field(alias ='FirstName')
LastName: Optional[str] = Field(alias='LastName')
Email: Optional[str] = Field(alias='Email')
PhoneNumber: Optional[str] = Field(alias='PhoneNumber')
Password: Optional[str] = Field(alias='Password')
About: Optional[str] = Field(alias = 'About')
ProfileUrl: Optional[str] = Field(alias='ProfileUrl')
class Config:
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
schema_extra = {
"example": {
"FirstName": "Jane",
"LastName": "Doe",
"Email": "jdoe@example.com",
"PhoneNumber": "6285588974456",
"Password": "jdoee",
"About": "About jane doe",
"ProfileUrl": "https://profileurlembed.com/file/janedoe"
}
}
###############################################################################
| 35.854839
| 79
| 0.530364
|
from bson import ObjectId
from pydantic import BaseModel
from pydantic import fields
from pydantic.fields import Field
from typing import Optional
| true
| true
|
7907dcd4b357df9d1515ca7dd479f62ed609eb66
| 688
|
py
|
Python
|
hcap/settings/general/middleware.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | null | null | null |
hcap/settings/general/middleware.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | 31
|
2020-04-11T13:38:17.000Z
|
2021-09-22T18:51:11.000Z
|
hcap/settings/general/middleware.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | 1
|
2020-04-08T17:04:39.000Z
|
2020-04-08T17:04:39.000Z
|
"""
django:
https://docs.djangoproject.com/en/3.0/topics/http/middleware/
https://docs.djangoproject.com/en/3.0/ref/settings/#middleware
"""
MIDDLEWARE = (
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
| 38.222222
| 66
| 0.77907
|
MIDDLEWARE = (
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
| true
| true
|
7907ddb022b5f6355e69c096183a25f632e01923
| 22,793
|
py
|
Python
|
armin_analysis/model_tests.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
armin_analysis/model_tests.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
armin_analysis/model_tests.py
|
arminbahl/mutant_zebrafish_behavior
|
17bee04b35c23b0f93fcecac9758e6ba19872be1
|
[
"MIT"
] | null | null | null |
import pylab as pl
from get_fish_info import get_fish_info
from fit_integrator_model import get_model_result, get_target_result
import numpy as np
from pathlib import Path
import gmm_model_fit
import pandas as pd
from pymoo.factory import get_problem, get_visualization, get_decomposition
# import random
#
# for dt in [0.001, 0.002, 0.005, 0.01, 0.1]:
#
# tau = 4
# Is = np.arange(0, 30, dt)
# xs = np.empty_like(Is)
# xs[0]
#
# for i in range(1, len(Is)):
# dx = random.gauss(0.2, 5) - xs[i - 1]
# xs[i] = xs[i - 1] + dx * dt / tau
# pl.plot(Is, xs)
# pl.show()
# sdf
root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/surrogate_fish1")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/scn1lab_NIBR")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/disc1_hetinx")
df = pd.read_hdf(root_path / "all_data.h5", key="all_bouts")
#
# df_extracted_features, df_extracted_binned_features, \
# df_extracted_binned_features_same_direction, \
# df_extracted_binned_features_heading_angle_change_histograms, \
# df_extracted_binned_features_inter_bout_interval_histograms = get_mean_fish_info(df)
#
# print(df_extracted_features)
# pl.plot(df_extracted_features.loc["wt", :]["correctness"])
# pl.plot(df_extracted_features.loc["het", :]["correctness"])
# pl.plot(df_extracted_features.loc["hom", :]["correctness"])
#
# pl.figure()
# pl.plot(df_extracted_features.loc["wt", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["het", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["hom", :]["inter_bout_interval"])
#
# pl.figure()
# pl.plot(df_extracted_binned_features.loc["wt", 0])
# pl.plot(df_extracted_binned_features.loc["wt", 1])
# pl.plot(df_extracted_binned_features.loc["wt", 2])
# pl.plot(df_extracted_binned_features.loc["wt", 3])
#
# pl.figure()
# pl.plot(df_extracted_binned_features_same_direction.loc["wt"])
# pl.plot(df_extracted_binned_features_same_direction.loc["het"])
# pl.plot(df_extracted_binned_features_same_direction.loc["hom"])
#
#
# pl.figure()
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 0])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 1])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 2])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 3])
#
# pl.show()
#
#
# pl.show()
#
#
# print(df_extracted_features)
# gg
# sdf
genotype = "hom"
target_df_correctness_as_function_of_coherence, \
target_df_inter_bout_interval_as_function_of_coherence, \
target_df_binned_correctness, \
target_df_binned_same_direction, \
target_df_binned_features_heading_angle_change_histograms, \
target_df_binned_features_inter_bout_interval_histograms, \
target_df_gmm_fitting_results = get_target_result(root_path, genotype)
# colors = ["#000000", "#330000", "#990000", "#CC3333"]
#
# for i in range(4):
# pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), label=f"Coherence {i*25}%", color=colors[i], linewidth=2)
#
# pl.xlabel("Heading angle change (deg)")
# pl.ylabel("Probability")
# pl.legend()
#
# fig = pl.figure()
# fig.suptitle("Target functions")
# pl.subplot(211)
# pl.plot(target_df_correctness_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot(target_df_inter_bout_interval_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')
errornames = ["Error: 'Correctness as function of coherence'",
"Error: 'Inter-bout interval as function of coherence'",
"Error: 'Binned correctness at 25, 50, 100 %'",
"Error: 'Binned same direction'",
"Error: 'Histogram weights'"]
#errornames = ["Mixed"]
repeat = 1
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
#
#
# for i in range(7):
# F[-1, :, i] = F[-1, :, i] / np.max(F[-1, :, i])
# print(F.shape)
#
# i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4] + F[-1, :, 5] + F[-1, :, 6])
# print(F[-1, i6, 0])
# dd
#get_decomposition("asf").do(F[-1], [1, 1, 1, 1, 1, 1, 1]).argmin()
#print(I)
#sdfsdf
#X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}_single_error.npy")
#F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}_single_error.npy")
# from pymoo.factory import get_decision_making, get_reference_directions
#
# ref_dirs = get_reference_directions("das-dennis", 4, n_partitions=12)
# F = get_problem("dtlz1").pareto_front(ref_dirs)
#
# weights = np.array([10.25, 10.25, 0.25, 0.25])
# a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F, return_pseudo_weights=True)
# pl.plot(F[:, 0], F[:,1], 'o')
# pl.plot(F[a, 0], F[a,1], 'o')
# pl.show()
#
# print(a, pseudo_weights, F.shape)
# ghj
from pymoo.factory import get_decision_making, get_reference_directions
#weights = [1000, 1000, 1000, 0, 0, 0, 0]
#a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F[-1], return_pseudo_weights=True)
#print(pseudo_weights[0])
#print(a, pseudo_weights)
#dfg
for i in range(5):
#pl.hist(F[-1, :, i])
#pl.show()
#print(np.percentile(F[-1, :, i], 75))
#print(np.max(F[-1, :, i]) - np.min(F[-1, :, i]))
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
# print(F.shape)
#
#i6 = a
#i1 = np.argmin(F[-1, :, 0])
# i2 = np.argmin(F[-1, :, 1])
# i3 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500)
# i4 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3])
# i5 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25 + F[-1, :, 6]*5800)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 6800)
# i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
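# Each error column was rescaled by its own 75th percentile above, so the terms
# are comparable; the weighted sum below then selects the individual that best
# trades off all five objectives, with the inter-bout-interval error weighted 3x.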
i6 = np.argmin(F[-1, :, 0] + 3*F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
#from pymoo.factory import get_decision_making
#dm = get_decision_making("high-tradeoff")
#I = dm.do(pf)
# print(F.shape)
# np.set_printoptions(precision=4, suppress=True)
# print((X[-1, i]))
# #gdfgh
# for error_i in range(len(errornames)):
# pl.figure()
# pl.title(errornames[error_i])
# bp = pl.boxplot(F[:, :, error_i].T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen+1], [F[gen, :, error_i].min()], s=5, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# dd
#
# pl.figure()
# pl.title("Compromise between all error functions")
# #error = F[:, :, 0] + F[:, :, 1]*500 + F[:, :, 3] + F[:, :, 5]*0.25 + F[:, :, 6]*500
# error = F[:, :, 0] + F[:, :, 1]*2500 + F[:, :, 3]*5 + F[:, :, 5]*0.5 + F[:, :, 6]*1500
#
# bp = pl.boxplot(error.T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen + 1], [error[gen].min()], s=10, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# pl.figure()
# pl.scatter(F[-1, :, 0], F[-1, :, 1], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0], F[-1, i1, 1], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0], F[-1, i2, 1], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0], F[-1, i3, 1], s=15, marker='o', c='C3', label="Compromise")
# pl.legend()
# pl.xlabel(errornames[0])
# pl.ylabel(errornames[1])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500, F[-1, :, 3], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500, F[-1, i1, 3], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500, F[-1, i2, 3], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500, F[-1, i3, 3], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500, F[-1, i4, 3], s=15, marker='o', c='C4', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1 and 2")
# pl.ylabel(errornames[3])
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3], F[-1, :, 5], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3], F[-1, i1, 5], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3], F[-1, i2, 5], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3], F[-1, i3, 5], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3], F[-1, i4, 5], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3], F[-1, i5, 5], s=15, marker='o', c='C5', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, and 3")
# pl.ylabel(errornames[5])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25, F[-1, :, 6], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3] + F[-1, i1, 5]*0.25, F[-1, i1, 6], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3] + F[-1, i2, 5]*0.25, F[-1, i2, 6], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3] + F[-1, i3, 5]*0.25, F[-1, i3, 6], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3] + F[-1, i4, 5]*0.25, F[-1, i4, 6], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3] + F[-1, i5, 5]*0.25, F[-1, i5, 6], s=15, marker='o', c='C5', label="Compromise between 1, 2, 3, and 4")
# pl.scatter(F[-1, i6, 0] + F[-1, i6, 1]*500 + F[-1, i6, 3] + F[-1, i6, 5]*0.25, F[-1, i6, 6], s=15, marker='o', c='C6', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, 3, and 4")
# pl.ylabel(errornames[6])
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i1])
# fig.suptitle("Best for 'Correctness as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i2])
# fig.suptitle("Best for 'Inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C3')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i4])
# fig.suptitle("Compromise between all three error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C4')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i5])
# fig.suptitle("Compromise between all four error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C5')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
# pl.subplot(224)
# pl.plot(target_df_binned_same_direction, 'o-', color='black')
# pl.plot(model_df_binned_same_direction, 'o--', color='C5')
# pl.xlabel("Time since last bout (s)")
# pl.ylabel("Correctness (%)")
fig = pl.figure()
model_df_correctness_as_function_of_coherence, \
model_df_inter_bout_interval_as_function_of_coherence, \
model_df_binned_correctness, \
model_df_binned_same_direction, \
model_df_binned_features_heading_angle_change_histograms, \
model_df_binned_features_inter_bout_interval_histograms, \
model_df_gmm_fitting_results = get_model_result(X[-1, i6])
fig.suptitle("Compromise between all five error functions")
pl.subplot(231)
pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Correctness (%)")
pl.subplot(232)
pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Inter-bout interval (s)")
pl.subplot(233)
for i in range(4):
pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C6')
pl.xlabel("Time (s)")
pl.ylabel("Correctness (%)")
pl.subplot(234)
pl.plot(target_df_binned_same_direction, 'o-', color='black')
pl.plot(model_df_binned_same_direction, 'o--', color='C6')
pl.xlabel("Time since last bout (s)")
pl.ylabel("Correctness (%)")
# pl.subplot(235)
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_left"].values, '-o', color='black', label='s_left')
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_center"].values, '-o', color='black', label='s_center')
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_right"].values, '-o', color='black', label='s_right')
#
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_left"].values, '--o', color='C6', label='s_left')
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_center"].values, '--o', color='C6', label='s_center')
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_right"].values, '--o', color='C6', label='s_right')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Weight")
# pl.legend()
pl.subplot(235)
for i in range(4):
pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"black")
pl.plot(model_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"C6", linestyle='--')
pl.xlabel("Heading angle change")
pl.ylabel("Probability")
pl.show()
found_parameters = []
for repeat in range(12):
for genotype in ["wt", "het", "hom"]:
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
for i in range(5):
#F[-1, :, i] = F[-1, :, i] / np.median(F[-1, :, i])
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + 5 * F[-1, :, 3] + F[-1, :, 5] + 5 * F[-1, :, 6])
#i6 = np.argmin(F[-1, :, 0] + 5 * F[-1, :, 1] + 20 * F[-1, :, 4] + F[-1, :, 5] + 5 * F[-1, :, 6])
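# The active line below picks, from the final generation, the candidate with the smallest
# weighted sum of the five normalized error terms, i.e. a single compromise solution.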
i6 = np.argmin(F[-1, :, 0] + 3 * F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
#i6 = np.argmin(F[-1, :, 0] + 2 * F[-1, :, 1] + F[-1, :, 2] + 3 * F[-1, :, 3] + F[-1, :, 5] + F[-1, :, 6])
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 500 + F[-1, :, 3] + F[-1, :, 5] * 0.25 + F[-1, :, 6] * 500)
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 1500)
#i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
found_parameters.append([genotype, repeat, 49] + list(X[-1, i6, :]))
df = pd.DataFrame(found_parameters,
columns=["genotype",
"repeat",
"gen",
"tau",
"sigma",
"T",
"p_below",
"p_above"]).astype(dtype={"repeat": "int64", "gen": "int64"}, copy=False)
df.set_index(["genotype", 'repeat', 'gen'], inplace=True)
df.sort_index(inplace=True)
df.to_hdf(root_path / "found_parameters.h5", key="parameters", complevel=9)
| 47.092975
| 185
| 0.660641
|
import pylab as pl
from get_fish_info import get_fish_info
from fit_integrator_model import get_model_result, get_target_result
import numpy as np
from pathlib import Path
import gmm_model_fit
import pandas as pd
from pymoo.factory import get_problem, get_visualization, get_decomposition
root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/surrogate_fish1")
df = pd.read_hdf(root_path / "all_data.h5", key="all_bouts")
genotype = "hom"
target_df_correctness_as_function_of_coherence, \
target_df_inter_bout_interval_as_function_of_coherence, \
target_df_binned_correctness, \
target_df_binned_same_direction, \
target_df_binned_features_heading_angle_change_histograms, \
target_df_binned_features_inter_bout_interval_histograms, \
target_df_gmm_fitting_results = get_target_result(root_path, genotype)
medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')
errornames = ["Error: 'Correctness as function of coherence'",
"Error: 'Inter-bout interval as function of coherence'",
"Error: 'Binned correctness at 25, 50, 100 %'",
"Error: 'Binned same direction'",
"Error: 'Histogram weights'"]
repeat = 1
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
from pymoo.factory import get_decision_making, get_reference_directions
for i in range(5):
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
fig = pl.figure()
model_df_correctness_as_function_of_coherence, \
model_df_inter_bout_interval_as_function_of_coherence, \
model_df_binned_correctness, \
model_df_binned_same_direction, \
model_df_binned_features_heading_angle_change_histograms, \
model_df_binned_features_inter_bout_interval_histograms, \
model_df_gmm_fitting_results = get_model_result(X[-1, i6])
fig.suptitle("Compromise between all five error functions")
pl.subplot(231)
pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Correctness (%)")
pl.subplot(232)
pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Inter-bout interval (s)")
pl.subplot(233)
for i in range(4):
pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C6')
pl.xlabel("Time (s)")
pl.ylabel("Correctness (%)")
pl.subplot(234)
pl.plot(target_df_binned_same_direction, 'o-', color='black')
pl.plot(model_df_binned_same_direction, 'o--', color='C6')
pl.xlabel("Time since last bout (s)")
pl.ylabel("Correctness (%)")
pl.subplot(235)
for i in range(4):
pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"black")
pl.plot(model_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"C6", linestyle='--')
pl.xlabel("Heading angle change")
pl.ylabel("Probability")
pl.show()
found_parameters = []
for repeat in range(12):
for genotype in ["wt", "het", "hom"]:
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
for i in range(5):
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
i6 = np.argmin(F[-1, :, 0] + 3 * F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
found_parameters.append([genotype, repeat, 49] + list(X[-1, i6, :]))
df = pd.DataFrame(found_parameters,
columns=["genotype",
"repeat",
"gen",
"tau",
"sigma",
"T",
"p_below",
"p_above"]).astype(dtype={"repeat": "int64", "gen": "int64"}, copy=False)
df.set_index(["genotype", 'repeat', 'gen'], inplace=True)
df.sort_index(inplace=True)
df.to_hdf(root_path / "found_parameters.h5", key="parameters", complevel=9)
| true
| true
|
7907de8a2a7c5b8051e6416660fe0ab2b6c12acc
| 12,375
|
py
|
Python
|
src/analysis/TrainMood.py
|
pjshu/QQZoneMood
|
b637e4f26fa34aed415c326a50c708a91d79ec19
|
[
"MIT"
] | 487
|
2018-12-12T10:53:34.000Z
|
2022-03-27T08:38:42.000Z
|
src/analysis/TrainMood.py
|
DanielisLearning/QQZoneMood
|
bc949855271a4d9944e1501599755cfdfdb8cfd6
|
[
"MIT"
] | 18
|
2019-04-07T11:32:13.000Z
|
2021-04-26T13:07:12.000Z
|
src/analysis/TrainMood.py
|
DanielisLearning/QQZoneMood
|
bc949855271a4d9944e1501599755cfdfdb8cfd6
|
[
"MIT"
] | 126
|
2018-12-12T10:54:24.000Z
|
2022-03-13T16:32:36.000Z
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
Generate the various datasets needed for training.
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
"""
Score the images with Google's NIMA model.
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score.
:return:
"""
# File containing the NIMA model's prediction results
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score)  # NOTE: without inplace=True or re-assignment this call does not modify the frame
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def calculate_send_time(self):
"""
Compute the send time of each mood post.
It is binned into the following six periods:
0. midnight: 00:00-04:00
1. early morning: 04:00-08:00
2. morning: 08:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
# Four-hour bin width, in seconds
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
# Using apply here would exceed the sentiment API's QPS quota, so iterate row by row instead
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
Export the data that still needs to be classified.
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.loc[:, columns]  # .ix has been removed from modern pandas; .loc is the label-based equivalent
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i += 1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
vector = re.findall(re.compile('[0-9]'), vector)
str_vector = "".join(vector)
sum = 0
length = len(str_vector)
for i in range(length):
# NOTE: each 0/1 flag is raised to the same power (length - 1), so for the binary
# presence flags used here this effectively counts the set flags; a positional
# encoding would instead use a base raised to (length - 1 - i).
sum += int(str_vector[i]) ** (length - 1)
return sum
def remove_waste_emoji(text):
text = re.subn(re.compile(r'\[em\].*?\[/em\]'), '', text)[0]
text = re.subn(re.compile(r'@\{.*?\}'), '', text)[0]
return text
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
| 43.269231
| 141
| 0.627232
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def calculate_send_time(self):
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.ix[:, columns]
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i +=1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
vector = re.findall(re.compile('[0-9]'), vector)
str_vector = "".join(vector)
sum = 0
length = len(str_vector)
for i in range(length):
sum += int(str_vector[i]) **(length - 1)
return sum
def remove_waste_emoji(text):
text = re.subn(re.compile('\[em\].*?\[\/em\]'), '', text)[0]
text = re.subn(re.compile('@\{.*?\}'), '', text)[0]
return text
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
train.export_train_text()
| true
| true
|
7907deac94beb95385049e8d105c2829b468beae
| 1,944
|
py
|
Python
|
AnomalyDetection/DB.py
|
Py-Contributors/Hands-on-Machine-learning-with-Scikit-learn-Tensorflow-and-Keras
|
cbb392b85e82d135adcd9591c43bfb4adaa73972
|
[
"MIT"
] | 4
|
2020-09-29T11:04:08.000Z
|
2020-10-31T19:35:24.000Z
|
AnomalyDetection/DB.py
|
codePerfectPlus/Hands-on-Machine-learning-with-Scikit-learn-Tensorflow-and-Keras
|
cbb392b85e82d135adcd9591c43bfb4adaa73972
|
[
"MIT"
] | 4
|
2020-10-11T03:50:01.000Z
|
2020-11-04T08:24:23.000Z
|
AnomalyDetection/DB.py
|
Py-Contributors/Hands-on-Machine-learning-with-Scikit-learn-Tensorflow-and-Keras
|
cbb392b85e82d135adcd9591c43bfb4adaa73972
|
[
"MIT"
] | 3
|
2020-09-27T07:43:12.000Z
|
2020-11-02T08:11:40.000Z
|
import numpy as np
import pandas as pd
# %matplotlib auto  # IPython magic, not valid syntax in a plain .py file; commented out so the script parses
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
from sklearn.covariance import EllipticEnvelope
from sklearn.cluster import KMeans
# from sklearn.  # incomplete import left unfinished in the original; commented out so the script parses
plt.style.use("fivethirtyeight")
fig=plt.figure(figsize=(12,15))
data=pd.read_csv("Social_Network_Ads.csv")
data=data.iloc[:,2:]
treeclass=RandomForestClassifier(n_estimators=100,max_depth=10)
X,y=data.iloc[:,:-1].values,data.iloc[:,-1].values
def plotting_decision_(X,Y,CL):
X=StandardScaler().fit_transform(X)
X_train,x_test,Y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
xx_min,xx_max=X[:,0].min()-0.5,X[:,0].max()+0.6
xx,yy=np.meshgrid(np.arange(xx_min,xx_max,0.2),np.arange(xx_min,xx_max,0.2))
cmap_bright=ListedColormap(["red","azure"])
cl=CL()
cl.fit(X_train,Y_train)
score=cl.predict(x_test)
Z=cl.decision_function(np.c_[xx.ravel(),yy.ravel()])
Z=Z.reshape(xx.shape)
plt.contour(xx,yy,Z,cmap=plt.cm.jet)
plt.scatter(X_train[:,0],X_train[:,1],c=Y_train,cmap=cmap_bright)
plt.text(xx.max()-.3,xx.min()+.3,(np.mean(score)),size=15,horizontalalignment="right")
#sns.relplot(x="Age",y="EstimatedSalary",data=data,hue="Purchased")
#sns.boxplot(x=data["Purchased"],y=data["EstimatedSalary"],whis=2,saturation=0.6)
#from sklearn.ensemble import IsolationForest
#IF=IsolationForest(n_estimators=100,bootstrap=False)
#IF.fit(X[:,0].reshape(-1,1))
#xx=np.linspace(X[:,0].min()-5,X[:,0].max()+5,len(data)).reshape(-1,1)
#outlier=IF.predict(xx)
#anomaly_score=IF.decision_function(xx)
#plt.plot(xx,anomaly_score,label="automated")
| 30.375
| 90
| 0.739712
|
import numpy as np
import pandas as pd
%matplotlib auto
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
from sklearn.covariance import EllipticEnvelope
from sklearn.cluster import KMeans
from sklearn.
plt.style.use("fivethirtyeight")
fig=plt.figure(figsize=(12,15))
data=pd.read_csv("Social_Network_Ads.csv")
data=data.iloc[:,2:]
treeclass=RandomForestClassifier(n_estimators=100,max_depth=10)
X,y=data.iloc[:,:-1].values,data.iloc[:,-1].values
def plotting_decision_(X,Y,CL):
X=StandardScaler().fit_transform(X)
X_train,x_test,Y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
xx_min,xx_max=X[:,0].min()-0.5,X[:,0].max()+0.6
xx,yy=np.meshgrid(np.arange(xx_min,xx_max,0.2),np.arange(xx_min,xx_max,0.2))
cmap_bright=ListedColormap(["red","azure"])
cl=CL()
cl.fit(X_train,Y_train)
score=cl.predict(x_test)
Z=cl.decision_function(np.c_[xx.ravel(),yy.ravel()])
Z=Z.reshape(xx.shape)
plt.contour(xx,yy,Z,cmap=plt.cm.jet)
plt.scatter(X_train[:,0],X_train[:,1],c=Y_train,cmap=cmap_bright)
plt.text(xx.max()-.3,xx.min()+.3,(np.mean(score)),size=15,horizontalalignment="right")
| false
| true
|
7907deeedee248487711662c9648165afa8d28f6
| 4,606
|
py
|
Python
|
tech_project/lib/python2.7/site-packages/cms/test_utils/util/context_managers.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | 4
|
2019-05-09T02:09:54.000Z
|
2021-11-09T11:27:19.000Z
|
cms/test_utils/util/context_managers.py
|
thisisalamin/django-cms
|
eeb1e4712b3866e243daf800c142e2199e4be9df
|
[
"BSD-3-Clause"
] | 5
|
2018-08-29T04:17:41.000Z
|
2018-09-04T05:15:38.000Z
|
cms/test_utils/util/context_managers.py
|
thisisalamin/django-cms
|
eeb1e4712b3866e243daf800c142e2199e4be9df
|
[
"BSD-3-Clause"
] | 4
|
2019-01-26T09:58:37.000Z
|
2019-06-24T08:12:43.000Z
|
# -*- coding: utf-8 -*-
import sys
from contextlib import contextmanager
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp, _exists
from cms.apphook_pool import apphook_pool
from django.contrib.auth import get_user_model
from django.utils.six.moves import StringIO
from django.utils.translation import get_language, activate
class NULL:
pass
class StdOverride(object):
def __init__(self, std='out', buffer=None):
self.std = std
self.buffer = buffer or StringIO()
def __enter__(self):
setattr(sys, 'std%s' % self.std, self.buffer)
return self.buffer
def __exit__(self, type, value, traceback):
setattr(sys, 'std%s' % self.std, getattr(sys, '__std%s__' % self.std))
class StdoutOverride(StdOverride):
"""
This overrides Python's standard output and redirects it to a StringIO
object, so that one can test the output of the program.
example:
lines = None
with StdoutOverride() as buffer:
# print stuff
lines = buffer.getvalue()
"""
def __init__(self, buffer=None):
super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
def __init__(self, language):
self.newlang = language
def __enter__(self):
self.oldlang = get_language()
activate(self.newlang)
def __exit__(self, type, value, traceback):
activate(self.oldlang)
class TemporaryDirectory:
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def cleanup(self):
if _exists(self.name):
_rmtree(self.name)
def __exit__(self, exc, value, tb):
self.cleanup()
class UserLoginContext(object):
def __init__(self, testcase, user):
self.testcase = testcase
self.user = user
def __enter__(self):
loginok = self.testcase.client.login(username=getattr(self.user, get_user_model().USERNAME_FIELD),
password=getattr(self.user, get_user_model().USERNAME_FIELD))
self.old_user = getattr(self.testcase, 'user', None)
self.testcase.user = self.user
self.testcase.assertTrue(loginok)
def __exit__(self, exc, value, tb):
self.testcase.user = self.old_user
if not self.testcase.user:
delattr(self.testcase, 'user')
self.testcase.client.logout()
class ChangeModel(object):
"""
Changes attributes on a model while within the context.
These changes *ARE* saved to the database for the context!
"""
def __init__(self, instance, **overrides):
self.instance = instance
self.overrides = overrides
def __enter__(self):
self.old = {}
for key, value in self.overrides.items():
self.old[key] = getattr(self.instance, key, NULL)
setattr(self.instance, key, value)
self.instance.save()
def __exit__(self, exc, value, tb):
for key in self.overrides.keys():
old_value = self.old[key]
if old_value is NULL:
delattr(self.instance, key)
else:
setattr(self.instance, key, old_value)
self.instance.save()
@contextmanager
def disable_logger(logger):
old = logger.disabled
logger.disabled = True
yield
logger.disabled = old
@contextmanager
def apphooks(*hooks):
_apphooks = apphook_pool.apphooks
_apps = apphook_pool.apps
_discovered = apphook_pool.discovered
apphook_pool.clear()
for hook in hooks:
apphook_pool.register(hook)
try:
yield
finally:
apphook_pool.apphooks = _apphooks
apphook_pool.apps = _apps
apphook_pool.discovered = _discovered
@contextmanager
def signal_tester(*signals):
env = SignalTester()
for signal in signals:
signal.connect(env)
try:
yield env
finally:
for signal in signals:
signal.disconnect(env)
class SignalTester(object):
def __init__(self):
self.call_count = 0
self.calls = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.calls.append((args, kwargs))
| 26.32
| 106
| 0.640035
|
import sys
from contextlib import contextmanager
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp, _exists
from cms.apphook_pool import apphook_pool
from django.contrib.auth import get_user_model
from django.utils.six.moves import StringIO
from django.utils.translation import get_language, activate
class NULL:
pass
class StdOverride(object):
def __init__(self, std='out', buffer=None):
self.std = std
self.buffer = buffer or StringIO()
def __enter__(self):
setattr(sys, 'std%s' % self.std, self.buffer)
return self.buffer
def __exit__(self, type, value, traceback):
setattr(sys, 'std%s' % self.std, getattr(sys, '__std%s__' % self.std))
class StdoutOverride(StdOverride):
def __init__(self, buffer=None):
super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
def __init__(self, language):
self.newlang = language
def __enter__(self):
self.oldlang = get_language()
activate(self.newlang)
def __exit__(self, type, value, traceback):
activate(self.oldlang)
class TemporaryDirectory:
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def cleanup(self):
if _exists(self.name):
_rmtree(self.name)
def __exit__(self, exc, value, tb):
self.cleanup()
class UserLoginContext(object):
def __init__(self, testcase, user):
self.testcase = testcase
self.user = user
def __enter__(self):
loginok = self.testcase.client.login(username=getattr(self.user, get_user_model().USERNAME_FIELD),
password=getattr(self.user, get_user_model().USERNAME_FIELD))
self.old_user = getattr(self.testcase, 'user', None)
self.testcase.user = self.user
self.testcase.assertTrue(loginok)
def __exit__(self, exc, value, tb):
self.testcase.user = self.old_user
if not self.testcase.user:
delattr(self.testcase, 'user')
self.testcase.client.logout()
class ChangeModel(object):
def __init__(self, instance, **overrides):
self.instance = instance
self.overrides = overrides
def __enter__(self):
self.old = {}
for key, value in self.overrides.items():
self.old[key] = getattr(self.instance, key, NULL)
setattr(self.instance, key, value)
self.instance.save()
def __exit__(self, exc, value, tb):
for key in self.overrides.keys():
old_value = self.old[key]
if old_value is NULL:
delattr(self.instance, key)
else:
setattr(self.instance, key, old_value)
self.instance.save()
@contextmanager
def disable_logger(logger):
old = logger.disabled
logger.disabled = True
yield
logger.disabled = old
@contextmanager
def apphooks(*hooks):
_apphooks = apphook_pool.apphooks
_apps = apphook_pool.apps
_discovered = apphook_pool.discovered
apphook_pool.clear()
for hook in hooks:
apphook_pool.register(hook)
try:
yield
finally:
apphook_pool.apphooks = _apphooks
apphook_pool.apps = _apps
apphook_pool.discovered = _discovered
@contextmanager
def signal_tester(*signals):
env = SignalTester()
for signal in signals:
signal.connect(env)
try:
yield env
finally:
for signal in signals:
signal.disconnect(env)
class SignalTester(object):
def __init__(self):
self.call_count = 0
self.calls = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.calls.append((args, kwargs))
| true
| true
|
7907dfe24f2f594e8548feacc56082d64042413b
| 297
|
py
|
Python
|
signing/processorimpl/sayhiimplementation.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
signing/processorimpl/sayhiimplementation.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
signing/processorimpl/sayhiimplementation.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
from twisted.internet import defer
from signing.processor import expose
class SayHiImplementation(object):
"""
Responds with 'hello, %s' % arg
"""
@expose
def say_hi(self, identifier):
d = defer.Deferred()
d.callback('hello, %s' % identifier)
return d
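# Illustrative usage (not from the original; assumes `expose` leaves the method directly callable):
#   SayHiImplementation().say_hi('world').addCallback(print)  # prints 'hello, world'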
| 22.846154
| 44
| 0.632997
|
from twisted.internet import defer
from signing.processor import expose
class SayHiImplementation(object):
@expose
def say_hi(self, identifier):
d = defer.Deferred()
d.callback('hello, %s' % identifier)
return d
| true
| true
|
7907e202d68831469e2b292777fc5eca272ffa62
| 1,093
|
py
|
Python
|
yolo/config.py
|
banayoyo/yolo
|
c12a2f2097d0b892f1268bc51b44d3905c3ab75a
|
[
"MIT"
] | null | null | null |
yolo/config.py
|
banayoyo/yolo
|
c12a2f2097d0b892f1268bc51b44d3905c3ab75a
|
[
"MIT"
] | null | null | null |
yolo/config.py
|
banayoyo/yolo
|
c12a2f2097d0b892f1268bc51b44d3905c3ab75a
|
[
"MIT"
] | null | null | null |
import os
#
# path and dataset parameter
#
# This cfg file supplies configuration via import; it is not set through main's command-line arguments.
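# Typical usage (illustrative, not from the original): another module imports this file,
# e.g. `import config as cfg`, and reads cfg.IMAGE_SIZE, cfg.CLASSES, cfg.BATCH_SIZE, ...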
DATA_PATH = 'data'
PASCAL_PATH = os.path.join(DATA_PATH, 'pascal_voc')
CACHE_PATH = os.path.join(PASCAL_PATH, 'cache')
OUTPUT_DIR = os.path.join(PASCAL_PATH, 'output')
WEIGHTS_DIR = os.path.join(PASCAL_PATH, 'weights')
WEIGHTS_FILE = None
# WEIGHTS_FILE = os.path.join(DATA_PATH, 'weights', 'YOLO_small.ckpt')
CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor']
FLIPPED = True
#
# model parameter
#
IMAGE_SIZE = 448
CELL_SIZE = 7
BOXES_PER_CELL = 2
ALPHA = 0.1
DISP_CONSOLE = False
OBJECT_SCALE = 1.0
NOOBJECT_SCALE = 1.0
CLASS_SCALE = 2.0
COORD_SCALE = 5.0
#
# solver parameter
#
GPU = ''
LEARNING_RATE = 0.0001
DECAY_STEPS = 30000
DECAY_RATE = 0.1
STAIRCASE = True
BATCH_SIZE = 45
MAX_ITER = 15000
SUMMARY_ITER = 10
SAVE_ITER = 1000
#
# test parameter
#
THRESHOLD = 0.2
IOU_THRESHOLD = 0.5
| 14.012821
| 71
| 0.670631
|
import os
DATA_PATH = 'data'
PASCAL_PATH = os.path.join(DATA_PATH, 'pascal_voc')
CACHE_PATH = os.path.join(PASCAL_PATH, 'cache')
OUTPUT_DIR = os.path.join(PASCAL_PATH, 'output')
WEIGHTS_DIR = os.path.join(PASCAL_PATH, 'weights')
WEIGHTS_FILE = None
CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor']
FLIPPED = True
IMAGE_SIZE = 448
CELL_SIZE = 7
BOXES_PER_CELL = 2
ALPHA = 0.1
DISP_CONSOLE = False
OBJECT_SCALE = 1.0
NOOBJECT_SCALE = 1.0
CLASS_SCALE = 2.0
COORD_SCALE = 5.0
GPU = ''
LEARNING_RATE = 0.0001
DECAY_STEPS = 30000
DECAY_RATE = 0.1
STAIRCASE = True
BATCH_SIZE = 45
MAX_ITER = 15000
SUMMARY_ITER = 10
SAVE_ITER = 1000
THRESHOLD = 0.2
IOU_THRESHOLD = 0.5
| true
| true
|
7907e21d7d422ea320284f04fa9d9aa2b281c061
| 1,896
|
py
|
Python
|
leetcode-CP/Problem solving/496. Next Greater Element I.py
|
vijay2020pc/100-days-of-code
|
b59e54471015b294bad408289e6d9101d7494b01
|
[
"MIT"
] | null | null | null |
leetcode-CP/Problem solving/496. Next Greater Element I.py
|
vijay2020pc/100-days-of-code
|
b59e54471015b294bad408289e6d9101d7494b01
|
[
"MIT"
] | null | null | null |
leetcode-CP/Problem solving/496. Next Greater Element I.py
|
vijay2020pc/100-days-of-code
|
b59e54471015b294bad408289e6d9101d7494b01
|
[
"MIT"
] | null | null | null |
"""
The next greater element of some element x in an array is the first greater element that is to the right of x in the same array.
You are given two distinct 0-indexed integer arrays nums1 and nums2, where nums1 is a subset of nums2.
For each 0 <= i < nums1.length, find the index j such that nums1[i] == nums2[j] and determine the next greater element of nums2[j] in nums2. If there is no next greater element, then the answer for this query is -1.
Return an array ans of length nums1.length such that ans[i] is the next greater element as described above.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2]
Output: [-1,3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 4 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
- 1 is underlined in nums2 = [1,3,4,2]. The next greater element is 3.
- 2 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4]
Output: [3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 2 is underlined in nums2 = [1,2,3,4]. The next greater element is 3.
- 4 is underlined in nums2 = [1,2,3,4]. There is no next greater element, so the answer is -1.
Constraints:
1 <= nums1.length <= nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 104
All integers in nums1 and nums2 are unique.
All the integers of nums1 also appear in nums2.
Solution:
"""
from collections import defaultdict
from typing import List
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
ans = defaultdict(lambda: -1)
stack = []
for i in range(len(nums2)):
while stack and stack[-1] < nums2[i]:
ans[stack.pop()] = nums2[i]
stack.append(nums2[i])
for i in range(len(nums1)):
nums1[i] = ans[nums1[i]]
return nums1
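# A short trace of the monotonic-stack pass for Example 1 (added for illustration,
# not part of the original solution): for nums2 = [1,3,4,2] we push 1; 3 > 1, so
# ans[1] = 3 and 1 is popped, then 3 is pushed; 4 > 3, so ans[3] = 4 and 3 is popped,
# then 4 is pushed; 2 < 4, so 2 is just pushed. Anything never popped keeps the
# defaultdict value -1, hence nums1 = [4,1,2] maps to [-1, 3, -1].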
| 39.5
| 215
| 0.670359
|
The next greater element of some element x in an array is the first greater element that is to the right of x in the same array.
You are given two distinct 0-indexed integer arrays nums1 and nums2, where nums1 is a subset of nums2.
For each 0 <= i < nums1.length, find the index j such that nums1[i] == nums2[j] and determine the next greater element of nums2[j] in nums2. If there is no next greater element, then the answer for this query is -1.
Return an array ans of length nums1.length such that ans[i] is the next greater element as described above.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2]
Output: [-1,3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 4 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
- 1 is underlined in nums2 = [1,3,4,2]. The next greater element is 3.
- 2 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4]
Output: [3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 2 is underlined in nums2 = [1,2,3,4]. The next greater element is 3.
- 4 is underlined in nums2 = [1,2,3,4]. There is no next greater element, so the answer is -1.
Constraints:
1 <= nums1.length <= nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 104
All integers in nums1 and nums2 are unique.
All the integers of nums1 also appear in nums2.
Solution:
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
ans = defaultdict(lambda: -1)
stack = []
for i in range(len(nums2)):
while stack and stack[-1] < nums2[i]:
ans[stack.pop()] = nums2[i]
stack.append(nums2[i])
for i in range(len(nums1)):
nums1[i] = ans[nums1[i]]
return nums1
| false
| true
|
7907e2624a3fa7f31efa4869977ca38688805362
| 3,045
|
py
|
Python
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
githubmilesma/huaweicloud-sdk-python-v3
|
9d9449ed68a609ca65f0aa50b5b2a1c28445bf03
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateIndirectPartnerAccountResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'transfer_id': 'str'
}
attribute_map = {
'transfer_id': 'transfer_id'
}
def __init__(self, transfer_id=None):
"""UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._transfer_id = None
self.discriminator = None
if transfer_id is not None:
self.transfer_id = transfer_id
@property
def transfer_id(self):
"""Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.
Transaction ID; it is returned only in a successful response.
:return: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:rtype: str
"""
return self._transfer_id
@transfer_id.setter
def transfer_id(self, transfer_id):
"""Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.
Transaction ID; it is returned only in a successful response.
:param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:type: str
"""
self._transfer_id = transfer_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateIndirectPartnerAccountResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.432432
| 89
| 0.572742
|
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateIndirectPartnerAccountResponse(SdkResponse):
sensitive_list = []
openapi_types = {
'transfer_id': 'str'
}
attribute_map = {
'transfer_id': 'transfer_id'
}
def __init__(self, transfer_id=None):
super().__init__()
self._transfer_id = None
self.discriminator = None
if transfer_id is not None:
self.transfer_id = transfer_id
@property
def transfer_id(self):
return self._transfer_id
@transfer_id.setter
def transfer_id(self, transfer_id):
self._transfer_id = transfer_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, UpdateIndirectPartnerAccountResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
7907e2ef9017c5ca381a123f82ee9a08309d578c
| 3,199
|
py
|
Python
|
server/app/scrapers/maarten.py
|
damienallen/makelaardij-notify
|
ea8e37e1b0f867487b90590c5273e7fb25d868cf
|
[
"MIT"
] | null | null | null |
server/app/scrapers/maarten.py
|
damienallen/makelaardij-notify
|
ea8e37e1b0f867487b90590c5273e7fb25d868cf
|
[
"MIT"
] | 15
|
2021-02-13T23:46:28.000Z
|
2021-02-25T15:36:08.000Z
|
server/app/scrapers/maarten.py
|
damienallen/makelaardij-notify
|
ea8e37e1b0f867487b90590c5273e7fb25d868cf
|
[
"MIT"
] | null | null | null |
import asyncio
from typing import List
from app.common import SkipListing
from app.scrapers.base import BaseScraper
class MaartenScraper(BaseScraper):
MAKELAARDIJ: str = "maarten"
BASE_URL: str = "https://www.maartenmakelaardij.nl"
# Specific functions
async def extract_object_urls(self, soup) -> List[str]:
"""
Extract apartment object urls
"""
items = soup.find_all("a")
urls: List[str] = []
for item in items:
if "woning/rotterdam-" in item["href"]:
urls.append(item["href"])
return list(set(urls))
async def get_page_url(self, page_num: int) -> str:
"""
Format page url
"""
return f"{self.BASE_URL}/aanbod/rotterdam/"
async def get_apartment_urls(self) -> List[str]:
"""
Fetch list of apartment urls from inventory
"""
urls = await self.scrape_page(0)
return urls
def extract_features(self, soup):
"""
Extract feature metadata from listing
"""
meta_data = {
"makelaardij": self.MAKELAARDIJ,
"building": {},
"unit": {"energy": {}, "tags": []},
}
dt = soup.find_all("dt")
dd = soup.find_all("dd")
# Features
for ind, key in enumerate(dt):
if "Bouwjaar" in key.string:
meta_data["building"]["year_constructed"] = self.find_int(
dd[ind].string
)
elif "Woonoppervlakte" in key.string:
meta_data["unit"]["area"] = self.find_float(dd[ind].text.split(" ")[0])
elif "Aantal kamers" in key.string:
meta_data["unit"]["num_rooms"] = self.find_int(dd[ind].text)
elif "verdiepingen" in key.string:
meta_data["unit"]["num_floors"] = self.find_int(dd[ind].text)
elif "Status" in key.string:
meta_data["available"] = "Beschikbaar" in dd[ind].text
elif "Buitenruimte" in key.string and "TUIN" in dd[ind].text:
meta_data["unit"]["tags"].append("garden")
# Other fields
meta_data["address"] = soup.find("span", {"class": "adres"}).string
meta_data["asking_price"] = self.find_int(
soup.find("span", {"class": "price"}).string.replace(".", "")
)
description = soup.find("div", {"id": "read-more-content"}).children
for p in description:
p_text = str(p.text)
if "Eigen grond" in p_text:
meta_data["unit"]["own_land"] = True
elif "erfpacht" in p_text:
meta_data["unit"]["own_land"] = False
if "Energielabel" in p_text:
label = p_text.split("Energielabel: ")[1][0]
meta_data["unit"]["energy"]["label"] = label
break
# Bounce broken listings
if not meta_data["unit"].get("area"):
raise SkipListing("Unable to find area")
return meta_data
if __name__ == "__main__":
scraper = MaartenScraper()
loop = asyncio.get_event_loop()
loop.run_until_complete(scraper.start())
| 29.897196
| 87
| 0.546733
|
import asyncio
from typing import List
from app.common import SkipListing
from app.scrapers.base import BaseScraper
class MaartenScraper(BaseScraper):
MAKELAARDIJ: str = "maarten"
BASE_URL: str = "https://www.maartenmakelaardij.nl"
async def extract_object_urls(self, soup) -> List[str]:
items = soup.find_all("a")
urls: List[str] = []
for item in items:
if "woning/rotterdam-" in item["href"]:
urls.append(item["href"])
return list(set(urls))
async def get_page_url(self, page_num: int) -> str:
return f"{self.BASE_URL}/aanbod/rotterdam/"
async def get_apartment_urls(self) -> List[str]:
urls = await self.scrape_page(0)
return urls
def extract_features(self, soup):
meta_data = {
"makelaardij": self.MAKELAARDIJ,
"building": {},
"unit": {"energy": {}, "tags": []},
}
dt = soup.find_all("dt")
dd = soup.find_all("dd")
for ind, key in enumerate(dt):
if "Bouwjaar" in key.string:
meta_data["building"]["year_constructed"] = self.find_int(
dd[ind].string
)
elif "Woonoppervlakte" in key.string:
meta_data["unit"]["area"] = self.find_float(dd[ind].text.split(" ")[0])
elif "Aantal kamers" in key.string:
meta_data["unit"]["num_rooms"] = self.find_int(dd[ind].text)
elif "verdiepingen" in key.string:
meta_data["unit"]["num_floors"] = self.find_int(dd[ind].text)
elif "Status" in key.string:
meta_data["available"] = "Beschikbaar" in dd[ind].text
elif "Buitenruimte" in key.string and "TUIN" in dd[ind].text:
meta_data["unit"]["tags"].append("garden")
meta_data["address"] = soup.find("span", {"class": "adres"}).string
meta_data["asking_price"] = self.find_int(
soup.find("span", {"class": "price"}).string.replace(".", "")
)
description = soup.find("div", {"id": "read-more-content"}).children
for p in description:
p_text = str(p.text)
if "Eigen grond" in p_text:
meta_data["unit"]["own_land"] = True
elif "erfpacht" in p_text:
meta_data["unit"]["own_land"] = False
if "Energielabel" in p_text:
label = p_text.split("Energielabel: ")[1][0]
meta_data["unit"]["energy"]["label"] = label
break
if not meta_data["unit"].get("area"):
raise SkipListing("Unable to find area")
return meta_data
if __name__ == "__main__":
scraper = MaartenScraper()
loop = asyncio.get_event_loop()
loop.run_until_complete(scraper.start())
| true
| true
|
7907e346838018cc0f6c31d867e0acc883026424
| 131
|
py
|
Python
|
app/forms.py
|
Ronlin1/To-Do-App
|
e641c0bd125643bfe050df6268a1f0224cdbbe5b
|
[
"MIT"
] | null | null | null |
app/forms.py
|
Ronlin1/To-Do-App
|
e641c0bd125643bfe050df6268a1f0224cdbbe5b
|
[
"MIT"
] | null | null | null |
app/forms.py
|
Ronlin1/To-Do-App
|
e641c0bd125643bfe050df6268a1f0224cdbbe5b
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Todo
class TodoForm(forms.ModelForm):
class Meta:
model = Todo
fields="__all__"
| 16.375
| 32
| 0.755725
|
from django import forms
from .models import Todo
class TodoForm(forms.ModelForm):
class Meta:
model = Todo
fields="__all__"
| true
| true
|
7907e3bfe411dcc29a4789cb39d72e3f363b0c16
| 2,230
|
py
|
Python
|
tests/diff.py
|
kdeyev/mongomock
|
c321eea5e00086dd6db1552477a3a474a9f4438e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/diff.py
|
kdeyev/mongomock
|
c321eea5e00086dd6db1552477a3a474a9f4438e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/diff.py
|
kdeyev/mongomock
|
c321eea5e00086dd6db1552477a3a474a9f4438e
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from platform import python_version
from six import integer_types, string_types, text_type
class _NO_VALUE(object):
pass
# we don't use NOTHING because it might be returned from various APIs
NO_VALUE = _NO_VALUE()
_SUPPORTED_TYPES = (float, bool, str, datetime.datetime, type(None)) + \
string_types + integer_types + (text_type, bytes) + (type,)
if python_version() < '3.0':
dict_type = dict
else:
from collections import abc
dict_type = abc.Mapping
def diff(a, b, path=None):
path = _make_path(path)
if isinstance(a, (list, tuple)):
return _diff_sequences(a, b, path)
if type(a).__name__ == 'SON':
a = dict(a)
if type(b).__name__ == 'SON':
b = dict(b)
if isinstance(a, dict_type):
return _diff_dicts(a, b, path)
if type(a).__name__ == 'ObjectId':
a = str(a)
if type(b).__name__ == 'ObjectId':
b = str(b)
if type(a).__name__ == 'Int64':
a = int(a)
if type(b).__name__ == 'Int64':
b = int(b)
if not isinstance(a, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(a))) # pragma: no cover
if not isinstance(b, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(b))) # pragma: no cover
if a != b:
return [(path[:], a, b)]
return []
def _diff_dicts(a, b, path):
if not isinstance(a, type(b)):
return [(path[:], type(a), type(b))]
returned = []
for key in set(a) | set(b):
a_value = a.get(key, NO_VALUE)
b_value = b.get(key, NO_VALUE)
path.append(key)
if a_value is NO_VALUE or b_value is NO_VALUE:
returned.append((path[:], a_value, b_value))
else:
returned.extend(diff(a_value, b_value, path))
path.pop()
return returned
def _diff_sequences(a, b, path):
if len(a) != len(b):
return [(path[:], a, b)]
returned = []
for i, a_i in enumerate(a):
path.append(i)
returned.extend(diff(a_i, b[i], path))
path.pop()
return returned
def _make_path(path):
if path is None:
return []
return path
| 26.86747
| 77
| 0.590583
|
import datetime
from platform import python_version
from six import integer_types, string_types, text_type
class _NO_VALUE(object):
pass
NO_VALUE = _NO_VALUE()
_SUPPORTED_TYPES = (float, bool, str, datetime.datetime, type(None)) + \
string_types + integer_types + (text_type, bytes) + (type,)
if python_version() < '3.0':
dict_type = dict
else:
from collections import abc
dict_type = abc.Mapping
def diff(a, b, path=None):
path = _make_path(path)
if isinstance(a, (list, tuple)):
return _diff_sequences(a, b, path)
if type(a).__name__ == 'SON':
a = dict(a)
if type(b).__name__ == 'SON':
b = dict(b)
if isinstance(a, dict_type):
return _diff_dicts(a, b, path)
if type(a).__name__ == 'ObjectId':
a = str(a)
if type(b).__name__ == 'ObjectId':
b = str(b)
if type(a).__name__ == 'Int64':
a = int(a)
if type(b).__name__ == 'Int64':
b = int(b)
if not isinstance(a, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(a))) # pragma: no cover
if not isinstance(b, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(b))) # pragma: no cover
if a != b:
return [(path[:], a, b)]
return []
def _diff_dicts(a, b, path):
if not isinstance(a, type(b)):
return [(path[:], type(a), type(b))]
returned = []
for key in set(a) | set(b):
a_value = a.get(key, NO_VALUE)
b_value = b.get(key, NO_VALUE)
path.append(key)
if a_value is NO_VALUE or b_value is NO_VALUE:
returned.append((path[:], a_value, b_value))
else:
returned.extend(diff(a_value, b_value, path))
path.pop()
return returned
def _diff_sequences(a, b, path):
if len(a) != len(b):
return [(path[:], a, b)]
returned = []
for i, a_i in enumerate(a):
path.append(i)
returned.extend(diff(a_i, b[i], path))
path.pop()
return returned
def _make_path(path):
if path is None:
return []
return path
| true
| true
|
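# A small usage sketch for the diff helper above: each difference is reported
# as a (path, value_in_a, value_in_b) tuple, and the NO_VALUE sentinel marks
# a key that exists on only one side. Assumes the module above is importable
# as tests.diff, as in the mongomock test suite.
from tests.diff import NO_VALUE, diff

a = {'name': 'alice', 'scores': [1, 2, 3], 'meta': {'active': True}}
b = {'name': 'alice', 'scores': [1, 5, 3], 'meta': {'active': False, 'tag': 'x'}}

for path, left, right in diff(a, b):
    marker = ' (one-sided)' if left is NO_VALUE or right is NO_VALUE else ''
    print(path, left, right, marker)
# Reports ['scores', 1] 2 5, ['meta', 'active'] True False, and a one-sided
# ['meta', 'tag'] entry whose left value is NO_VALUE.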
7907e427b919f20227e8b4dd1ddb1ff67822781a
| 262
|
py
|
Python
|
2018_3_Cooper_Type/RoboFont/simple_interpolation.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | 6
|
2018-03-24T17:31:51.000Z
|
2021-11-18T06:02:09.000Z
|
2018_3_Cooper_Type/RoboFont/simple_interpolation.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | null | null | null |
2018_3_Cooper_Type/RoboFont/simple_interpolation.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | null | null | null |
font = CurrentFont()
one = font['A']
two = font['A.2']
steps = 4
if one.isCompatible(two):
for x in range(steps):
n = "A.interp" + str(x+1)
g = font.newGlyph(n)
f = (x+1)/(steps+1)
        print(f)
g.interpolate(f, one, two)
| 18.714286
| 34
| 0.515267
|
font = CurrentFont()
one = font['A']
two = font['A.2']
steps = 4
if one.isCompatible(two):
for x in range(steps):
n = "A.interp" + str(x+1)
g = font.newGlyph(n)
f = (x+1)/(steps+1)
        print(f)
g.interpolate(f, one, two)
| false
| true
|
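# A quick check of the interpolation factors the RoboFont script above steps
# through, runnable in plain Python (no CurrentFont needed): with steps = 4
# the factors are evenly spaced strictly between the two masters, where 0.0
# would be glyph 'A' and 1.0 would be 'A.2'.
steps = 4
factors = [(x + 1) / (steps + 1) for x in range(steps)]
print(factors)  # [0.2, 0.4, 0.6, 0.8]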
7907e4b3624c79f06dda602a207ad90a216917bc
| 11,498
|
py
|
Python
|
stor/plotters/plotters.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 19
|
2021-06-29T20:06:09.000Z
|
2022-02-09T04:33:00.000Z
|
stor/plotters/plotters.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 8
|
2021-07-04T03:21:51.000Z
|
2021-12-27T07:56:09.000Z
|
stor/plotters/plotters.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 6
|
2021-10-04T17:15:30.000Z
|
2022-03-15T08:40:01.000Z
|
import argparse
import binascii
import os
from enum import Enum
from stor.plotters.bladebit import get_bladebit_install_info, plot_bladebit
from stor.plotters.chiapos import get_chiapos_install_info, plot_stor
from stor.plotters.madmax import get_madmax_install_info, plot_madmax
from stor.plotters.install_plotter import install_plotter
from pathlib import Path
from typing import Any, Dict, Optional
class Options(Enum):
TMP_DIR = 1
TMP_DIR2 = 2
FINAL_DIR = 3
K = 4
MEMO = 5
ID = 6
BUFF = 7
NUM_BUCKETS = 8
STRIPE_SIZE = 9
NUM_THREADS = 10
NOBITFIELD = 11
PLOT_COUNT = 12
MADMAX_NUM_BUCKETS_PHRASE3 = 13
MADMAX_WAITFORCOPY = 14
POOLKEY = 15
FARMERKEY = 16
MADMAX_TMPTOGGLE = 17
POOLCONTRACT = 18
MADMAX_RMULTI2 = 19
BLADEBIT_WARMSTART = 20
BLADEBIT_NONUMA = 21
VERBOSE = 22
OVERRIDE_K = 23
ALT_FINGERPRINT = 24
EXCLUDE_FINAL_DIR = 25
CONNECT_TO_DAEMON = 26
stor_plotter = [
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.K,
Options.MEMO,
Options.ID,
Options.BUFF,
Options.NUM_BUCKETS,
Options.STRIPE_SIZE,
Options.NUM_THREADS,
Options.NOBITFIELD,
Options.OVERRIDE_K,
Options.ALT_FINGERPRINT,
Options.POOLCONTRACT,
Options.FARMERKEY,
Options.POOLKEY,
Options.PLOT_COUNT,
Options.EXCLUDE_FINAL_DIR,
Options.CONNECT_TO_DAEMON,
]
madmax_plotter = [
Options.K,
Options.PLOT_COUNT,
Options.NUM_THREADS,
Options.NUM_BUCKETS,
Options.MADMAX_NUM_BUCKETS_PHRASE3,
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.MADMAX_WAITFORCOPY,
Options.POOLKEY,
Options.FARMERKEY,
Options.POOLCONTRACT,
Options.MADMAX_TMPTOGGLE,
Options.MADMAX_RMULTI2,
Options.CONNECT_TO_DAEMON,
]
bladebit_plotter = [
Options.NUM_THREADS,
Options.PLOT_COUNT,
Options.FARMERKEY,
Options.POOLKEY,
Options.POOLCONTRACT,
Options.ID,
Options.BLADEBIT_WARMSTART,
Options.BLADEBIT_NONUMA,
Options.FINAL_DIR,
Options.VERBOSE,
Options.CONNECT_TO_DAEMON,
]
def get_plotters_root_path(root_path: Path) -> Path:
return root_path / "plotters"
def build_parser(subparsers, root_path, option_list, name, plotter_desc):
parser = subparsers.add_parser(name, description=plotter_desc)
for option in option_list:
if option is Options.K:
parser.add_argument(
"-k",
"--size",
type=int,
help="K value.",
default=32,
)
u_default = 0 if name == "chiapos" else 256
if option is Options.NUM_BUCKETS:
parser.add_argument(
"-u",
"--buckets",
type=int,
help="Number of buckets.",
default=u_default,
)
if option is Options.STRIPE_SIZE:
parser.add_argument(
"-s",
"--stripes",
type=int,
help="Stripe size.",
default=0,
)
if option is Options.TMP_DIR:
parser.add_argument(
"-t",
"--tmp_dir",
type=str,
dest="tmpdir",
help="Temporary directory 1.",
default=str(root_path) + "/",
)
if option is Options.TMP_DIR2:
parser.add_argument(
"-2",
"--tmp_dir2",
type=str,
dest="tmpdir2",
help="Temporary directory 2.",
default=str(root_path) + "/",
)
if option is Options.FINAL_DIR:
parser.add_argument(
"-d",
"--final_dir",
type=str,
dest="finaldir",
help="Final directory.",
default=str(root_path) + "/",
)
if option is Options.BUFF:
parser.add_argument(
"-b",
"--buffer",
type=int,
help="Size of the buffer, in MB.",
default=0,
)
r_default = 4 if name == "madmax" else 0
if option is Options.NUM_THREADS:
parser.add_argument(
"-r",
"--threads",
type=int,
help="Num threads.",
default=r_default,
)
if option is Options.NOBITFIELD:
parser.add_argument(
"-e",
"--nobitfield",
action="store_true",
help="Disable bitfield.",
default=False,
)
if option is Options.MEMO:
parser.add_argument(
"-m",
"--memo",
type=binascii.unhexlify,
help="Memo variable.",
)
if option is Options.ID:
parser.add_argument(
"-i",
"--id",
type=binascii.unhexlify,
help="Plot id",
)
if option is Options.PLOT_COUNT:
parser.add_argument(
"-n",
"--count",
type=int,
help="Number of plots to create (default = 1)",
default=1,
)
if option is Options.MADMAX_NUM_BUCKETS_PHRASE3:
parser.add_argument(
"-v",
"--buckets3",
type=int,
help="Number of buckets for phase 3+4 (default = 256)",
default=256,
)
if option is Options.MADMAX_WAITFORCOPY:
parser.add_argument(
"-w",
"--waitforcopy",
action="store_true",
help="Wait for copy to start next plot",
default=False,
)
if option is Options.MADMAX_TMPTOGGLE:
parser.add_argument(
"-G",
"--tmptoggle",
action="store_true",
help="Alternate tmpdir/tmpdir2 (default = false)",
default=False,
)
if option is Options.POOLCONTRACT:
parser.add_argument(
"-c",
"--contract",
type=str,
help="Pool Contract Address (64 chars)",
default="",
)
if option is Options.MADMAX_RMULTI2:
parser.add_argument(
"-K",
"--rmulti2",
type=int,
help="Thread multiplier for P2 (default = 1)",
default=1,
)
if option is Options.POOLKEY:
parser.add_argument(
"-p",
"--pool-key",
type=binascii.unhexlify,
help="Pool Public Key (48 bytes)",
default="",
)
if option is Options.FARMERKEY:
parser.add_argument(
"-f",
"--farmerkey",
type=binascii.unhexlify,
help="Farmer Public Key (48 bytes)",
default="",
)
if option is Options.BLADEBIT_WARMSTART:
parser.add_argument(
"-w",
"--warmstart",
action="store_true",
help="Warm start",
default=False,
)
if option is Options.BLADEBIT_NONUMA:
parser.add_argument(
"-m",
"--nonuma",
action="store_true",
help="Disable numa",
default=False,
)
if option is Options.VERBOSE:
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set verbose",
default=False,
)
if option is Options.OVERRIDE_K:
parser.add_argument(
"--override-k",
dest="override",
action="store_true",
help="Force size smaller than 32",
default=False,
)
if option is Options.ALT_FINGERPRINT:
parser.add_argument(
"-a",
"--alt_fingerprint",
type=int,
default=None,
help="Enter the alternative fingerprint of the key you want to use",
)
if option is Options.EXCLUDE_FINAL_DIR:
parser.add_argument(
"-x",
"--exclude_final_dir",
action="store_true",
help="Skips adding [final dir] to harvester for farming",
default=False,
)
if option is Options.CONNECT_TO_DAEMON:
parser.add_argument(
"-D",
"--connect-to-daemon",
action="store_true",
help=argparse.SUPPRESS,
default=False,
)
def call_plotters(root_path: Path, args):
# Add `plotters` section in STOR_ROOT.
stor_root_path = root_path
root_path = get_plotters_root_path(root_path)
if not root_path.is_dir():
if os.path.exists(root_path):
try:
os.remove(root_path)
except Exception as e:
print(f"Exception deleting old root path: {type(e)} {e}.")
if not os.path.exists(root_path):
print(f"Creating plotters folder within STOR_ROOT: {root_path}")
try:
os.mkdir(root_path)
except Exception as e:
print(f"Cannot create plotters root path {root_path} {type(e)} {e}.")
plotters = argparse.ArgumentParser(description="Available options.")
subparsers = plotters.add_subparsers(help="Available options", dest="plotter")
build_parser(subparsers, root_path, stor_plotter, "chiapos", "Storpos Plotter")
build_parser(subparsers, root_path, madmax_plotter, "madmax", "Madmax Plotter")
build_parser(subparsers, root_path, bladebit_plotter, "bladebit", "Bladebit Plotter")
install_parser = subparsers.add_parser("install", description="Install custom plotters.")
install_parser.add_argument(
"install_plotter", type=str, help="The plotters available for installing. Choose from madmax or bladebit."
)
args = plotters.parse_args(args)
if args.plotter == "chiapos":
plot_stor(args, stor_root_path)
if args.plotter == "madmax":
plot_madmax(args, stor_root_path, root_path)
if args.plotter == "bladebit":
plot_bladebit(args, stor_root_path, root_path)
if args.plotter == "install":
install_plotter(args.install_plotter, root_path)
def get_available_plotters(root_path) -> Dict[str, Any]:
plotters_root_path: Path = get_plotters_root_path(root_path)
plotters: Dict[str, Any] = {}
chiapos: Optional[Dict[str, Any]] = get_chiapos_install_info()
bladebit: Optional[Dict[str, Any]] = get_bladebit_install_info(plotters_root_path)
madmax: Optional[Dict[str, Any]] = get_madmax_install_info(plotters_root_path)
if chiapos is not None:
plotters["chiapos"] = chiapos
if bladebit is not None:
plotters["bladebit"] = bladebit
if madmax is not None:
plotters["madmax"] = madmax
return plotters
| 31.075676
| 114
| 0.529483
|
import argparse
import binascii
import os
from enum import Enum
from stor.plotters.bladebit import get_bladebit_install_info, plot_bladebit
from stor.plotters.chiapos import get_chiapos_install_info, plot_stor
from stor.plotters.madmax import get_madmax_install_info, plot_madmax
from stor.plotters.install_plotter import install_plotter
from pathlib import Path
from typing import Any, Dict, Optional
class Options(Enum):
TMP_DIR = 1
TMP_DIR2 = 2
FINAL_DIR = 3
K = 4
MEMO = 5
ID = 6
BUFF = 7
NUM_BUCKETS = 8
STRIPE_SIZE = 9
NUM_THREADS = 10
NOBITFIELD = 11
PLOT_COUNT = 12
MADMAX_NUM_BUCKETS_PHRASE3 = 13
MADMAX_WAITFORCOPY = 14
POOLKEY = 15
FARMERKEY = 16
MADMAX_TMPTOGGLE = 17
POOLCONTRACT = 18
MADMAX_RMULTI2 = 19
BLADEBIT_WARMSTART = 20
BLADEBIT_NONUMA = 21
VERBOSE = 22
OVERRIDE_K = 23
ALT_FINGERPRINT = 24
EXCLUDE_FINAL_DIR = 25
CONNECT_TO_DAEMON = 26
stor_plotter = [
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.K,
Options.MEMO,
Options.ID,
Options.BUFF,
Options.NUM_BUCKETS,
Options.STRIPE_SIZE,
Options.NUM_THREADS,
Options.NOBITFIELD,
Options.OVERRIDE_K,
Options.ALT_FINGERPRINT,
Options.POOLCONTRACT,
Options.FARMERKEY,
Options.POOLKEY,
Options.PLOT_COUNT,
Options.EXCLUDE_FINAL_DIR,
Options.CONNECT_TO_DAEMON,
]
madmax_plotter = [
Options.K,
Options.PLOT_COUNT,
Options.NUM_THREADS,
Options.NUM_BUCKETS,
Options.MADMAX_NUM_BUCKETS_PHRASE3,
Options.TMP_DIR,
Options.TMP_DIR2,
Options.FINAL_DIR,
Options.MADMAX_WAITFORCOPY,
Options.POOLKEY,
Options.FARMERKEY,
Options.POOLCONTRACT,
Options.MADMAX_TMPTOGGLE,
Options.MADMAX_RMULTI2,
Options.CONNECT_TO_DAEMON,
]
bladebit_plotter = [
Options.NUM_THREADS,
Options.PLOT_COUNT,
Options.FARMERKEY,
Options.POOLKEY,
Options.POOLCONTRACT,
Options.ID,
Options.BLADEBIT_WARMSTART,
Options.BLADEBIT_NONUMA,
Options.FINAL_DIR,
Options.VERBOSE,
Options.CONNECT_TO_DAEMON,
]
def get_plotters_root_path(root_path: Path) -> Path:
return root_path / "plotters"
def build_parser(subparsers, root_path, option_list, name, plotter_desc):
parser = subparsers.add_parser(name, description=plotter_desc)
for option in option_list:
if option is Options.K:
parser.add_argument(
"-k",
"--size",
type=int,
help="K value.",
default=32,
)
u_default = 0 if name == "chiapos" else 256
if option is Options.NUM_BUCKETS:
parser.add_argument(
"-u",
"--buckets",
type=int,
help="Number of buckets.",
default=u_default,
)
if option is Options.STRIPE_SIZE:
parser.add_argument(
"-s",
"--stripes",
type=int,
help="Stripe size.",
default=0,
)
if option is Options.TMP_DIR:
parser.add_argument(
"-t",
"--tmp_dir",
type=str,
dest="tmpdir",
help="Temporary directory 1.",
default=str(root_path) + "/",
)
if option is Options.TMP_DIR2:
parser.add_argument(
"-2",
"--tmp_dir2",
type=str,
dest="tmpdir2",
help="Temporary directory 2.",
default=str(root_path) + "/",
)
if option is Options.FINAL_DIR:
parser.add_argument(
"-d",
"--final_dir",
type=str,
dest="finaldir",
help="Final directory.",
default=str(root_path) + "/",
)
if option is Options.BUFF:
parser.add_argument(
"-b",
"--buffer",
type=int,
help="Size of the buffer, in MB.",
default=0,
)
r_default = 4 if name == "madmax" else 0
if option is Options.NUM_THREADS:
parser.add_argument(
"-r",
"--threads",
type=int,
help="Num threads.",
default=r_default,
)
if option is Options.NOBITFIELD:
parser.add_argument(
"-e",
"--nobitfield",
action="store_true",
help="Disable bitfield.",
default=False,
)
if option is Options.MEMO:
parser.add_argument(
"-m",
"--memo",
type=binascii.unhexlify,
help="Memo variable.",
)
if option is Options.ID:
parser.add_argument(
"-i",
"--id",
type=binascii.unhexlify,
help="Plot id",
)
if option is Options.PLOT_COUNT:
parser.add_argument(
"-n",
"--count",
type=int,
help="Number of plots to create (default = 1)",
default=1,
)
if option is Options.MADMAX_NUM_BUCKETS_PHRASE3:
parser.add_argument(
"-v",
"--buckets3",
type=int,
help="Number of buckets for phase 3+4 (default = 256)",
default=256,
)
if option is Options.MADMAX_WAITFORCOPY:
parser.add_argument(
"-w",
"--waitforcopy",
action="store_true",
help="Wait for copy to start next plot",
default=False,
)
if option is Options.MADMAX_TMPTOGGLE:
parser.add_argument(
"-G",
"--tmptoggle",
action="store_true",
help="Alternate tmpdir/tmpdir2 (default = false)",
default=False,
)
if option is Options.POOLCONTRACT:
parser.add_argument(
"-c",
"--contract",
type=str,
help="Pool Contract Address (64 chars)",
default="",
)
if option is Options.MADMAX_RMULTI2:
parser.add_argument(
"-K",
"--rmulti2",
type=int,
help="Thread multiplier for P2 (default = 1)",
default=1,
)
if option is Options.POOLKEY:
parser.add_argument(
"-p",
"--pool-key",
type=binascii.unhexlify,
help="Pool Public Key (48 bytes)",
default="",
)
if option is Options.FARMERKEY:
parser.add_argument(
"-f",
"--farmerkey",
type=binascii.unhexlify,
help="Farmer Public Key (48 bytes)",
default="",
)
if option is Options.BLADEBIT_WARMSTART:
parser.add_argument(
"-w",
"--warmstart",
action="store_true",
help="Warm start",
default=False,
)
if option is Options.BLADEBIT_NONUMA:
parser.add_argument(
"-m",
"--nonuma",
action="store_true",
help="Disable numa",
default=False,
)
if option is Options.VERBOSE:
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set verbose",
default=False,
)
if option is Options.OVERRIDE_K:
parser.add_argument(
"--override-k",
dest="override",
action="store_true",
help="Force size smaller than 32",
default=False,
)
if option is Options.ALT_FINGERPRINT:
parser.add_argument(
"-a",
"--alt_fingerprint",
type=int,
default=None,
help="Enter the alternative fingerprint of the key you want to use",
)
if option is Options.EXCLUDE_FINAL_DIR:
parser.add_argument(
"-x",
"--exclude_final_dir",
action="store_true",
help="Skips adding [final dir] to harvester for farming",
default=False,
)
if option is Options.CONNECT_TO_DAEMON:
parser.add_argument(
"-D",
"--connect-to-daemon",
action="store_true",
help=argparse.SUPPRESS,
default=False,
)
def call_plotters(root_path: Path, args):
stor_root_path = root_path
root_path = get_plotters_root_path(root_path)
if not root_path.is_dir():
if os.path.exists(root_path):
try:
os.remove(root_path)
except Exception as e:
print(f"Exception deleting old root path: {type(e)} {e}.")
if not os.path.exists(root_path):
print(f"Creating plotters folder within STOR_ROOT: {root_path}")
try:
os.mkdir(root_path)
except Exception as e:
print(f"Cannot create plotters root path {root_path} {type(e)} {e}.")
plotters = argparse.ArgumentParser(description="Available options.")
subparsers = plotters.add_subparsers(help="Available options", dest="plotter")
build_parser(subparsers, root_path, stor_plotter, "chiapos", "Storpos Plotter")
build_parser(subparsers, root_path, madmax_plotter, "madmax", "Madmax Plotter")
build_parser(subparsers, root_path, bladebit_plotter, "bladebit", "Bladebit Plotter")
install_parser = subparsers.add_parser("install", description="Install custom plotters.")
install_parser.add_argument(
"install_plotter", type=str, help="The plotters available for installing. Choose from madmax or bladebit."
)
args = plotters.parse_args(args)
if args.plotter == "chiapos":
plot_stor(args, stor_root_path)
if args.plotter == "madmax":
plot_madmax(args, stor_root_path, root_path)
if args.plotter == "bladebit":
plot_bladebit(args, stor_root_path, root_path)
if args.plotter == "install":
install_plotter(args.install_plotter, root_path)
def get_available_plotters(root_path) -> Dict[str, Any]:
plotters_root_path: Path = get_plotters_root_path(root_path)
plotters: Dict[str, Any] = {}
chiapos: Optional[Dict[str, Any]] = get_chiapos_install_info()
bladebit: Optional[Dict[str, Any]] = get_bladebit_install_info(plotters_root_path)
madmax: Optional[Dict[str, Any]] = get_madmax_install_info(plotters_root_path)
if chiapos is not None:
plotters["chiapos"] = chiapos
if bladebit is not None:
plotters["bladebit"] = bladebit
if madmax is not None:
plotters["madmax"] = madmax
return plotters
| true
| true
|
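# A minimal sketch of exercising the madmax sub-parser built by build_parser
# above, without installing or running any plotter. The temporary plotters
# directory, plot directory, and argument values are placeholder assumptions;
# appending this to the module gives build_parser and madmax_plotter in scope.
import argparse
from pathlib import Path

if __name__ == "__main__":
    plotters = argparse.ArgumentParser(description="Available options.")
    subparsers = plotters.add_subparsers(help="Available options", dest="plotter")
    build_parser(subparsers, Path("/tmp/plotters"), madmax_plotter, "madmax", "Madmax Plotter")
    args = plotters.parse_args(["madmax", "-k", "32", "-r", "8", "-d", "/plots/", "-n", "2"])
    # Namespace fields follow the dest names set above (size, threads, finaldir, count, ...).
    print(args.plotter, args.size, args.threads, args.finaldir, args.count)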
7907e50f30897314ff3f27a94e696e46fe598cc1
| 26,018
|
py
|
Python
|
meshdynamic/meshDynamic-Density.py
|
deepkashiwa/DeepUrbanEvent
|
3356ee3030893e2806d23541b2650ec73dab3075
|
[
"MIT"
] | 17
|
2019-04-09T06:28:22.000Z
|
2022-03-13T09:31:55.000Z
|
meshdynamic/meshDynamic-Density.py
|
deepkashiwa/DeepUrbanEvent
|
3356ee3030893e2806d23541b2650ec73dab3075
|
[
"MIT"
] | 2
|
2021-04-12T02:23:01.000Z
|
2021-06-01T02:21:10.000Z
|
meshdynamic/meshDynamic-Density.py
|
deepkashiwa/DeepUrbanEvent
|
3356ee3030893e2806d23541b2650ec73dab3075
|
[
"MIT"
] | 1
|
2021-07-30T10:22:41.000Z
|
2021-07-30T10:22:41.000Z
|
import csv
import numpy as np
import os
import sys
import time
import jismesh.utils as ju
import pandas as pd
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from common.datastructure.Point import Point
from common.datastructure.Mesh import Mesh
# meshTokyo = Mesh('tokyo','500m')
# GRIDNUMBER = meshTokyo.lonNum * meshTokyo.latNum
# print(meshTokyo.size, GRIDNUMBER)
# InterpolatedStep = 12
def getTimestamps(fileName):
last_tid = ''
D = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
timestamp = line[1]
D.append(timestamp)
last_tid = tid
return D
def getMesh(mesh, readFileName, writeFileName):
cnt = 0
wf = open(writeFileName, 'w')
with open(readFileName, 'r') as rf:
for line in csv.reader(rf):
if cnt % 1000000 == 0:
print(cnt)
tid = line[0]
timestamp = line[1]
p = Point(float(line[2]), float(line[3]))
meshid = mesh.inWhichGrid(p)
wf.write(','.join([tid, timestamp, str(meshid)])+'\n')
cnt += 1
wf.close()
def genMeshDynamic(mesh, fileName, meshFileName):
MD = {}
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
timestamp = line[1]
meshid = line[2]
key = (timestamp, meshid)
if key in MD:
MD[key].add(tid)
else:
                MD[key] = {tid}
wf = open(meshFileName, 'w')
Timestamps = getTimestamps(fileName)
for ts in Timestamps:
for meshid in range(mesh.lonNum * mesh.latNum):
key = (ts, str(meshid))
if key in MD:
value = len(MD[key])
else:
value = 0
wf.write(','.join([key[0], key[1], str(value)]) + '\n')
wf.close()
def getGrids(fileName):
last_tid = ''
G = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
grid = line[1]
G.append(grid)
last_tid = tid
return G
def getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):
df1 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df1.iloc[:,2] = np.log10(df1.iloc[:,2]+1) * 100
df2 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df2.iloc[:, 2] = np.log(df2.iloc[:,2]+1) * 100
with open(dynamicFileName1, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
with open(dynamicFileName2, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
df1.to_csv(dynamicFileName1, header=False, index=False, mode='a')
df2.to_csv(dynamicFileName2, header=False, index=False, mode='a')
def getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
            if line[0] == '@dynamic-mesh' or line[0] == '@use-mesh-code':
                wf.write(','.join(line) + '\n')
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):
wf = open(mobmapFile, 'w')
wf.write('@static-mesh' + '\n')
wf.write(','.join([str(x) for x in
[mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]]) + '\n')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if timestamp != line[0]:
continue
else:
meshid = line[1]
number = line[2]
xi, yi = mesh.Index[int(meshid)]
wf.write(','.join([str(item) for item in [yi, xi, number]]) + '\n')
wf.close()
def loadGTrajectory(fileName):
print('loadTrajectory Started : ', time.ctime())
TDB = {}
with open(fileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
# timestamp = line[1]
meshid = line[2]
if tid in TDB:
TDB[tid].append(meshid)
else:
TDB[tid] = [meshid]
print('loadTrajectory Ended : ', time.ctime())
return TDB
def getINDEX(mesh, gTrajFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('getTrajectoryINDEX Started : ', time.ctime())
Timestamps = getTimestamps(gTrajFileName)
print('timestamps...', len(Timestamps))
TDB = loadGTrajectory(gTrajFileName)
INDEX = []
for i in range(len(Timestamps)):
INDEX.append([])
for G in range(GRIDNUMBER):
INDEX[i].append(set()) # set().add
# print(np.array(INDEX).shape)
for tid in TDB:
traj = TDB[tid]
for i in range(len(traj)):
HH = i
if traj[i] == 'None':
pass
else:
gid = int(traj[i])
INDEX[HH][gid].add(tid) # set().add
return INDEX
def getGridImageIndex(mesh, window=15):
GRIDNUMBER = mesh.lonNum * mesh.latNum
IMG = []
for g in range(GRIDNUMBER):
R = np.zeros((window, window), dtype='int32')
current_x, current_y = mesh.Index[g]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
IMG.append(R)
return IMG
def genGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
for i in range(len(Timestamps) - 1):
for j in range(GRIDNUMBER):
cur_time = i
next_time = i + 1
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
trajfirst = INDEX[cur_time][cur_grid]
trajsecond = INDEX[next_time][next_grid]
transit_num = len(trajfirst & trajsecond)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
# This grid transit version is for 1minutes trajectory, more accurate, not for 5minutes.
# !!!!!!!!!!!!!!!!!!!! 1 minute trajectory data.
# TT is supposed to be 288 not 289 because it is interval.
def genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
# Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
TT, SPAN = 24 * 12, 5
for i in range(TT):
for j in range(GRIDNUMBER):
cur_time = i
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
cur_time_start = cur_time * SPAN
cur_time_end = (cur_time + 1) * SPAN + 1
SS = set()
for pp in range(cur_time_start, cur_time_end):
trajfirst = INDEX[pp][cur_grid]
for qq in range(pp, cur_time_end):
trajsecond = INDEX[qq][next_grid]
SS.update(trajfirst & trajsecond)
transit_num = len(SS)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
def getGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps) - 1 # -1 is because of transit
print('getGridTransit Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def getGridPop(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 80, 80, 1
return R
def getGridPopPartition(R, M, K):
# Original 8*8 matrix N = 8 = M*K
# M = 4 # M*M sub matrix
# K = 2 # each sub matrix has the size of K * K
P = []
for i in range(M):
for j in range(M):
P.append(R[:, i*K:i*K+K, j*K:j*K+K, :])
return np.array(P)
def getGridPop2DNumpy(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
return R
def getGridPopTimeInterval(mesh, popFileName):
print('getGridPop', popFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = []
lastTimestamp = ''
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = line[0]
if timestamp != lastTimestamp:
Timestamps.append(timestamp)
lastTimestamp = timestamp
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 75, 80, 1
return R
def getGridTransitTimeInterval(mesh, transitFileName):
print('getGridTransit Started : ', transitFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
# Timestamps = []
# lastTimestamp = ''
# with open(transitFileName, 'r') as rf:
# tansistReader = csv.reader(rf)
# for line in tansistReader:
# timestamp = line[0]
# if timestamp != lastTimestamp:
# Timestamps.append(timestamp)
# lastTimestamp = timestamp
# TIMENUMBER = len(Timestamps)
TIMENUMBER = 24 * 12
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):
TIMESTEP = InterpolatedStep * 2
Sequence = []
for i in range(R.shape[0] - TIMESTEP):
Sequence.append(R[i:i+TIMESTEP, :, :, :])
Sequence = np.array(Sequence, dtype='int32')
INDEX = list(range(len(Sequence)))
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate))]
testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
train = Sequence[trainINDEX]
test = Sequence[testINDEX]
np.save(path + 'train_' + fileName, train)
np.save(path + 'test_' + fileName, test)
print(train.shape, test.shape)
# trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate - validateRate))]
# validateINDEX = INDEX[int(len(INDEX) * (1 - testRate - validateRate)):int(len(INDEX) * (1 - testRate))]
# testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
# train = Sequence[trainINDEX]
# validate = Sequence[validateINDEX]
# test = Sequence[testINDEX]
# np.save(path + 'train_' + fileName, train)
# np.save(path + 'validate_' + fileName, validate)
# np.save(path + 'test_' + fileName, test)
# print(train.shape, validate.shape, test.shape)
# or directly return not save to file because just too big.
# return train, validate, test
def getShuffledTrainTest(path, fileName, TrainTest):
return np.load(path + TrainTest + '_' + fileName + '.npy')
def testcode(mesh):
GRIDNUMBER = mesh.lonNum * mesh.latNum
window = 5
R = np.zeros((window, window), dtype='int32')
center = mesh.ReverseIndex[(2,2)]
current_x, current_y = mesh.Index[center]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
print(R)
for i in range(len(R)):
print(R[i])
for i in range(len(R)):
print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])
T = R.reshape(-1)
print(T.tolist())
P = T.reshape(window, window)
print(P)
print(R.shape)
print(R[54][4178])
print(np.max(R) == 3369)
print(mesh.Index[3369])
x, y = mesh.Index[3369]
lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \
mesh.minLat + (y + 0.5) * mesh.dLat
print(lon, lat)
print(mesh.lonNum, mesh.latNum)
T = np.array(range(GRIDNUMBER))
T = T.reshape(mesh.lonNum, mesh.latNum)
T = np.swapaxes(T, 1, 0)
T = T[::-1, :]
print(T)
print(T.shape)
def run5min201802(mesh, dataPATH, dates):
    print('Now is getting training XS and YS...', dates)
# timestamp = '2011-10-20 09:00:00'
# filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \
# + timestamp[11:13] + timestamp[14:16] + timestamp[17:19]
# print(filenameTime)
for date in dates:
# first step: from trajectory point to mesh
        getMesh(mesh, dataPATH + date + 'tokyo_interpo5min.csv',
                dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')
        # second step: calculate mesh population at each timestamp
        genMeshDynamic(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')
        # fourth step: mesh transit between two consecutive timestamps
        genGridTransit(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')
def getHHTransit(HH):
assert HH <= 22, 'Hour should not be over 22.'
dataPATH = '../interpo_data/'
date = '20111020'
R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')
# (144, 72, 80, 225)
R = R[HH*6:HH*6+6, :, :, :]
# (6, 72, 80, 225)
R = R.reshape(R.shape[0], -1, R.shape[-1])
# (6, 5760, 225)
R = R.transpose(1, 0, 2)
# (5760, 6, 225)
R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), int(R.shape[2]**0.5), 1)
return R
def runCrowdDensity():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217","20110218","20110219","20110220", "20110221",
"20110222","20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv')
genMeshDynamic(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')
# def runCrowdFlow_from5min():
# from common.dataparam.Param import alldates
# dataPATH = '../interpo_data/'
# meshTokyo = Mesh('tokyo', '500m')
# #meshcode_level = 4
#
# for date in alldates:
# print('this is date', date)
# genGridTransit(meshTokyo,
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')
# paper crowd flow is from 1min.!!!!!!!!!!!!
def runCrowdFlow():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217", "20110218", "20110219", "20110220", "20110221",
"20110222", "20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv')
genGridTransit_5minutes_from_1minute(meshTokyo,
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')
def main():
runCrowdDensity()
if __name__ == '__main__':
main()
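# A toy check of the reshape/swapaxes/flip idiom used by getGridPop and
# getGridTransit above, runnable on its own. It assumes the Mesh indexing
# convention implied by getGridImageIndex: grid ids run column by column
# (gid = x * latNum + y) with y increasing northwards, so after the transform
# row 0 of the image is the northernmost row.
import numpy as np

lonNum, latNum = 3, 2                 # tiny 3 x 2 mesh instead of 80 x 75
flat = np.arange(lonNum * latNum)     # pretend the per-grid values are the grid ids 0..5
img = flat.reshape(lonNum, latNum)    # axis order (x, y)
img = np.swapaxes(img, 1, 0)          # axis order (y, x)
img = img[::-1, :]                    # flip so north ends up at the top
print(img)
# [[1 3 5]
#  [0 2 4]]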
| 36.542135
| 110
| 0.547506
|
import csv
import numpy as np
import os
import sys
import time
import jismesh.utils as ju
import pandas as pd
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from common.datastructure.Point import Point
from common.datastructure.Mesh import Mesh
def getTimestamps(fileName):
last_tid = ''
D = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
timestamp = line[1]
D.append(timestamp)
last_tid = tid
return D
def getMesh(mesh, readFileName, writeFileName):
cnt = 0
wf = open(writeFileName, 'w')
with open(readFileName, 'r') as rf:
for line in csv.reader(rf):
if cnt % 1000000 == 0:
print(cnt)
tid = line[0]
timestamp = line[1]
p = Point(float(line[2]), float(line[3]))
meshid = mesh.inWhichGrid(p)
wf.write(','.join([tid, timestamp, str(meshid)])+'\n')
cnt += 1
wf.close()
def genMeshDynamic(mesh, fileName, meshFileName):
MD = {}
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
timestamp = line[1]
meshid = line[2]
key = (timestamp, meshid)
if key in MD:
MD[key].add(tid)
else:
                MD[key] = {tid}
wf = open(meshFileName, 'w')
Timestamps = getTimestamps(fileName)
for ts in Timestamps:
for meshid in range(mesh.lonNum * mesh.latNum):
key = (ts, str(meshid))
if key in MD:
value = len(MD[key])
else:
value = 0
wf.write(','.join([key[0], key[1], str(value)]) + '\n')
wf.close()
def getGrids(fileName):
last_tid = ''
G = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
grid = line[1]
G.append(grid)
last_tid = tid
return G
def getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):
df1 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df1.iloc[:,2] = np.log10(df1.iloc[:,2]+1) * 100
df2 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df2.iloc[:, 2] = np.log(df2.iloc[:,2]+1) * 100
with open(dynamicFileName1, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
with open(dynamicFileName2, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
df1.to_csv(dynamicFileName1, header=False, index=False, mode='a')
df2.to_csv(dynamicFileName2, header=False, index=False, mode='a')
def getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
            if line[0] == '@dynamic-mesh' or line[0] == '@use-mesh-code':
                wf.write(','.join(line) + '\n')
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):
wf = open(mobmapFile, 'w')
wf.write('@static-mesh' + '\n')
wf.write(','.join([str(x) for x in
[mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]]) + '\n')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if timestamp != line[0]:
continue
else:
meshid = line[1]
number = line[2]
xi, yi = mesh.Index[int(meshid)]
wf.write(','.join([str(item) for item in [yi, xi, number]]) + '\n')
wf.close()
def loadGTrajectory(fileName):
print('loadTrajectory Started : ', time.ctime())
TDB = {}
with open(fileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
# timestamp = line[1]
meshid = line[2]
if tid in TDB:
TDB[tid].append(meshid)
else:
TDB[tid] = [meshid]
print('loadTrajectory Ended : ', time.ctime())
return TDB
def getINDEX(mesh, gTrajFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('getTrajectoryINDEX Started : ', time.ctime())
Timestamps = getTimestamps(gTrajFileName)
print('timestamps...', len(Timestamps))
TDB = loadGTrajectory(gTrajFileName)
INDEX = []
for i in range(len(Timestamps)):
INDEX.append([])
for G in range(GRIDNUMBER):
INDEX[i].append(set()) # set().add
# print(np.array(INDEX).shape)
for tid in TDB:
traj = TDB[tid]
for i in range(len(traj)):
HH = i
if traj[i] == 'None':
pass
else:
gid = int(traj[i])
INDEX[HH][gid].add(tid) # set().add
return INDEX
def getGridImageIndex(mesh, window=15):
GRIDNUMBER = mesh.lonNum * mesh.latNum
IMG = []
for g in range(GRIDNUMBER):
R = np.zeros((window, window), dtype='int32')
current_x, current_y = mesh.Index[g]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
IMG.append(R)
return IMG
def genGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
for i in range(len(Timestamps) - 1):
for j in range(GRIDNUMBER):
cur_time = i
next_time = i + 1
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
trajfirst = INDEX[cur_time][cur_grid]
trajsecond = INDEX[next_time][next_grid]
transit_num = len(trajfirst & trajsecond)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
# This grid transit version is for 1minutes trajectory, more accurate, not for 5minutes.
# !!!!!!!!!!!!!!!!!!!! 1 minute trajectory data.
# TT is supposed to be 288 not 289 because it is interval.
def genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
# Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
TT, SPAN = 24 * 12, 5
for i in range(TT):
for j in range(GRIDNUMBER):
cur_time = i
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
cur_time_start = cur_time * SPAN
cur_time_end = (cur_time + 1) * SPAN + 1
SS = set()
for pp in range(cur_time_start, cur_time_end):
trajfirst = INDEX[pp][cur_grid]
for qq in range(pp, cur_time_end):
trajsecond = INDEX[qq][next_grid]
SS.update(trajfirst & trajsecond)
transit_num = len(SS)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
def getGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps) - 1 # -1 is because of transit
print('getGridTransit Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def getGridPop(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 80, 80, 1
return R
def getGridPopPartition(R, M, K):
# Original 8*8 matrix N = 8 = M*K
# M = 4 # M*M sub matrix
# K = 2 # each sub matrix has the size of K * K
P = []
for i in range(M):
for j in range(M):
P.append(R[:, i*K:i*K+K, j*K:j*K+K, :])
return np.array(P)
def getGridPop2DNumpy(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
return R
def getGridPopTimeInterval(mesh, popFileName):
print('getGridPop', popFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = []
lastTimestamp = ''
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = line[0]
if timestamp != lastTimestamp:
Timestamps.append(timestamp)
lastTimestamp = timestamp
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 75, 80, 1
return R
def getGridTransitTimeInterval(mesh, transitFileName):
print('getGridTransit Started : ', transitFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
# Timestamps = []
# lastTimestamp = ''
# with open(transitFileName, 'r') as rf:
# tansistReader = csv.reader(rf)
# for line in tansistReader:
# timestamp = line[0]
# if timestamp != lastTimestamp:
# Timestamps.append(timestamp)
# lastTimestamp = timestamp
# TIMENUMBER = len(Timestamps)
TIMENUMBER = 24 * 12
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):
TIMESTEP = InterpolatedStep * 2
Sequence = []
for i in range(R.shape[0] - TIMESTEP):
Sequence.append(R[i:i+TIMESTEP, :, :, :])
Sequence = np.array(Sequence, dtype='int32')
INDEX = list(range(len(Sequence)))
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate))]
testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
train = Sequence[trainINDEX]
test = Sequence[testINDEX]
np.save(path + 'train_' + fileName, train)
np.save(path + 'test_' + fileName, test)
print(train.shape, test.shape)
# trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate - validateRate))]
# validateINDEX = INDEX[int(len(INDEX) * (1 - testRate - validateRate)):int(len(INDEX) * (1 - testRate))]
# testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
# train = Sequence[trainINDEX]
# validate = Sequence[validateINDEX]
# test = Sequence[testINDEX]
# np.save(path + 'train_' + fileName, train)
# np.save(path + 'validate_' + fileName, validate)
# np.save(path + 'test_' + fileName, test)
# print(train.shape, validate.shape, test.shape)
# or directly return not save to file because just too big.
# return train, validate, test
def getShuffledTrainTest(path, fileName, TrainTest):
return np.load(path + TrainTest + '_' + fileName + '.npy')
def testcode(mesh):
    # Ad-hoc sanity checks for mesh indexing and the grid <-> image layout.
    GRIDNUMBER = mesh.lonNum * mesh.latNum
window = 5
R = np.zeros((window, window), dtype='int32')
center = mesh.ReverseIndex[(2,2)]
current_x, current_y = mesh.Index[center]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
print(R)
for i in range(len(R)):
print(R[i])
for i in range(len(R)):
print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])
T = R.reshape(-1)
print(T.tolist())
P = T.reshape(window, window)
print(P)
print(R.shape)
    # The two checks below were written against the full-mesh array from an
    # earlier experiment; on the 5x5 window above they would fail, so they are
    # left disabled.
    # print(R[54][4178])
    # print(np.max(R) == 3369)
print(mesh.Index[3369])
x, y = mesh.Index[3369]
lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \
mesh.minLat + (y + 0.5) * mesh.dLat
print(lon, lat)
print(mesh.lonNum, mesh.latNum)
T = np.array(range(GRIDNUMBER))
T = T.reshape(mesh.lonNum, mesh.latNum)
T = np.swapaxes(T, 1, 0)
T = T[::-1, :]
print(T)
print(T.shape)
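# Helper distilled from testcode above: the centre coordinate of a grid cell is
#   lon = minLon + (x + 0.5) * dLon,   lat = minLat + (y + 0.5) * dLat.
# Assumes mesh.Index maps grid id -> (x, y), as used throughout this module.
def gridCenterLonLat(mesh, grid):
    x, y = mesh.Index[grid]
    lon = mesh.minLon + (x + 0.5) * mesh.dLon
    lat = mesh.minLat + (y + 0.5) * mesh.dLat
    return lon, lat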
def run5min201802(mesh, dataPATH, dates):
    print('Now is getting training XS and YS...', dates)
# timestamp = '2011-10-20 09:00:00'
# filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \
# + timestamp[11:13] + timestamp[14:16] + timestamp[17:19]
# print(filenameTime)
for date in dates:
        # first step: from trajectory points to mesh cells
        getMesh(mesh, dataPATH + date + 'tokyo_interpo5min.csv',
                dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')
        # second step: calculate mesh population at each timestamp
        genMeshDynamic(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')
        # third step: mesh transit between two consecutive timestamps
        genGridTransit(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')
def getHHTransit(HH):
assert HH <= 22, 'Hour should not be over 22.'
dataPATH = '../interpo_data/'
date = '20111020'
R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')
# (144, 72, 80, 225)
R = R[HH*6:HH*6+6, :, :, :]
# (6, 72, 80, 225)
R = R.reshape(R.shape[0], -1, R.shape[-1])
# (6, 5760, 225)
R = R.transpose(1, 0, 2)
# (5760, 6, 225)
R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), int(R.shape[2]**0.5), 1)
return R
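# Example use of getHHTransit: pull the six transit snapshots for hour 9 and
# check the resulting shape.  Assumes the transit file referenced inside
# getHHTransit exists under ../interpo_data/.
def _exampleHHTransit():
    R = getHHTransit(9)
    print(R.shape)   # expected (5760, 6, 15, 15, 1) per the shape comments above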
def runCrowdDensity():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217","20110218","20110219","20110220", "20110221",
"20110222","20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv')
genMeshDynamic(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')
# def runCrowdFlow_from5min():
# from common.dataparam.Param import alldates
# dataPATH = '../interpo_data/'
# meshTokyo = Mesh('tokyo', '500m')
# #meshcode_level = 4
#
# for date in alldates:
# print('this is date', date)
# genGridTransit(meshTokyo,
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')
# Note: the crowd flow used in the paper is computed from the 1-minute data
# (see runCrowdFlow below), not from the 5-minute data.
def runCrowdFlow():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217", "20110218", "20110219", "20110220", "20110221",
"20110222", "20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv')
genGridTransit_5minutes_from_1minute(meshTokyo,
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')
def main():
runCrowdDensity()
if __name__ == '__main__':
main()
| true
| true
|
7907e70df2cf5af58c8335db36af71166ec3b539
| 54,315
|
py
|
Python
|
Lib/site-packages/plotly/graph_objs/_splom.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 4
|
2020-02-05T11:26:47.000Z
|
2021-05-26T07:48:46.000Z
|
Lib/site-packages/plotly/graph_objs/_splom.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 6
|
2021-03-18T22:27:08.000Z
|
2022-03-11T23:40:50.000Z
|
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
|
kylenahas/180LoginV1
|
8f64be6e6016d47dff8febfcfa3bbd56e9042f89
|
[
"MIT"
] | 1
|
2020-02-02T21:17:12.000Z
|
2020-02-02T21:17:12.000Z
|
from plotly.basedatatypes import BaseTraceType
import copy
class Splom(BaseTraceType):
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# diagonal
# --------
@property
def diagonal(self):
"""
The 'diagonal' property is an instance of Diagonal
that may be specified as:
- An instance of plotly.graph_objs.splom.Diagonal
- A dict of string/value properties that will be passed
to the Diagonal constructor
Supported dict properties:
visible
Determines whether or not subplots on the
diagonal are displayed.
Returns
-------
plotly.graph_objs.splom.Diagonal
"""
return self['diagonal']
@diagonal.setter
def diagonal(self, val):
self['diagonal'] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.splom.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
axis
plotly.graph_objs.splom.dimension.Axis instance
or dict with compatible properties
label
Sets the label corresponding to this splom
dimension.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the dimension values to be plotted.
valuessrc
Sets the source reference on plot.ly for
values .
visible
Determines whether or not this dimension is
                shown on the graph. Note that even dimensions
                with visible: false contribute to the default
                grid generated by this splom trace.
Returns
-------
tuple[plotly.graph_objs.splom.Dimension]
"""
return self['dimensions']
@dimensions.setter
def dimensions(self, val):
self['dimensions'] = val
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the default
property values to use for elements of splom.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of plotly.graph_objs.splom.Dimension
- A dict of string/value properties that will be passed
to the Dimension constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.splom.Dimension
"""
return self['dimensiondefaults']
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self['dimensiondefaults'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on plot.ly for hoverinfo .
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.splom.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.splom.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}". See http
s://github.com/d3/d3-format/blob/master/README.md#locale_format
for details on the formatting syntax. The variables available
in `hovertemplate` are the ones emitted as event data described
at this link https://plot.ly/javascript/plotlyjs-events/#event-
        data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>".
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertemplate']
@hovertemplate.setter
def hovertemplate(self, val):
self['hovertemplate'] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on plot.ly for hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertemplatesrc']
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self['hovertemplatesrc'] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertext']
@hovertext.setter
def hovertext(self, val):
self['hovertext'] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on plot.ly for hovertext .
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertextsrc']
@hovertextsrc.setter
def hovertextsrc(self, val):
self['hovertextsrc'] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids provide object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.splom.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
colorbar
plotly.graph_objs.splom.marker.ColorBar
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.cmin` and `marker.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
line
plotly.graph_objs.splom.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size
.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for
symbol .
Returns
-------
plotly.graph_objs.splom.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of plotly.graph_objs.splom.Selected
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
plotly.graph_objs.splom.selected.Marker
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.splom.Selected
"""
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# showlowerhalf
# -------------
@property
def showlowerhalf(self):
"""
Determines whether or not subplots on the lower half from the
diagonal are displayed.
The 'showlowerhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlowerhalf']
@showlowerhalf.setter
def showlowerhalf(self, val):
self['showlowerhalf'] = val
# showupperhalf
# -------------
@property
def showupperhalf(self):
"""
Determines whether or not subplots on the upper half from the
diagonal are displayed.
The 'showupperhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showupperhalf']
@showupperhalf.setter
def showupperhalf(self, val):
self['showupperhalf'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.splom.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.splom.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair to appear on
hover. If a single string, the same string appears over all the
        data points. If an array of strings, the items are mapped in
        order to this trace's (x,y) coordinates.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on plot.ly for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self['uirevision']
@uirevision.setter
def uirevision(self, val):
self['uirevision'] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of plotly.graph_objs.splom.Unselected
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
plotly.graph_objs.splom.unselected.Marker
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.splom.Unselected
"""
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# xaxes
# -----
@property
def xaxes(self):
"""
Sets the list of x axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N xaxes
where N is the number of input dimensions. Note that, in case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'xaxes' property is an info array that may be specified as:
* a list of elements where:
The 'xaxes[i]' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
list
"""
return self['xaxes']
@xaxes.setter
def xaxes(self, val):
self['xaxes'] = val
# yaxes
# -----
@property
def yaxes(self):
"""
Sets the list of y axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N yaxes
where N is the number of input dimensions. Note that, in case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'yaxes' property is an info array that may be specified as:
* a list of elements where:
The 'yaxes[i]' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
list
"""
return self['yaxes']
@yaxes.setter
def yaxes(self, val):
self['yaxes'] = val
# type
# ----
@property
def type(self):
return self._props['type']
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
diagonal
plotly.graph_objs.splom.Diagonal instance or dict with
compatible properties
dimensions
plotly.graph_objs.splom.Dimension instance or dict with
compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.splom.Hoverlabel instance or dict
with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d3-format
/blob/master/README.md#locale_format for details on the
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plot.ly/javascript/plotlyjs-events/#event-data.
            Additionally, all attributes that can be specified
per-point (the ones that are `arrayOk: true`) are
available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for hovertemplate
.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
plotly.graph_objs.splom.Marker instance or dict with
compatible properties
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.splom.Selected instance or dict with
compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
plotly.graph_objs.splom.Stream instance or dict with
compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
            strings, the items are mapped in order to this
trace's (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.splom.Unselected instance or dict
with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
"""
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
diagonal=None,
dimensions=None,
dimensiondefaults=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
marker=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
showlowerhalf=None,
showupperhalf=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxes=None,
yaxes=None,
**kwargs
):
"""
Construct a new Splom object
Splom traces generate scatter plot matrix visualizations. Each
splom `dimensions` items correspond to a generated axis. Values
for each of those dimensions are set in `dimensions[i].values`.
Splom traces support all `scattergl` marker style attributes.
Specify `layout.grid` attributes and/or layout x-axis and
y-axis attributes for more control over the axis positioning
and style.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Splom
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
diagonal
plotly.graph_objs.splom.Diagonal instance or dict with
compatible properties
dimensions
plotly.graph_objs.splom.Dimension instance or dict with
compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.splom.Hoverlabel instance or dict
with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d3-format
/blob/master/README.md#locale_format for details on the
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plot.ly/javascript/plotlyjs-events/#event-data.
            Additionally, all attributes that can be specified
per-point (the ones that are `arrayOk: true`) are
available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for hovertemplate
.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
plotly.graph_objs.splom.Marker instance or dict with
compatible properties
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.splom.Selected instance or dict with
compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
plotly.graph_objs.splom.Stream instance or dict with
compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
            strings, the items are mapped in order to this
trace's (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.splom.Unselected instance or dict
with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
Returns
-------
Splom
"""
super(Splom, self).__init__('splom')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Splom
constructor must be a dict or
an instance of plotly.graph_objs.Splom"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (splom as v_splom)
# Initialize validators
# ---------------------
self._validators['customdata'] = v_splom.CustomdataValidator()
self._validators['customdatasrc'] = v_splom.CustomdatasrcValidator()
self._validators['diagonal'] = v_splom.DiagonalValidator()
self._validators['dimensions'] = v_splom.DimensionsValidator()
self._validators['dimensiondefaults'] = v_splom.DimensionValidator()
self._validators['hoverinfo'] = v_splom.HoverinfoValidator()
self._validators['hoverinfosrc'] = v_splom.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_splom.HoverlabelValidator()
self._validators['hovertemplate'] = v_splom.HovertemplateValidator()
self._validators['hovertemplatesrc'
] = v_splom.HovertemplatesrcValidator()
self._validators['hovertext'] = v_splom.HovertextValidator()
self._validators['hovertextsrc'] = v_splom.HovertextsrcValidator()
self._validators['ids'] = v_splom.IdsValidator()
self._validators['idssrc'] = v_splom.IdssrcValidator()
self._validators['legendgroup'] = v_splom.LegendgroupValidator()
self._validators['marker'] = v_splom.MarkerValidator()
self._validators['name'] = v_splom.NameValidator()
self._validators['opacity'] = v_splom.OpacityValidator()
self._validators['selected'] = v_splom.SelectedValidator()
self._validators['selectedpoints'] = v_splom.SelectedpointsValidator()
self._validators['showlegend'] = v_splom.ShowlegendValidator()
self._validators['showlowerhalf'] = v_splom.ShowlowerhalfValidator()
self._validators['showupperhalf'] = v_splom.ShowupperhalfValidator()
self._validators['stream'] = v_splom.StreamValidator()
self._validators['text'] = v_splom.TextValidator()
self._validators['textsrc'] = v_splom.TextsrcValidator()
self._validators['uid'] = v_splom.UidValidator()
self._validators['uirevision'] = v_splom.UirevisionValidator()
self._validators['unselected'] = v_splom.UnselectedValidator()
self._validators['visible'] = v_splom.VisibleValidator()
self._validators['xaxes'] = v_splom.XaxesValidator()
self._validators['yaxes'] = v_splom.YaxesValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('diagonal', None)
self['diagonal'] = diagonal if diagonal is not None else _v
_v = arg.pop('dimensions', None)
self['dimensions'] = dimensions if dimensions is not None else _v
_v = arg.pop('dimensiondefaults', None)
self['dimensiondefaults'
] = dimensiondefaults if dimensiondefaults is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hovertemplate', None)
self['hovertemplate'
] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop('hovertemplatesrc', None)
self['hovertemplatesrc'
] = hovertemplatesrc if hovertemplatesrc is not None else _v
_v = arg.pop('hovertext', None)
self['hovertext'] = hovertext if hovertext is not None else _v
_v = arg.pop('hovertextsrc', None)
self['hovertextsrc'] = hovertextsrc if hovertextsrc is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('showlowerhalf', None)
self['showlowerhalf'
] = showlowerhalf if showlowerhalf is not None else _v
_v = arg.pop('showupperhalf', None)
self['showupperhalf'
] = showupperhalf if showupperhalf is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('uirevision', None)
self['uirevision'] = uirevision if uirevision is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
_v = arg.pop('xaxes', None)
self['xaxes'] = xaxes if xaxes is not None else _v
_v = arg.pop('yaxes', None)
self['yaxes'] = yaxes if yaxes is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'splom'
self._validators['type'] = LiteralValidator(
plotly_name='type', parent_name='splom', val='splom'
)
arg.pop('type', None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
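# Minimal usage sketch for the Splom trace defined above (kept as a comment so
# it does not affect import of this module; assumes plotly is installed):
#
#     import plotly.graph_objs as go
#
#     trace = go.Splom(
#         dimensions=[
#             dict(label='sepal length', values=[5.1, 4.9, 4.7]),
#             dict(label='sepal width', values=[3.5, 3.0, 3.2]),
#         ],
#         marker=dict(size=5),
#         showupperhalf=False,  # draw only the lower-triangle panels
#     )
#     fig = go.Figure(data=[trace])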
| 36.773866
| 89
| 0.572273
|
from plotly.basedatatypes import BaseTraceType
import copy
class Splom(BaseTraceType):
@property
def customdata(self):
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
@property
def customdatasrc(self):
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
@property
def diagonal(self):
return self['diagonal']
@diagonal.setter
def diagonal(self, val):
self['diagonal'] = val
@property
def dimensions(self):
return self['dimensions']
@dimensions.setter
def dimensions(self, val):
self['dimensions'] = val
@property
def dimensiondefaults(self):
return self['dimensiondefaults']
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self['dimensiondefaults'] = val
@property
def hoverinfo(self):
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
@property
def hoverinfosrc(self):
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
@property
def hoverlabel(self):
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
@property
def hovertemplate(self):
return self['hovertemplate']
@hovertemplate.setter
def hovertemplate(self, val):
self['hovertemplate'] = val
@property
def hovertemplatesrc(self):
return self['hovertemplatesrc']
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self['hovertemplatesrc'] = val
@property
def hovertext(self):
return self['hovertext']
@hovertext.setter
def hovertext(self, val):
self['hovertext'] = val
@property
def hovertextsrc(self):
return self['hovertextsrc']
@hovertextsrc.setter
def hovertextsrc(self, val):
self['hovertextsrc'] = val
@property
def ids(self):
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
@property
def idssrc(self):
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
@property
def legendgroup(self):
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
@property
def marker(self):
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
@property
def name(self):
return self['name']
@name.setter
def name(self, val):
self['name'] = val
@property
def opacity(self):
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
@property
def selected(self):
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
@property
def selectedpoints(self):
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
@property
def showlegend(self):
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
@property
def showlowerhalf(self):
return self['showlowerhalf']
@showlowerhalf.setter
def showlowerhalf(self, val):
self['showlowerhalf'] = val
@property
def showupperhalf(self):
return self['showupperhalf']
@showupperhalf.setter
def showupperhalf(self, val):
self['showupperhalf'] = val
@property
def stream(self):
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
@property
def text(self):
return self['text']
@text.setter
def text(self, val):
self['text'] = val
@property
def textsrc(self):
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
@property
def uid(self):
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
@property
def uirevision(self):
return self['uirevision']
@uirevision.setter
def uirevision(self, val):
self['uirevision'] = val
@property
def unselected(self):
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
@property
def visible(self):
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
@property
def xaxes(self):
return self['xaxes']
@xaxes.setter
def xaxes(self, val):
self['xaxes'] = val
@property
def yaxes(self):
return self['yaxes']
@yaxes.setter
def yaxes(self, val):
self['yaxes'] = val
@property
def type(self):
return self._props['type']
@property
def _parent_path_str(self):
return ''
@property
def _prop_descriptions(self):
return """\
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
diagonal
plotly.graph_objs.splom.Diagonal instance or dict with
compatible properties
dimensions
plotly.graph_objs.splom.Dimension instance or dict with
compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.splom.Hoverlabel instance or dict
with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d3-format
/blob/master/README.md#locale_format for details on the
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plot.ly/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified
per-point (the ones that are `arrayOk: true`) are
available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for hovertemplate
.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
plotly.graph_objs.splom.Marker instance or dict with
compatible properties
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.splom.Selected instance or dict with
compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
plotly.graph_objs.splom.Stream instance or dict with
compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
string, the items are mapped in order to the this
trace's (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.splom.Unselected instance or dict
with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
"""
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
diagonal=None,
dimensions=None,
dimensiondefaults=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
marker=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
showlowerhalf=None,
showupperhalf=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxes=None,
yaxes=None,
**kwargs
):
super(Splom, self).__init__('splom')
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Splom
constructor must be a dict or
an instance of plotly.graph_objs.Splom"""
)
self._skip_invalid = kwargs.pop('skip_invalid', False)
from plotly.validators import (splom as v_splom)
self._validators['customdata'] = v_splom.CustomdataValidator()
self._validators['customdatasrc'] = v_splom.CustomdatasrcValidator()
self._validators['diagonal'] = v_splom.DiagonalValidator()
self._validators['dimensions'] = v_splom.DimensionsValidator()
self._validators['dimensiondefaults'] = v_splom.DimensionValidator()
self._validators['hoverinfo'] = v_splom.HoverinfoValidator()
self._validators['hoverinfosrc'] = v_splom.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_splom.HoverlabelValidator()
self._validators['hovertemplate'] = v_splom.HovertemplateValidator()
self._validators['hovertemplatesrc'
] = v_splom.HovertemplatesrcValidator()
self._validators['hovertext'] = v_splom.HovertextValidator()
self._validators['hovertextsrc'] = v_splom.HovertextsrcValidator()
self._validators['ids'] = v_splom.IdsValidator()
self._validators['idssrc'] = v_splom.IdssrcValidator()
self._validators['legendgroup'] = v_splom.LegendgroupValidator()
self._validators['marker'] = v_splom.MarkerValidator()
self._validators['name'] = v_splom.NameValidator()
self._validators['opacity'] = v_splom.OpacityValidator()
self._validators['selected'] = v_splom.SelectedValidator()
self._validators['selectedpoints'] = v_splom.SelectedpointsValidator()
self._validators['showlegend'] = v_splom.ShowlegendValidator()
self._validators['showlowerhalf'] = v_splom.ShowlowerhalfValidator()
self._validators['showupperhalf'] = v_splom.ShowupperhalfValidator()
self._validators['stream'] = v_splom.StreamValidator()
self._validators['text'] = v_splom.TextValidator()
self._validators['textsrc'] = v_splom.TextsrcValidator()
self._validators['uid'] = v_splom.UidValidator()
self._validators['uirevision'] = v_splom.UirevisionValidator()
self._validators['unselected'] = v_splom.UnselectedValidator()
self._validators['visible'] = v_splom.VisibleValidator()
self._validators['xaxes'] = v_splom.XaxesValidator()
self._validators['yaxes'] = v_splom.YaxesValidator()
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('diagonal', None)
self['diagonal'] = diagonal if diagonal is not None else _v
_v = arg.pop('dimensions', None)
self['dimensions'] = dimensions if dimensions is not None else _v
_v = arg.pop('dimensiondefaults', None)
self['dimensiondefaults'
] = dimensiondefaults if dimensiondefaults is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hovertemplate', None)
self['hovertemplate'
] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop('hovertemplatesrc', None)
self['hovertemplatesrc'
] = hovertemplatesrc if hovertemplatesrc is not None else _v
_v = arg.pop('hovertext', None)
self['hovertext'] = hovertext if hovertext is not None else _v
_v = arg.pop('hovertextsrc', None)
self['hovertextsrc'] = hovertextsrc if hovertextsrc is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('showlowerhalf', None)
self['showlowerhalf'
] = showlowerhalf if showlowerhalf is not None else _v
_v = arg.pop('showupperhalf', None)
self['showupperhalf'
] = showupperhalf if showupperhalf is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('uirevision', None)
self['uirevision'] = uirevision if uirevision is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
_v = arg.pop('xaxes', None)
self['xaxes'] = xaxes if xaxes is not None else _v
_v = arg.pop('yaxes', None)
self['yaxes'] = yaxes if yaxes is not None else _v
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'splom'
self._validators['type'] = LiteralValidator(
plotly_name='type', parent_name='splom', val='splom'
)
arg.pop('type', None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
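# A minimal usage sketch for the trace class above, assuming a standard plotly
# install; the data values and the `uid` string below are illustrative only:
#
#     import plotly.graph_objs as go
#
#     splom = go.Splom(
#         dimensions=[
#             dict(label='a', values=[1, 2, 3]),
#             dict(label='b', values=[4, 5, 6]),
#         ],
#         showupperhalf=False,      # draw only the lower triangle of panels
#         uid='example-splom',      # explicit uid keeps object constancy across updates
#     )
#     fig = go.Figure(data=[splom])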
| true
| true
|
7907e7aa9cfdf06580aff6881f0cf146bb88eecb
| 628
|
py
|
Python
|
Part 1/Chapter 7/exercise_7.6.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
Part 1/Chapter 7/exercise_7.6.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
Part 1/Chapter 7/exercise_7.6.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
repeat_age = 0
while repeat_age < 3:
age = int(
input(f"Check your movie ticket price by typing your age below. You may check {3 - repeat_age} more times\n"))
if age < 3:
print("Your ticket is free!")
elif 3 <= age <= 12:
print("Your ticket costs $10")
elif age > 12:
print("Your ticket costs $15")
if repeat_age == 2:
break
check = input("Would you like to check another ticket price? Type 'quit' to exit this program\n")
if check == 'quit':
break
repeat_age += 1
print(f"Thank you for using our service! You have checked {repeat_age + 1} times")
| 34.888889
| 118
| 0.61465
|
repeat_age = 0
while repeat_age < 3:
age = int(
input(f"Check your movie ticket price by typing your age below. You may check {3 - repeat_age} more times\n"))
if age < 3:
print("Your ticket is free!")
elif 3 <= age <= 12:
print("Your ticket costs $10")
elif age > 12:
print("Your ticket costs $15")
if repeat_age == 2:
break
check = input("Would you like to check another ticket price? Type 'quit' to exit this program\n")
if check == 'quit':
break
repeat_age += 1
print(f"Thank you for using our service! You have checked {repeat_age + 1} times")
| true
| true
|
7907e86fa266d45c6b06d853531bbb74f1ff95d1
| 1,735
|
py
|
Python
|
fastv8/doc/_extensions/backports.py
|
gantech/fastv8DriverProgram
|
565b0f8f6b019a112d7b35f9d841a6af04cb6cce
|
[
"Apache-2.0"
] | null | null | null |
fastv8/doc/_extensions/backports.py
|
gantech/fastv8DriverProgram
|
565b0f8f6b019a112d7b35f9d841a6af04cb6cce
|
[
"Apache-2.0"
] | null | null | null |
fastv8/doc/_extensions/backports.py
|
gantech/fastv8DriverProgram
|
565b0f8f6b019a112d7b35f9d841a6af04cb6cce
|
[
"Apache-2.0"
] | 5
|
2018-09-20T08:27:07.000Z
|
2021-06-27T01:15:44.000Z
|
import collections
Set = set
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.MutableSet):
"""
From: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
if __name__=="__main__":
a = OrderedSet()
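    # A minimal usage sketch: insertion order is preserved and duplicates are
    # ignored, unlike a plain set.
    a.add('banana')
    a.add('apple')
    a.add('banana')      # duplicate, silently ignored
    print(a)             # OrderedSet(['banana', 'apple'])
    a.discard('banana')
    print(list(a))       # ['apple']
    print(a.pop())       # 'apple' (pops from the end by default)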
| 22.828947
| 74
| 0.591931
|
import collections
Set = set
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end]
self.map = {}
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear()
if __name__=="__main__":
a = OrderedSet()
| true
| true
|
7907e91bb87fb323f8cc6dcc83cbd1f8983ee6b2
| 695
|
py
|
Python
|
tests/test_user_commands.py
|
TakingItCasual/easymc
|
e0feb3c6e9ea172fc0128561789b965c29ed45ca
|
[
"MIT"
] | null | null | null |
tests/test_user_commands.py
|
TakingItCasual/easymc
|
e0feb3c6e9ea172fc0128561789b965c29ed45ca
|
[
"MIT"
] | 1
|
2021-01-01T11:12:14.000Z
|
2021-01-01T11:12:14.000Z
|
tests/test_user_commands.py
|
TakingItCasual/easymc
|
e0feb3c6e9ea172fc0128561789b965c29ed45ca
|
[
"MIT"
] | null | null | null |
from time import sleep
from ec2mc import __main__
def test_user_commands():
"""test all user commands."""
assert __main__.main([
"user", "create", "ec2mc_test_user", "setup_users", "--default"
]) is not False
sleep(5)
assert __main__.main([
"user", "list"
]) is not False
assert __main__.main([
"user", "set_group", "EC2MC_TEST_USER", "basic_users"
]) is not False
assert __main__.main([
"user", "be", "takingitcasual"
]) is not False
assert __main__.main([
"user", "rotate_key", "Ec2Mc_TeSt_UsEr"
]) is not False
assert __main__.main([
"user", "delete", "eC2mC_tEsT_uSeR"
]) is not False
| 26.730769
| 71
| 0.604317
|
from time import sleep
from ec2mc import __main__
def test_user_commands():
assert __main__.main([
"user", "create", "ec2mc_test_user", "setup_users", "--default"
]) is not False
sleep(5)
assert __main__.main([
"user", "list"
]) is not False
assert __main__.main([
"user", "set_group", "EC2MC_TEST_USER", "basic_users"
]) is not False
assert __main__.main([
"user", "be", "takingitcasual"
]) is not False
assert __main__.main([
"user", "rotate_key", "Ec2Mc_TeSt_UsEr"
]) is not False
assert __main__.main([
"user", "delete", "eC2mC_tEsT_uSeR"
]) is not False
| true
| true
|
7907e9266410cff8eabd178fb2aa50dec1922640
| 693
|
py
|
Python
|
tests/test_logic/test_rhythm/test_Part.py
|
aParthemer/MidiCompose
|
1bed3d47b7b9b484b0ea02ba5e15bf8b51aaf11b
|
[
"MIT"
] | null | null | null |
tests/test_logic/test_rhythm/test_Part.py
|
aParthemer/MidiCompose
|
1bed3d47b7b9b484b0ea02ba5e15bf8b51aaf11b
|
[
"MIT"
] | 7
|
2022-02-01T23:48:46.000Z
|
2022-03-17T02:36:34.000Z
|
tests/test_logic/test_rhythm/test_Part.py
|
aParthemer/MidiCompose
|
1bed3d47b7b9b484b0ea02ba5e15bf8b51aaf11b
|
[
"MIT"
] | null | null | null |
import pytest
from MidiCompose.logic.rhythm.beat import Beat
from MidiCompose.logic.rhythm.measure import Measure
from MidiCompose.logic.rhythm.part import Part
@pytest.fixture
def part_1():
m1 = Measure([Beat([1,2,1,2]),
Beat([1,0,0,1])])
m2 = Measure([Beat([2,2,1,1]),
Beat([2,2,2,2])])
part = Part([m1,m2])
return part
def test_empty_constructor():
p = Part()
assert p.n_measures == 1
assert p.n_beats == 1
assert p.n_note_on == 0
def test_n_note_on(part_1):
assert part_1.n_note_on == 6
def test_iterator(part_1):
for m in part_1:
assert type(m) == Measure
| 19.25
| 53
| 0.590188
|
import pytest
from MidiCompose.logic.rhythm.beat import Beat
from MidiCompose.logic.rhythm.measure import Measure
from MidiCompose.logic.rhythm.part import Part
@pytest.fixture
def part_1():
m1 = Measure([Beat([1,2,1,2]),
Beat([1,0,0,1])])
m2 = Measure([Beat([2,2,1,1]),
Beat([2,2,2,2])])
part = Part([m1,m2])
return part
def test_empty_constructor():
p = Part()
assert p.n_measures == 1
assert p.n_beats == 1
assert p.n_note_on == 0
def test_n_note_on(part_1):
assert part_1.n_note_on == 6
def test_iterator(part_1):
for m in part_1:
assert type(m) == Measure
| true
| true
|
7907ec371b511a265e56c2629766fb2c36188a38
| 3,156
|
py
|
Python
|
amzASINScrapper/amzASINScrapper/settings.py
|
sunil-dhaka/python-webScrappers
|
1f5bd923bd6d3ddce9e209f8d50e08d6b12648ac
|
[
"MIT"
] | null | null | null |
amzASINScrapper/amzASINScrapper/settings.py
|
sunil-dhaka/python-webScrappers
|
1f5bd923bd6d3ddce9e209f8d50e08d6b12648ac
|
[
"MIT"
] | null | null | null |
amzASINScrapper/amzASINScrapper/settings.py
|
sunil-dhaka/python-webScrappers
|
1f5bd923bd6d3ddce9e209f8d50e08d6b12648ac
|
[
"MIT"
] | null | null | null |
# Scrapy settings for amzASINScrapper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amzASINScrapper'
SPIDER_MODULES = ['amzASINScrapper.spiders']
NEWSPIDER_MODULE = 'amzASINScrapper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'amzASINScrapper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'amzASINScrapper.pipelines.AmzasinscrapperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.460674
| 103
| 0.78327
|
BOT_NAME = 'amzASINScrapper'
SPIDER_MODULES = ['amzASINScrapper.spiders']
NEWSPIDER_MODULE = 'amzASINScrapper.spiders'
ROBOTSTXT_OBEY = True
| true
| true
|
7907ec6fdb92f0a64458d8e3aa6c8595c39b15a1
| 1,072
|
py
|
Python
|
tests/test_parse_import_time.py
|
Victor333Huesca/importtime-output-wrapper
|
15941ffe30a93a2d5ec1832e16df160caa1d51e4
|
[
"MIT"
] | 1
|
2021-02-10T13:15:47.000Z
|
2021-02-10T13:15:47.000Z
|
tests/test_parse_import_time.py
|
dominikwalk/importtime_output_wrapper
|
67c94371cd92ea66f4dbdd8840cf6120db4160c0
|
[
"MIT"
] | 1
|
2021-09-01T19:25:33.000Z
|
2021-09-01T19:25:33.000Z
|
tests/test_parse_import_time.py
|
dominikwalk/importtime_output_wrapper
|
67c94371cd92ea66f4dbdd8840cf6120db4160c0
|
[
"MIT"
] | null | null | null |
import pytest
from importtime_output_wrapper import Import
from importtime_output_wrapper import parse_import_time
from importtime_output_wrapper import InvalidInput
imp_a0 = Import(name="a0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_a1 = Import(name="a1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b0 = Import(name="b0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_b1 = Import(name="b1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b = Import(name="b", t_self=2, t_cumu=3, depth=1, childs=[imp_b0, imp_b1])
imp_a = Import(name="a", t_self=1, t_cumu=2, depth=1, childs=[imp_a0, imp_a1])
root = Import(name="root", t_self=0, t_cumu=0, depth=0, childs=[imp_a, imp_b])
test_tree = [root]
with open("tests/sample_importtime_output") as f:
test_output_string = f.read()
@pytest.mark.parametrize(("test_input", "expected"), ((test_output_string, test_tree),))
def test_parse_std_err(test_input, expected):
assert parse_import_time(test_input) == expected
def test_parse_empty_std_err():
with pytest.raises(InvalidInput):
parse_import_time("")
| 34.580645
| 88
| 0.731343
|
import pytest
from importtime_output_wrapper import Import
from importtime_output_wrapper import parse_import_time
from importtime_output_wrapper import InvalidInput
imp_a0 = Import(name="a0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_a1 = Import(name="a1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b0 = Import(name="b0", t_self=4, t_cumu=5, depth=2, childs=[])
imp_b1 = Import(name="b1", t_self=3, t_cumu=4, depth=2, childs=[])
imp_b = Import(name="b", t_self=2, t_cumu=3, depth=1, childs=[imp_b0, imp_b1])
imp_a = Import(name="a", t_self=1, t_cumu=2, depth=1, childs=[imp_a0, imp_a1])
root = Import(name="root", t_self=0, t_cumu=0, depth=0, childs=[imp_a, imp_b])
test_tree = [root]
with open("tests/sample_importtime_output") as f:
test_output_string = f.read()
@pytest.mark.parametrize(("test_input", "expected"), ((test_output_string, test_tree),))
def test_parse_std_err(test_input, expected):
assert parse_import_time(test_input) == expected
def test_parse_empty_std_err():
with pytest.raises(InvalidInput):
parse_import_time("")
| true
| true
|
7907ed3159ccaed9f860fc0385957aca72dd15fc
| 4,310
|
py
|
Python
|
contents/2_Q_Learning_maze/maze_env.py
|
zhao-jin/Reinforcement-learning-with-tensorflow
|
a4a816f1570be55016909f703fb1fd1ceae9c5a0
|
[
"MIT"
] | null | null | null |
contents/2_Q_Learning_maze/maze_env.py
|
zhao-jin/Reinforcement-learning-with-tensorflow
|
a4a816f1570be55016909f703fb1fd1ceae9c5a0
|
[
"MIT"
] | null | null | null |
contents/2_Q_Learning_maze/maze_env.py
|
zhao-jin/Reinforcement-learning-with-tensorflow
|
a4a816f1570be55016909f703fb1fd1ceae9c5a0
|
[
"MIT"
] | null | null | null |
"""
Reinforcement learning maze example.
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example. The RL is in RL_brain.py.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40 # pixels
MAZE_H = 4 # grid height
MAZE_W = 4 # grid width
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
s_ = self.canvas.coords(self.rect) # next state
# reward function
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
#s_ = 'terminal'
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
#s_ = 'terminal'
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.01)
self.update()
def update():
for t in range(10):
s = env.reset()
while True:
env.render()
a = 1
s, r, done = env.step(a)
if done:
break
if __name__ == '__main__':
env = Maze()
env.after(100, update)
env.mainloop()
| 29.121622
| 84
| 0.517633
|
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40
MAZE_H = 4
MAZE_W = 4
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
origin = np.array([20, 20])
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0:
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1:
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2:
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3:
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1])
s_ = self.canvas.coords(self.rect)
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.01)
self.update()
def update():
for t in range(10):
s = env.reset()
while True:
env.render()
a = 1
s, r, done = env.step(a)
if done:
break
if __name__ == '__main__':
env = Maze()
env.after(100, update)
env.mainloop()
| true
| true
|
7907eda9ab081c94bfbf0706c3bbf2a82b8b0777
| 52,414
|
py
|
Python
|
vendors/rez-2.23.1-py2.7/rez/vendor/memcache/memcache.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 4
|
2019-01-11T03:41:28.000Z
|
2019-09-12T06:57:17.000Z
|
vendors/rez-2.23.1-py2.7/rez/vendor/memcache/memcache.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | null | null | null |
vendors/rez-2.23.1-py2.7/rez/vendor/memcache/memcache.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 2
|
2019-01-10T05:00:18.000Z
|
2020-02-15T16:32:56.000Z
|
#!/usr/bin/env python
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import os
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from binascii import crc32 # zlib version is not cross-platform
def cmemcache_hash(key):
return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
serverHashFunction = crc32
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
def decompress(val):
raise _Error("received compressed data but I don't support compression (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
valid_key_chars_re = re.compile('[\x21-\x7e\x80-\xff]+$')
# Original author: Evan Martin of Danga Interactive
__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
__version__ = "1.53"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
class _ConnectionDeadError(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedKeyNoneError(MemcachedKeyError):
pass
class MemcachedKeyTypeError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
server_max_key_length=SERVER_MAX_KEY_LENGTH,
server_max_value_length=SERVER_MAX_VALUE_LENGTH,
dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
cache_cas = False, flush_on_reconnect=0, check_keys=True):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
@param dead_retry: number of seconds before retrying a blacklisted
        server. Defaults to 30 s.
@param socket_timeout: timeout in seconds for all calls to a server. Defaults
to 3 seconds.
@param cache_cas: (default False) If true, cas operations will be
cached. WARNING: This cache is not expired internally, if you have
a long-running process you will need to expire it manually via
client.reset_cas(), or the cache can grow unlimited.
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
Data that is larger than this will not be sent to the server.
@param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
Data that is larger than this will not be sent to the server.
@param flush_on_reconnect: optional flag which prevents a scenario that
can cause stale data to be read: If there's more than one memcached
server and the connection to one is interrupted, keys that mapped to
that server will get reassigned to another. If the first server comes
back, those keys will map to it again. If it still has its data, get()s
can read stale data that was overwritten on another server. This flag
is off by default for backwards compatibility.
@param check_keys: (default True) If True, the key is checked to
ensure it is the correct length and composed of the right characters.
"""
local.__init__(self)
self.debug = debug
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.flush_on_reconnect = flush_on_reconnect
self.set_servers(servers)
self.stats = {}
self.cache_cas = cache_cas
self.reset_cas()
self.do_check_key = check_keys
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
self.server_max_value_length = server_max_value_length
# figure out the pickler style
file = StringIO()
try:
pickler = self.pickler(file, protocol = self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
def reset_cas(self):
"""
Reset the cas cache. This is only used if the Client() object
was created with "cache_cas=True". If used, this cache does not
expire internally, so it can grow unbounded if you do not clear it
yourself.
"""
self.cas_ids = {}
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
socket_timeout=self.socket_timeout,
flush_on_reconnect=self.flush_on_reconnect)
for s in servers]
self._init_buckets()
def get_stats(self, stat_args = None):
'''Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
elif s.family == socket.AF_INET6:
name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() in ('END', 'RESET'):
break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def get_slabs(self):
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
elif s.family == socket.AF_INET6:
name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
serverData = {}
data.append(( name, serverData ))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
item = line.split(' ', 2)
#0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
#0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
"""Expire all data in memcache servers that are reachable."""
for s in self.servers:
if not s.connect(): continue
s.flush()
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.deaduntil = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
if time != None:
for key in server_keys[server]: # These are mangled keys
write("delete %s %d\r\n" % (key, time))
else:
for key in server_keys[server]: # These are mangled keys
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@param time: number of seconds any subsequent set / update commands
should fail. Defaults to None for no delay.
@rtype: int
'''
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None and time != 0:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
line = server.readline()
if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
% repr(line))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value
for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
Returns None if C{key} doesn't exist on server, otherwise it
returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache,
and it must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing or None on error.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
if line == None or line.strip() =='NOT_FOUND': return None
return int(line)
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def append(self, key, val, time=0, min_compress_len=0):
'''Append the value to the end of the existing key's value.
Only stores in memcache if key already exists.
Also see L{prepend}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("append", key, val, time, min_compress_len)
def prepend(self, key, val, time=0, min_compress_len=0):
'''Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("prepend", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
        The C{key} can optionally be a tuple, with the first element
        being the server hash value and the second being the key. This is
        useful if you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
'''
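        # Illustrative call combining the parameters above (`mc` is a
        # hypothetical Client instance): store a pickled dict for one hour and
        # only attempt zlib compression when the payload exceeds 1024 bytes.
        #
        #     mc.set("session:42", session_dict, time=3600, min_compress_len=1024)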
return self._set("set", key, val, time, min_compress_len)
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
        The C{key} can optionally be a tuple, with the first element
        being the server hash value and the second being the key. This is
        useful if you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
        yields a larger string than the input, then it is discarded. For
        backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
# Check it just once ...
key_extra_len=len(key_prefix)
if key_prefix and self.do_check_key:
self.check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
server_keys = {}
prefixed_to_orig_key = {}
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
# Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
else:
str_orig_key = str(orig_key) # set_multi supports int / long keys.
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
if self.do_check_key:
self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the
number of total packets flying around your network, reducing
total latency, since your app doesn't have to wait for each
round-trip of L{set} before sending the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time which this value should
expire, either as a delta number of seconds, or an absolute
unix time-since-the-epoch value. See the memcached protocol
docs section "Storage Commands" for more info on <exptime>. We
default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when
sending to memcache. Allows you to efficiently stuff these
keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi(
... {'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
Causes key 'subspace_key1' and 'subspace_key2' to be
set. Useful in conjunction with a higher-level layer which
applies namespaces to data in memcache. In this case, the
return result would be the list of notset original keys,
prefix not applied.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress()
routine. If the value being cached is a string, then
the length of the string is measured, else if the value
is an object, then the length of the pickle result is
        measured. If the resulting attempt at compression yields
        a larger string than the input, then it is discarded. For
        backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of
memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
mapping.iterkeys(), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
notstored = [] # original keys.
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(
mapping[prefixed_to_orig_key[key]],
min_compress_len)
if store_info:
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
time, store_info[1], store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
# short-circuit if there are no servers, just return all keys
if not server_keys: return(mapping.keys())
for server, keys in server_keys.iteritems():
try:
for key in keys:
if server.readline() == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
if self.picklerIsKeyword:
pickler = self.pickler(file, protocol = self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) > self.server_max_value_length: return(0)
return (flags, len(val), val)
def _set(self, cmd, key, val, time, min_compress_len = 0):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
def _unsafe_set():
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1],
self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED", raise_exception=True)
== "STORED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
try:
return _unsafe_set()
except _ConnectionDeadError:
# retry once
try:
if server._get_socket():
return _unsafe_set()
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
def _unsafe_get():
self._statlog(cmd)
try:
server.send_cmd("%s %s" % (cmd, key))
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
rkey, flags, rlen, cas_id, = self._expect_cas_value(server,
raise_exception=True)
if rkey and self.cache_cas:
self.cas_ids[rkey] = cas_id
else:
rkey, flags, rlen, = self._expectvalue(server,
raise_exception=True)
if not rkey:
return None
try:
value = self._recv_value(server, flags, rlen)
finally:
server.expect("END", raise_exception=True)
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
return value
try:
return _unsafe_get()
except _ConnectionDeadError:
# retry once
try:
if server.connect():
return _unsafe_get()
return None
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return None
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
return self._get('get', key)
def gets(self, key):
'''Retrieves a key from the memcache. Used in conjunction with 'cas'.
@return: The value or None.
'''
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
        get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too, such as your db pri key fields.
They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
        @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
line = server.readline()
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return retvals
def _expect_cas_value(self, server, line=None, raise_exception=False):
if not line:
line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len, cas_id = line.split()
return (rkey, int(flags), int(len), int(cas_id))
else:
return (None, None, None, None)
def _expectvalue(self, server, line=None, raise_exception=False):
if not line:
line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
flags = int(flags)
rlen = int(len)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
% (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
return None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
def check_key(self, key, key_extra_len=0):
"""Checks sanity of key. Fails if:
Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLengthError).
Contains control characters (Raises MemcachedKeyCharacterError).
Is a unicode string (Raises MemcachedStringEncodingError).
Is not a string (Raises MemcachedKeyTypeError).
Is None (Raises MemcachedKeyNoneError).
"""
if isinstance(key, tuple): key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
if isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
if isinstance(key, basestring):
if self.server_max_key_length != 0 and \
len(key) + key_extra_len > self.server_max_key_length:
raise Client.MemcachedKeyLengthError("Key length is > %s"
% self.server_max_key_length)
if not valid_key_chars_re.match(key):
raise Client.MemcachedKeyCharacterError(
"Control characters not allowed")
class _Host(object):
def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0):
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.debug = debug
self.flush_on_reconnect = flush_on_reconnect
if isinstance(host, tuple):
host, self.weight = host
else:
self.weight = 1
# parse the connection string
m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
if not m:
m = re.match(r'^(?P<proto>inet6):'
r'\[(?P<host>[^\[\]]+)\](:(?P<port>[0-9]+))?$', host)
if not m:
m = re.match(r'^(?P<proto>inet):'
r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
hostData = m.groupdict()
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
elif hostData.get('proto') == 'inet6':
self.family = socket.AF_INET6
self.ip = hostData['host']
self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
else:
self.family = socket.AF_INET
self.ip = hostData['host']
self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
self.deaduntil = 0
self.socket = None
self.flush_on_next_connect = 0
self.buffer = ''
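# Illustrative connection strings accepted by the parsing above (hosts and
# paths are examples only; any reachable server works):
#   "127.0.0.1"                     -> inet, default port 11211
#   "127.0.0.1:11211"               -> inet, explicit port
#   "inet:example.com:11211"        -> explicit inet prefix
#   "inet6:[::1]:11211"             -> IPv6, bracketed address
#   "unix:/var/run/memcached.sock"  -> UNIX domain socket
# A ("host:port", weight) tuple may also be passed; the weight controls how
# many hash buckets this server occupies.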
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + self.dead_retry
if self.flush_on_reconnect:
self.flush_on_next_connect = 1
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout)
try:
s.connect(self.address)
except socket.timeout, msg:
self.mark_dead("connect: %s" % msg)
return None
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
if self.flush_on_next_connect:
self.flush()
self.flush_on_next_connect = 0
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self, raise_exception=False):
"""Read a line and return it. If "raise_exception" is set,
raise _ConnectionDeadError if the read fails, otherwise return
an empty string.
"""
buf = self.buffer
if self.socket:
recv = self.socket.recv
else:
recv = lambda bufsize: ''
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
# connection close, let's kill it and raise
self.mark_dead('connection closed in readline()')
if raise_exception:
raise _ConnectionDeadError()
else:
return ''
buf += data
self.buffer = buf[index+2:]
return buf[:index]
def expect(self, text, raise_exception=False):
line = self.readline(raise_exception)
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
% (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
raise _Error( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def flush(self):
self.send_cmd('flush_all')
self.expect('OK')
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
elif self.family == socket.AF_INET6:
return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
import doctest, memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(memcache, globs=globs)
if __name__ == "__main__":
failures = 0
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
for servers in serverList:
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, basestring):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
global failures
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"; failures = failures + 1
return 0
class FooStruct(object):
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Checking results of delete ..."
if mc.get("long") == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
# removed from the protocol
#if test_setget("timed_delete", 'foo'):
# print "Testing timed delete ...",
# if mc.delete("timed_delete", 1):
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
# print "Checking results of timed delete ..."
# if mc.get("timed_delete") == None:
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"; failures = failures + 1
sys.stdout.flush()
# sanity tests
print "Testing sending spaces...",
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
except Client.MemcachedKeyLengthError, msg:
print "FAIL"; failures = failures + 1
else:
print "OK"
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending a unicode-string key...",
try:
x = mc.set(unicode('keyhere'), 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",; failures = failures + 1
try:
x = mc.set((unicode('a')*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",; failures = failures + 1
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using a value larger than the memcached value limit..."
print 'NOTE: "MemCached: while expecting[...]" is normal...'
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",; failures = failures + 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing set_multi() with no memcacheds running",
mc.disconnect_all()
errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
if errors != []:
print "FAIL"; failures = failures + 1
else:
print "OK"
print "Testing delete_multi() with no memcacheds running",
mc.disconnect_all()
ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
if ret != 1:
print "FAIL"; failures = failures + 1
else:
print "OK"
if failures > 0:
print '*** THERE WERE FAILED TESTS'
sys.exit(1)
sys.exit(0)
# vim: ts=4 sw=4 et :
| 36.93728
| 164
| 0.571946
|
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import os
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from binascii import crc32
def cmemcache_hash(key):
return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
serverHashFunction = crc32
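# Illustrative sketch of how a plain string key is routed to a server (no
# specific CRC values assumed; "some_key" is a hypothetical key):
#   h = cmemcache_hash("some_key")   # folds crc32 into the range 1..0x7fff
#   bucket = h % len(buckets)        # index chosen by Client._get_server()
# If the chosen server cannot be connected to, the hash is re-derived up to
# Client._SERVER_RETRIES times. Passing (hashvalue, "some_key") instead of a
# bare key skips the hashing step and uses hashvalue directly.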
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
def decompress(val):
raise _Error("received compressed data but I don't support compression (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
valid_key_chars_re = re.compile('[\x21-\x7e\x80-\xff]+$')
# Original author: Evan Martin of Danga Interactive
__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
__version__ = "1.53"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
class _ConnectionDeadError(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedKeyNoneError(MemcachedKeyError):
pass
class MemcachedKeyTypeError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
server_max_key_length=SERVER_MAX_KEY_LENGTH,
server_max_value_length=SERVER_MAX_VALUE_LENGTH,
dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
cache_cas = False, flush_on_reconnect=0, check_keys=True):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
@param dead_retry: number of seconds before retrying a blacklisted
server. Defaults to 30 seconds.
@param socket_timeout: timeout in seconds for all calls to a server. Defaults
to 3 seconds.
@param cache_cas: (default False) If true, cas operations will be
cached. WARNING: This cache is not expired internally; if you have
a long-running process you will need to expire it manually via
client.reset_cas(), or the cache can grow without bound.
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
Keys longer than this will not be sent to the server.
@param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
Data that is larger than this will not be sent to the server.
@param flush_on_reconnect: optional flag which prevents a scenario that
can cause stale data to be read: If there's more than one memcached
server and the connection to one is interrupted, keys that mapped to
that server will get reassigned to another. If the first server comes
back, those keys will map to it again. If it still has its data, get()s
can read stale data that was overwritten on another server. This flag
is off by default for backwards compatibility.
@param check_keys: (default True) If True, the key is checked to
ensure it is the correct length and composed of the right characters.
"""
local.__init__(self)
self.debug = debug
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.flush_on_reconnect = flush_on_reconnect
self.set_servers(servers)
self.stats = {}
self.cache_cas = cache_cas
self.reset_cas()
self.do_check_key = check_keys
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
self.server_max_value_length = server_max_value_length
# figure out the pickler style
file = StringIO()
try:
pickler = self.pickler(file, protocol = self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
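# Illustrative construction (hosts, weights and tuning values are examples
# only; every keyword shown corresponds to a parameter of __init__ above):
#   mc = Client(["127.0.0.1:11211", ("10.0.0.2:11211", 3)],
#               debug=1,
#               dead_retry=10,          # retry a dead server after 10 s
#               socket_timeout=2,       # per-call socket timeout in seconds
#               flush_on_reconnect=1)   # avoid stale reads when a server returns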
def reset_cas(self):
"""
Reset the cas cache. This is only used if the Client() object
was created with "cache_cas=True". If used, this cache does not
expire internally, so it can grow unbounded if you do not clear it
yourself.
"""
self.cas_ids = {}
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
socket_timeout=self.socket_timeout,
flush_on_reconnect=self.flush_on_reconnect)
for s in servers]
self._init_buckets()
def get_stats(self, stat_args = None):
'''Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
elif s.family == socket.AF_INET6:
name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() in ('END', 'RESET'):
break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
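# Illustrative shape of the returned value (stat names and values come
# straight from the memcached "stats" reply and are not interpreted here):
#   [('127.0.0.1:11211 (1)', {'<stat_name>': '<string value>', ...}), ...]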
def get_slabs(self):
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
elif s.family == socket.AF_INET6:
name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
serverData = {}
data.append(( name, serverData ))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
item = line.split(' ', 2)
#0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
#0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
"""Expire all data in memcache servers that are reachable."""
for s in self.servers:
if not s.connect(): continue
s.flush()
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.deaduntil = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
if time != None:
for key in server_keys[server]:
write("delete %s %d\r\n" % (key, time))
else:
for key in server_keys[server]:
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
for server in dead_servers:
del server_keys[server]
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@param time: number of seconds any subsequent set / update commands
should fail. Defaults to 0 for no delay.
@rtype: int
'''
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None and time != 0:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
line = server.readline()
if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
% repr(line))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value
for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
Returns None if C{key} doesn't exist on server, otherwise it
returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache,
and it must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing or None on error.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
if line == None or line.strip() =='NOT_FOUND': return None
return int(line)
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def append(self, key, val, time=0, min_compress_len=0):
'''Append the value to the end of the existing key's value.
Only stores in memcache if key already exists.
Also see L{prepend}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("append", key, val, time, min_compress_len)
def prepend(self, key, val, time=0, min_compress_len=0):
'''Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("prepend", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be a tuple, with the first element
being the server hash value and the second being the key.
This is useful if you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time at which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
'''
return self._set("set", key, val, time, min_compress_len)
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be a tuple, with the first element
being the server hash value and the second being the key.
This is useful if you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time at which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
yields a larger string than the input, then it is discarded. For
backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
key_extra_len=len(key_prefix)
if key_prefix and self.do_check_key:
self.check_key(key_prefix)
server_keys = {}
prefixed_to_orig_key = {}
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key))
else:
str_orig_key = str(orig_key)
server, key = self._get_server(key_prefix + str_orig_key)
if self.do_check_key:
self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the
number of total packets flying around your network, reducing
total latency, since your app doesn't have to wait for each
round-trip of L{set} before sending the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time at which this value should
expire, either as a delta number of seconds, or an absolute
unix time-since-the-epoch value. See the memcached protocol
docs section "Storage Commands" for more info on <exptime>. We
default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when
sending to memcache. Allows you to efficiently stuff these
keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi(
... {'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
Causes keys 'subspace_key1' and 'subspace_key2' to be
set. Useful in conjunction with a higher-level layer which
applies namespaces to data in memcache. In this case, the
return result would be the list of notset original keys,
prefix not applied.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress()
routine. If the value being cached is a string, then
the length of the string is measured, else if the value
is an object, then the length of the pickle result is
measured. If the resulting attempt at compression yields
a larger string than the input, then it is discarded. For
backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of
memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
mapping.iterkeys(), key_prefix)
dead_servers = []
notstored = []
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]:
store_info = self._val_to_store_info(
mapping[prefixed_to_orig_key[key]],
min_compress_len)
if store_info:
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
time, store_info[1], store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
for server in dead_servers:
del server_keys[server]
# short-circuit if there are no servers, just return all keys
if not server_keys: return(mapping.keys())
for server, keys in server_keys.iteritems():
try:
for key in keys:
if server.readline() == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
if self.picklerIsKeyword:
pickler = self.pickler(file, protocol = self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) > self.server_max_value_length: return(0)
return (flags, len(val), val)
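# Illustrative flag combinations produced above (values follow from the
# _FLAG_* constants defined on Client):
#   "some string"           -> flags 0         (stored as-is)
#   123                     -> flags 2         (_FLAG_INTEGER)
#   123L                    -> flags 4         (_FLAG_LONG)
#   {"a": 1}                -> flags 1         (_FLAG_PICKLE)
#   a large pickled object  -> flags 1|8 == 9  (_FLAG_PICKLE | _FLAG_COMPRESSED)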
def _set(self, cmd, key, val, time, min_compress_len = 0):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
def _unsafe_set():
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1],
self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED", raise_exception=True)
== "STORED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
try:
return _unsafe_set()
except _ConnectionDeadError:
# retry once
try:
if server._get_socket():
return _unsafe_set()
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
if self.do_check_key:
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
def _unsafe_get():
self._statlog(cmd)
try:
server.send_cmd("%s %s" % (cmd, key))
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
rkey, flags, rlen, cas_id, = self._expect_cas_value(server,
raise_exception=True)
if rkey and self.cache_cas:
self.cas_ids[rkey] = cas_id
else:
rkey, flags, rlen, = self._expectvalue(server,
raise_exception=True)
if not rkey:
return None
try:
value = self._recv_value(server, flags, rlen)
finally:
server.expect("END", raise_exception=True)
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
return value
try:
return _unsafe_get()
except _ConnectionDeadError:
# retry once
try:
if server.connect():
return _unsafe_get()
return None
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return None
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
return self._get('get', key)
def gets(self, key):
'''Retrieves a key from the memcache. Used in conjunction with 'cas'.
@return: The value or None.
'''
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
get_multi [ and L{set_multi} ] can take str()-able values like ints / longs as keys too, such as your db primary key fields.
They're run through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
@return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val
line = server.readline()
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return retvals
def _expect_cas_value(self, server, line=None, raise_exception=False):
if not line:
line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len, cas_id = line.split()
return (rkey, int(flags), int(len), int(cas_id))
else:
return (None, None, None, None)
def _expectvalue(self, server, line=None, raise_exception=False):
if not line:
line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
flags = int(flags)
rlen = int(len)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
% (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2]
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
return None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
def check_key(self, key, key_extra_len=0):
"""Checks sanity of key. Fails if:
Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLengthError).
Contains control characters (Raises MemcachedKeyCharacterError).
Is a unicode string (Raises MemcachedStringEncodingError).
Is not a string (Raises MemcachedKeyTypeError).
Is None (Raises MemcachedKeyNoneError).
"""
if isinstance(key, tuple): key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
if isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
if isinstance(key, basestring):
if self.server_max_key_length != 0 and \
len(key) + key_extra_len > self.server_max_key_length:
raise Client.MemcachedKeyLengthError("Key length is > %s"
% self.server_max_key_length)
if not valid_key_chars_re.match(key):
raise Client.MemcachedKeyCharacterError(
"Control characters not allowed")
class _Host(object):
def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0):
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.debug = debug
self.flush_on_reconnect = flush_on_reconnect
if isinstance(host, tuple):
host, self.weight = host
else:
self.weight = 1
m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
if not m:
m = re.match(r'^(?P<proto>inet6):'
r'\[(?P<host>[^\[\]]+)\](:(?P<port>[0-9]+))?$', host)
if not m:
m = re.match(r'^(?P<proto>inet):'
r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
hostData = m.groupdict()
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
elif hostData.get('proto') == 'inet6':
self.family = socket.AF_INET6
self.ip = hostData['host']
self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
else:
self.family = socket.AF_INET
self.ip = hostData['host']
self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
self.deaduntil = 0
self.socket = None
self.flush_on_next_connect = 0
self.buffer = ''
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + self.dead_retry
if self.flush_on_reconnect:
self.flush_on_next_connect = 1
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout)
try:
s.connect(self.address)
except socket.timeout, msg:
self.mark_dead("connect: %s" % msg)
return None
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
if self.flush_on_next_connect:
self.flush()
self.flush_on_next_connect = 0
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self, raise_exception=False):
"""Read a line and return it. If "raise_exception" is set,
raise _ConnectionDeadError if the read fails, otherwise return
an empty string.
"""
buf = self.buffer
if self.socket:
recv = self.socket.recv
else:
recv = lambda bufsize: ''
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
# connection close, let's kill it and raise
self.mark_dead('connection closed in readline()')
if raise_exception:
raise _ConnectionDeadError()
else:
return ''
buf += data
self.buffer = buf[index+2:]
return buf[:index]
def expect(self, text, raise_exception=False):
line = self.readline(raise_exception)
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
% (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
raise _Error( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def flush(self):
self.send_cmd('flush_all')
self.expect('OK')
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
elif self.family == socket.AF_INET6:
return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
import doctest, memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(memcache, globs=globs)
if __name__ == "__main__":
failures = 0
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
for servers in serverList:
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, basestring):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
global failures
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"; failures = failures + 1
return 0
class FooStruct(object):
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Checking results of delete ..."
if mc.get("long") == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"; failures = failures + 1
sys.stdout.flush()
print "Testing sending spaces...",
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
except Client.MemcachedKeyLengthError, msg:
print "FAIL"; failures = failures + 1
else:
print "OK"
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending a unicode-string key...",
try:
x = mc.set(unicode('keyhere'), 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",; failures = failures + 1
try:
x = mc.set((unicode('a')*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",; failures = failures + 1
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using a value larger than the memcached value limit..."
print 'NOTE: "MemCached: while expecting[...]" is normal...'
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",; failures = failures + 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing set_multi() with no memcacheds running",
mc.disconnect_all()
errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
if errors != []:
print "FAIL"; failures = failures + 1
else:
print "OK"
print "Testing delete_multi() with no memcacheds running",
mc.disconnect_all()
ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
if ret != 1:
print "FAIL"; failures = failures + 1
else:
print "OK"
if failures > 0:
print '*** THERE WERE FAILED TESTS'
sys.exit(1)
sys.exit(0)
| false
| true
|
7907ee4e628b32129acbf4bf8f02deab8f4d8296
| 80
|
py
|
Python
|
qtpy/_version.py
|
hwansysgit/qtpy
|
e79af98a46a2fa029a625a44ed71ba96953e0d27
|
[
"MIT"
] | null | null | null |
qtpy/_version.py
|
hwansysgit/qtpy
|
e79af98a46a2fa029a625a44ed71ba96953e0d27
|
[
"MIT"
] | 1
|
2021-01-30T19:12:13.000Z
|
2021-01-30T19:12:13.000Z
|
qtpy/_version.py
|
hwansysgit/qtpy
|
e79af98a46a2fa029a625a44ed71ba96953e0d27
|
[
"MIT"
] | null | null | null |
version_info = (1, 5, 0, 'dev0')
__version__ = '.'.join(map(str, version_info))
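# e.g. '.'.join(map(str, (1, 5, 0, 'dev0'))) evaluates to '1.5.0.dev0'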
| 26.666667
| 46
| 0.65
|
version_info = (1, 5, 0, 'dev0')
__version__ = '.'.join(map(str, version_info))
| true
| true
|
7907eeddd2f2d25f2b3bc404e33f8130b7b979d4
| 872
|
py
|
Python
|
src/fancontroller.py
|
olivierbenard/raspberrypi-fan-controller
|
f79439a0d1beee285b104917c721bee483ad0b4a
|
[
"MIT"
] | null | null | null |
src/fancontroller.py
|
olivierbenard/raspberrypi-fan-controller
|
f79439a0d1beee285b104917c721bee483ad0b4a
|
[
"MIT"
] | null | null | null |
src/fancontroller.py
|
olivierbenard/raspberrypi-fan-controller
|
f79439a0d1beee285b104917c721bee483ad0b4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import vcgencmd
from gpiozero import OutputDevice
# IMPORTANT: the maximum safe temperature is 85°C and the CPU is throttled at 80°C
ON_THRESHOLD = 70 # (degrees Celsius) fan starts at this temperature
OFF_THRESHOLD = 60 # (degrees Celsius) fan shuts down at this temperature
SLEEP_INTERVAL = 5 # (seconds) how often the core temperature is checked
GPIO_PIN = 18 # (number) which GPIO pin is used to control the fan
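# The 10°C gap between ON_THRESHOLD and OFF_THRESHOLD provides hysteresis: once
# started, the fan keeps running until the core cools to 60°C, so it does not
# rapidly toggle on and off when the temperature hovers around a single value.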
def main():
vc = vcgencmd.Vcgencmd()
fan = OutputDevice(GPIO_PIN)
while True:
temperature = int(vc.measure_temp())
# NOTE: fan.value = 1 if "on" else 0
if temperature > ON_THRESHOLD and not fan.value:
fan.on()
elif fan.value and temperature < OFF_THRESHOLD:
fan.off()
time.sleep(SLEEP_INTERVAL)
if __name__ == '__main__':
main()
| 26.424242
| 73
| 0.673165
|
import time
import vcgencmd
from gpiozero import OutputDevice
ON_THRESHOLD = 70
OFF_THRESHOLD = 60
SLEEP_INTERVAL = 5
GPIO_PIN = 18
def main():
vc = vcgencmd.Vcgencmd()
fan = OutputDevice(GPIO_PIN)
while True:
temperature = int(vc.measure_temp())
if temperature > ON_THRESHOLD and not fan.value:
fan.on()
elif fan.value and temperature < OFF_THRESHOLD:
fan.off()
time.sleep(SLEEP_INTERVAL)
if __name__ == '__main__':
main()
| true
| true
|
7907ef0d781da87b4aa04d715e0bd5be9db67085
| 2,295
|
py
|
Python
|
tests/returns/test_get_backupdir_path.py
|
tombaker/mklists_old
|
cf3ca814cf2cfc785a8cdbddd33162b9ee658570
|
[
"MIT"
] | 1
|
2021-07-02T03:41:57.000Z
|
2021-07-02T03:41:57.000Z
|
tests/returns/test_get_backupdir_path.py
|
tombaker/mklists_old
|
cf3ca814cf2cfc785a8cdbddd33162b9ee658570
|
[
"MIT"
] | null | null | null |
tests/returns/test_get_backupdir_path.py
|
tombaker/mklists_old
|
cf3ca814cf2cfc785a8cdbddd33162b9ee658570
|
[
"MIT"
] | null | null | null |
"""Returns full pathname of backup directory."""
import os
import pytest
from pathlib import Path
from mklists.constants import CONFIGFILE_NAME
from mklists.returns import get_backupdir_path
def test_get_backupdir_path(tmp_path):
"""Returns backups Path named for default working directory."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
backdir = "_backups"
datestr = "2020-01-03_1646"
workingdir = Path("agenda")
workingdir.mkdir()
os.chdir(workingdir)
actual = get_backupdir_path(backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / str(workingdir) / datestr
expected_explicit = Path(tmp_path) / "_backups" / "agenda" / "2020-01-03_1646"
assert actual == expected
assert actual == expected_explicit
def test_get_backupdir_path_given_datadir(tmp_path):
"""Returns backups Path named for specified working directory."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_given_datadir_with_slash(tmp_path):
"""Returns backups Path named for specified working directory ending with slash."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a/")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path):
"""Raises exception if no rootdir is found (rootdir is None)."""
os.chdir(tmp_path)
with pytest.raises(SystemExit):
get_backupdir_path()
| 38.898305
| 87
| 0.747277
|
import os
import pytest
from pathlib import Path
from mklists.constants import CONFIGFILE_NAME
from mklists.returns import get_backupdir_path
def test_get_backupdir_path(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
backdir = "_backups"
datestr = "2020-01-03_1646"
workingdir = Path("agenda")
workingdir.mkdir()
os.chdir(workingdir)
actual = get_backupdir_path(backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / str(workingdir) / datestr
expected_explicit = Path(tmp_path) / "_backups" / "agenda" / "2020-01-03_1646"
assert actual == expected
assert actual == expected_explicit
def test_get_backupdir_path_given_datadir(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_given_datadir_with_slash(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a/")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path):
os.chdir(tmp_path)
with pytest.raises(SystemExit):
get_backupdir_path()
| true
| true
|
7907ef1fdaae719e2541f77a2157c6cc097c2789
| 3,238
|
py
|
Python
|
libra_client/shell/account_commands.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 30
|
2019-09-16T12:50:33.000Z
|
2020-10-27T20:06:26.000Z
|
libra_client/shell/account_commands.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 7
|
2019-09-18T14:23:09.000Z
|
2020-03-31T10:10:04.000Z
|
libra_client/shell/account_commands.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 12
|
2019-09-22T15:43:56.000Z
|
2020-08-07T08:51:35.000Z
|
from libra_client.cli.command import Command, blocking_cmd
class AccountCommand(Command):
def get_aliases(self):
return ["account", "a"]
def get_description(self):
return "Account operations"
def execute(self, client, params, **kwargs):
commands = [
AccountCommandCreate(),
AccountCommandListAccounts(),
AccountCommandRecoverWallet(),
AccountCommandWriteRecovery(),
AccountCommandMint()
]
self.subcommand_execute(params[0], commands, client, params[1:], **kwargs)
class AccountCommandCreate(Command):
def get_aliases(self):
return ["create", "c"]
def get_description(self):
return "Create an account. Returns reference ID to use in other operations"
def execute(self, client, params, **kwargs):
print(">> Creating/retrieving next account from wallet")
index, account = client.create_next_account()
print(
"Created/retrieved account #{} address {}".format(
index,
account.address.hex()
)
)
class AccountCommandListAccounts(Command):
def get_aliases(self):
return ["list", "la"]
def get_description(self):
return "Print all accounts that were created or loaded"
def execute(self, client, params, **kwargs):
client.print_all_accounts()
class AccountCommandRecoverWallet(Command):
def get_aliases(self):
return ["recover", "r"]
def get_params_help(self):
return "<file_path>"
def get_description(self):
return "Recover Libra wallet from the file path"
def execute(self, client, params, **kwargs):
print(">> Recovering Wallet")
accounts = client.recover_wallet_accounts(params[1])
print(f"Wallet recovered and the first {len(accounts)} child accounts were derived")
for index, data in enumerate(accounts):
print("#{} address {}".format(index, data.address.hex()))
class AccountCommandWriteRecovery(Command):
def get_aliases(self):
return ["write", "w"]
def get_params_help(self):
return "<file_path>"
def get_description(self):
return "Save Libra wallet mnemonic recovery seed to disk"
def execute(self, client, params, **kwargs):
print(">> Saving Libra wallet mnemonic recovery seed to disk")
client.write_recovery(params[1])
print("Saved mnemonic seed to disk")
class AccountCommandMint(Command):
def get_aliases(self):
return ["mint", "mintb", "m", "mb"]
def get_params_help(self):
return "<receiver_account_ref_id>|<receiver_account_address> <number_of_coins>"
def get_description(self):
return "Mint coins to the account. Suffix 'b' is for blocking"
def execute(self, client, params, **kwargs):
print(">> Minting coins")
is_blocking = blocking_cmd(params[0])
client.mint_coins(params[1], params[2], is_blocking)
if is_blocking:
print("Finished minting!")
else:
print("Mint request submitted")
| 31.436893
| 93
| 0.617356
|
from libra_client.cli.command import Command, blocking_cmd
class AccountCommand(Command):
def get_aliases(self):
return ["account", "a"]
def get_description(self):
return "Account operations"
def execute(self, client, params, **kwargs):
commands = [
AccountCommandCreate(),
AccountCommandListAccounts(),
AccountCommandRecoverWallet(),
AccountCommandWriteRecovery(),
AccountCommandMint()
]
self.subcommand_execute(params[0], commands, client, params[1:], **kwargs)
class AccountCommandCreate(Command):
def get_aliases(self):
return ["create", "c"]
def get_description(self):
return "Create an account. Returns reference ID to use in other operations"
def execute(self, client, params, **kwargs):
print(">> Creating/retrieving next account from wallet")
index, account = client.create_next_account()
print(
"Created/retrieved account #{} address {}".format(
index,
account.address.hex()
)
)
class AccountCommandListAccounts(Command):
def get_aliases(self):
return ["list", "la"]
def get_description(self):
return "Print all accounts that were created or loaded"
def execute(self, client, params, **kwargs):
client.print_all_accounts()
class AccountCommandRecoverWallet(Command):
def get_aliases(self):
return ["recover", "r"]
def get_params_help(self):
return "<file_path>"
def get_description(self):
return "Recover Libra wallet from the file path"
def execute(self, client, params, **kwargs):
print(">> Recovering Wallet")
accounts = client.recover_wallet_accounts(params[1])
print(f"Wallet recovered and the first {len(accounts)} child accounts were derived")
for index, data in enumerate(accounts):
print("#{} address {}".format(index, data.address.hex()))
class AccountCommandWriteRecovery(Command):
def get_aliases(self):
return ["write", "w"]
def get_params_help(self):
return "<file_path>"
def get_description(self):
return "Save Libra wallet mnemonic recovery seed to disk"
def execute(self, client, params, **kwargs):
print(">> Saving Libra wallet mnemonic recovery seed to disk")
client.write_recovery(params[1])
print("Saved mnemonic seed to disk")
class AccountCommandMint(Command):
def get_aliases(self):
return ["mint", "mintb", "m", "mb"]
def get_params_help(self):
return "<receiver_account_ref_id>|<receiver_account_address> <number_of_coins>"
def get_description(self):
return "Mint coins to the account. Suffix 'b' is for blocking"
def execute(self, client, params, **kwargs):
print(">> Minting coins")
is_blocking = blocking_cmd(params[0])
client.mint_coins(params[1], params[2], is_blocking)
if is_blocking:
print("Finished minting!")
else:
print("Mint request submitted")
| true
| true
|
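AccountCommand.execute() above dispatches to its sub-commands through Command.subcommand_execute() and uses blocking_cmd(), both imported from libra_client.cli.command but not shown in this file. The sketch below illustrates the dispatch pattern only; it is an assumption about how that base module behaves, not its actual code.

# Illustrative base class and helper; the real libra_client.cli.command may differ.
class Command:
    def get_aliases(self):
        return []

    def get_description(self):
        return ""

    def execute(self, client, params, **kwargs):
        raise NotImplementedError

    def subcommand_execute(self, name, commands, client, params, **kwargs):
        # Find the sub-command whose alias matches `name` and run it,
        # passing the alias back as params[0] so handlers can inspect it.
        for command in commands:
            if name in command.get_aliases():
                command.execute(client, [name] + list(params), **kwargs)
                return
        print("Unknown command: {}".format(name))

def blocking_cmd(alias):
    # Convention used above: a trailing 'b' (e.g. "mintb") means "wait for confirmation".
    return alias.endswith("b")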
7907ef245208b5af256a9d929c6bca2cff8343b5
| 1,079
|
py
|
Python
|
setup.py
|
abduhbm/label-studio
|
9a5110d411073e951b84099fa29a5abfc7c0f41d
|
[
"Apache-2.0"
] | 5
|
2021-04-09T07:54:38.000Z
|
2021-09-28T11:42:22.000Z
|
setup.py
|
abduhbm/label-studio
|
9a5110d411073e951b84099fa29a5abfc7c0f41d
|
[
"Apache-2.0"
] | 10
|
2021-01-12T05:56:29.000Z
|
2021-05-11T21:37:59.000Z
|
setup.py
|
abduhbm/label-studio
|
9a5110d411073e951b84099fa29a5abfc7c0f41d
|
[
"Apache-2.0"
] | 3
|
2020-09-28T21:34:47.000Z
|
2021-01-29T02:04:19.000Z
|
import setuptools
import label_studio
print('Label Studio', label_studio.__version__)
# Readme
with open('README.md', 'r') as f:
long_description = f.read()
# Module dependencies
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name='label-studio',
version=label_studio.__version__,
author='Heartex',
author_email="hello@heartex.ai",
description='Label Studio annotation tool',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/heartexlabs/label-studio',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=requirements,
python_requires='>=3.5',
entry_points={
'console_scripts': [
'label-studio=label_studio.server:main',
'label-studio-ml=label_studio.ml.server:main'
],
}
)
| 27.666667
| 57
| 0.674699
|
import setuptools
import label_studio
print('Label Studio', label_studio.__version__)
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name='label-studio',
version=label_studio.__version__,
author='Heartex',
author_email="hello@heartex.ai",
description='Label Studio annotation tool',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/heartexlabs/label-studio',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=requirements,
python_requires='>=3.5',
entry_points={
'console_scripts': [
'label-studio=label_studio.server:main',
'label-studio-ml=label_studio.ml.server:main'
],
}
)
| true
| true
|
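The console_scripts entries above mean that installing the package creates label-studio and label-studio-ml executables that import and call label_studio.server:main and label_studio.ml.server:main respectively. As a toy illustration only (the real Label Studio server takes many options and starts a web application), an entry-point target has this shape:

# Toy stand-in for a console_scripts target; not the actual label_studio.server code.
import argparse

def main():
    parser = argparse.ArgumentParser(prog="label-studio")
    parser.add_argument("--port", type=int, default=8080, help="port to listen on")
    args = parser.parse_args()
    print("Would start the annotation server on port", args.port)

if __name__ == "__main__":
    main()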
7907ef80ff200a67a76e74217b1255fe2d30d7ab
| 14,958
|
py
|
Python
|
test/functional/interface_rest.py
|
100milliondollars/NeuQ
|
8670b9e50d4e2edfd2f35dc3058b3112ffb46986
|
[
"MIT"
] | 1
|
2019-08-13T01:44:54.000Z
|
2019-08-13T01:44:54.000Z
|
test/functional/interface_rest.py
|
100milliondollars/NeuQ
|
8670b9e50d4e2edfd2f35dc3058b3112ffb46986
|
[
"MIT"
] | null | null | null |
test/functional/interface_rest.py
|
100milliondollars/NeuQ
|
8670b9e50d4e2edfd2f35dc3058b3112ffb46986
|
[
"MIT"
] | 2
|
2019-08-11T22:01:50.000Z
|
2019-08-13T15:15:12.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.qtumconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.qtum import convert_btc_address_to_qtum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import CBlockHeader
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Bitcoin to node 1")
# Random address so node1's balance doesn't increase
not_related_address = convert_btc_address_to_qtum("2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ")
self.nodes[0].generate(1)
self.sync_all()
for i in range(0, COINBASE_MATURITY, 100):
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("<i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, COINBASE_MATURITY+2) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), 181)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:181], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| 44.650746
| 153
| 0.663993
|
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.qtumconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.qtum import convert_btc_address_to_qtum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import CBlockHeader
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Bitcoin to node 1")
not_related_address = convert_btc_address_to_qtum("2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ")
self.nodes[0].generate(1)
self.sync_all()
for i in range(0, COINBASE_MATURITY, 100):
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(json_obj['chaintipHash'], bb_hash)
assert_equal(len(json_obj['utxos']), 0)
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("<i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash)
assert_equal(chain_height, COINBASE_MATURITY+2) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
        # Create a transaction, check that it's found with /checkmempool, but
        # not found without. Then confirm the transaction and check that it's
        # found with or without /checkmempool.
        # do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1)
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), 181)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:181], response_header_bytes)
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1)
assert_equal(json_obj[0]['hash'], bb_hash)
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5)
self.log.info("Test tx inclusion in the /mempool and /block URIs")
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
assert_greater_than(json_obj['bytes'], 300)
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
newblockhash = self.nodes[1].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| true
| true
|
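Outside the test framework, the same /rest endpoints can be queried with nothing but the Python standard library. The snippet below mirrors the JSON branch of test_rest_request() above; the host and port are placeholders for whatever node you start with -rest, not values taken from this test.

# Standalone sketch mirroring test_rest_request() for JSON responses.
import http.client
import json

def rest_get_json(host, port, uri):
    conn = http.client.HTTPConnection(host, port)
    conn.request("GET", "/rest" + uri + ".json")
    resp = conn.getresponse()
    assert resp.status == 200, resp.status
    return json.loads(resp.read().decode("utf-8"))

# Example usage against a locally running node started with -rest (placeholder address):
# info = rest_get_json("127.0.0.1", 3889, "/chaininfo")
# print(info["bestblockhash"])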
7907efe633bb6e70cd40c9d08e2ff6f97c40fd3d
| 768
|
py
|
Python
|
lipame/lipa/models.py
|
savioabuga/lipame
|
3f34d1679aa1e4981763a31f2ffd4767a19f6a1b
|
[
"MIT"
] | 1
|
2018-06-18T08:56:56.000Z
|
2018-06-18T08:56:56.000Z
|
lipame/lipa/models.py
|
savioabuga/lipame
|
3f34d1679aa1e4981763a31f2ffd4767a19f6a1b
|
[
"MIT"
] | null | null | null |
lipame/lipa/models.py
|
savioabuga/lipame
|
3f34d1679aa1e4981763a31f2ffd4767a19f6a1b
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel, StatusModel
from django.conf import settings
class Booking(TimeStampedModel, StatusModel):
TRAVEL_CLASSES = Choices('economy', 'first_class')
STATUS = Choices(('pending', 'Pending'), ('paid', 'Paid'), ('failed', 'Failed'))
date_of_travel = models.DateTimeField()
travel_class = models.CharField(choices=TRAVEL_CLASSES, default=TRAVEL_CLASSES.economy, max_length=30)
status = models.CharField(choices=STATUS, default=STATUS.pending, max_length=20)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False)
payment_reference = models.CharField(max_length=100, blank=True)
| 48
| 106
| 0.778646
|
from __future__ import unicode_literals
from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel, StatusModel
from django.conf import settings
class Booking(TimeStampedModel, StatusModel):
TRAVEL_CLASSES = Choices('economy', 'first_class')
STATUS = Choices(('pending', 'Pending'), ('paid', 'Paid'), ('failed', 'Failed'))
date_of_travel = models.DateTimeField()
travel_class = models.CharField(choices=TRAVEL_CLASSES, default=TRAVEL_CLASSES.economy, max_length=30)
status = models.CharField(choices=STATUS, default=STATUS.pending, max_length=20)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False)
payment_reference = models.CharField(max_length=100, blank=True)
| true
| true
|
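A short, hypothetical usage sketch for the Booking model above, for example from a Django shell; note that the ForeignKey declares no on_delete argument, so the model as written targets Django versions earlier than 2.0.

# Hypothetical usage of the Booking model; assumes at least one user exists.
from django.contrib.auth import get_user_model
from django.utils import timezone

from lipame.lipa.models import Booking

user = get_user_model().objects.first()
booking = Booking.objects.create(
    date_of_travel=timezone.now(),
    travel_class=Booking.TRAVEL_CLASSES.first_class,
    user=user,
)
booking.status = Booking.STATUS.paid  # e.g. after a successful payment callback
booking.save()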
7907f01c80895bd5e3f5eace8e627fedf6a053da
| 439
|
py
|
Python
|
labinfo13/Candies.py
|
MatiwsxD/ayed-2019-1
|
a5fdbe3a055405150122cf3875cdb0c6afd9eff0
|
[
"MIT"
] | null | null | null |
labinfo13/Candies.py
|
MatiwsxD/ayed-2019-1
|
a5fdbe3a055405150122cf3875cdb0c6afd9eff0
|
[
"MIT"
] | null | null | null |
labinfo13/Candies.py
|
MatiwsxD/ayed-2019-1
|
a5fdbe3a055405150122cf3875cdb0c6afd9eff0
|
[
"MIT"
] | null | null | null |
N = int(input())
line = []
for a in range(N):
line.append(int(input()))
total = 0
curIter = 1
while min(line) < 999999:
valleys = []
for a in range(N):
if line[a] < 999999:
if (a == 0 or line[a] <= line[a - 1]) and (a == N - 1 or line[a] <= line[a + 1]):
valleys.append(a)
for a in valleys:
line[a] = 999999
total += (curIter * len(valleys))
curIter += 1
print(total)
| 20.904762
| 93
| 0.503417
|
N = int(input())
line = []
for a in range(N):
line.append(int(input()))
total = 0
curIter = 1
while min(line) < 999999:
valleys = []
for a in range(N):
if line[a] < 999999:
if (a == 0 or line[a] <= line[a - 1]) and (a == N - 1 or line[a] <= line[a + 1]):
valleys.append(a)
for a in valleys:
line[a] = 999999
total += (curIter * len(valleys))
curIter += 1
print(total)
| true
| true
|
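To make the valley-removal loop above concrete, here is a small self-checking rerun of the same algorithm on a hand-picked example (the example is mine, not part of the original submission): each remaining local minimum is removed per round, and an element removed in round k contributes k to the total.

# Same algorithm as above, wrapped in a function and checked on [1, 2, 1].
def candies(heights):
    line = list(heights)
    total, cur = 0, 1
    while min(line) < 999999:
        valleys = [a for a in range(len(line))
                   if line[a] < 999999
                   and (a == 0 or line[a] <= line[a - 1])
                   and (a == len(line) - 1 or line[a] <= line[a + 1])]
        for a in valleys:
            line[a] = 999999
        total += cur * len(valleys)
        cur += 1
    return total

# Round 1 removes both 1s (1 * 2 = 2), round 2 removes the remaining 2 (2 * 1 = 2).
assert candies([1, 2, 1]) == 4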
7907f0307739bc565385613ad4d4efd3ff531aa9
| 17,412
|
py
|
Python
|
src/transformers/trainer_tf.py
|
tilmanbeck/adapter-transformers
|
ed42ced6983891060bb160c5c4f2c5d64d2c205c
|
[
"Apache-2.0"
] | 63
|
2020-12-09T18:58:16.000Z
|
2022-03-21T02:34:35.000Z
|
src/transformers/trainer_tf.py
|
tilmanbeck/adapter-transformers
|
ed42ced6983891060bb160c5c4f2c5d64d2c205c
|
[
"Apache-2.0"
] | 5
|
2021-01-29T10:33:04.000Z
|
2021-08-25T14:15:27.000Z
|
src/transformers/trainer_tf.py
|
tilmanbeck/adapter-transformers
|
ed42ced6983891060bb160c5c4f2c5d64d2c205c
|
[
"Apache-2.0"
] | 18
|
2020-12-11T20:36:04.000Z
|
2021-12-12T07:04:20.000Z
|
"""Tensorflow trainer class."""
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
model: TFPreTrainedModel
args: TFTrainingArguments
# something similar to a PT Dataset.
    # This is just temporary until we have
# a framework-agnostic approach for datasets.
train_dataset: Optional[tf.data.Dataset]
eval_dataset: Optional[tf.data.Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.gradient_accumulator = GradientAccumulator()
self._setup_training()
def _setup_training(self) -> None:
"""
Setup the different steps to train a model:
- check if all the data are given
- create the proper strategy
- create the features
- prepare the model settings
"""
self._prepare_dataset()
with self.args.strategy.scope():
self._create_optimizer()
_ = self.optimizer.iterations
self._set_loss_and_metric()
self._create_checkpoint_manager()
self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
"""
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
def _create_summary_writer(self) -> None:
"""
Create a summary writer to be able to read the logs in Tensorboard.
"""
self.writer = tf.summary.create_file_writer(self.args.logging_dir)
def _prepare_dataset(self) -> None:
"""
Prepare the training, validation and test data.
"""
if self.train_dataset is not None:
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
if self.args.max_steps > 0:
self.train_steps = self.args.max_steps
else:
self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
self.train_dataset = (
self.train_dataset.cache()
.shuffle(self.num_train_examples)
.batch(self.args.train_batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
if self.args.max_steps > 0:
self.train_dataset = self.train_dataset.repeat(-1)
self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
else:
self.train_steps = 0
if self.eval_dataset is not None:
self.eval_dataset = (
self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
)
self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
"""
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
# This is for the case where the optimizer is not Adam-like such as SGD
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.args.optimizer_name))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
"""
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
"""
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
"""
One step evaluation across replica.
Args:
per_replica_features: the batched features.
per_replica_labels: the batched labels.
Returns:
The loss corresponding to the given batch.
"""
per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
self._run_model, args=(per_replica_features, per_replica_labels, False)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss, per_replica_logits
def _prediction_loop(
self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
logger.info("***** Running %s *****", description)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
step: int = 1
for features, labels in dataset:
step = tf.convert_to_tensor(step, dtype=tf.int64)
loss, logits = self._evaluate_steps(features, labels)
loss = tf.reduce_mean(loss)
if not prediction_loss_only:
if self.args.n_gpu > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
step += 1
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = loss.numpy()
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
"""
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
def train(self) -> None:
"""
Train method to train the model.
"""
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
iterations = self.optimizer.iterations
if iterations.numpy() > 0:
logger.info("Start the training from the last checkpoint")
start_epoch = (iterations.numpy() // self.train_steps) + 1
else:
start_epoch = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Total optimization steps = %d", self.train_steps)
for epoch in range(start_epoch, int(epochs + 1)):
for training_loss in self._training_steps():
step = iterations.numpy()
if self.args.debug:
with self.writer.as_default():
tf.summary.scalar("loss", training_loss, step=step)
if step == 1 and self.args.debug:
with self.writer.as_default():
tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
logs = {}
results = self.evaluate()
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
if callable(self.optimizer.learning_rate):
logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
else:
logs["learning_rate"] = self.optimizer.learning_rate.numpy()
logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
with self.writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=step)
if step % self.args.logging_steps == 0:
logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
if step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
if step % self.train_steps == 0:
break
def _training_steps(self):
"""
Returns a generator over training steps (i.e. parameters update).
"""
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
@tf.function
def _apply_gradients(self):
"""Applies the gradients (cross-replica)."""
self.args.strategy.experimental_run_v2(self._step)
def _step(self):
"""Applies gradients and resets accumulation."""
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
def _accumulate_next_gradients(self):
"""Accumulates the gradients from the next element in dataset."""
iterator = iter(self.train_dataset)
@tf.function
def _accumulate_next():
per_replica_features, per_replica_labels = next(iterator)
return self._accumulate_gradients(per_replica_features, per_replica_labels)
while True:
try:
yield _accumulate_next()
except tf.errors.OutOfRangeError:
break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
"""Accumulates the gradients across all the replica."""
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
"""Forwards a training example and accumulates the gradients."""
per_example_loss, _ = self._run_model(features, labels, True)
gradients = tf.gradients(per_example_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
self.gradient_accumulator(gradients)
return per_example_loss
def _run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Args:
features: the batched features.
labels: the batched labels.
training: run the model in training mode or not
"""
if self.args.mode == "text-classification" or self.args.mode == "token-classification":
logits = self.model(features, training=training)[0]
else:
logits = self.model(features, training=training)
if self.args.mode == "token-classification":
active_loss = tf.reshape(labels, (-1,)) != -1
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
loss = self.loss(labels, reduced_logits)
elif self.args.mode == "question-answering":
start_loss = self.loss(labels["start_position"], logits[0])
end_loss = self.loss(labels["end_position"], logits[1])
loss = (start_loss + end_loss) / 2.0
else:
loss = self.loss(labels, logits)
loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
                temporary until we have a framework-agnostic approach for datasets.
"""
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
"""
Save the pretrained model and create a Tensorflow saved model.
"""
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
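The _accumulate_gradients()/_apply_gradients() pair above performs gradient accumulation across replicas: gradients are summed over gradient_accumulation_steps batches, rescaled by the accumulated step count times the number of replicas, clipped, applied, and then reset. The sketch below shows the same accumulate-then-apply idea on a single device with plain Keras objects; it is an illustration of the pattern, not the TFTrainer implementation.

# Single-device illustration of accumulate-then-apply gradient updates.
import tensorflow as tf

accum_steps = 4
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD(0.1)
loss_fn = tf.keras.losses.MeanSquaredError()
accumulated = None  # one accumulator per trainable variable, created lazily

def train_step(x, y, step):
    global accumulated
    with tf.GradientTape() as tape:
        loss = loss_fn(y, model(x, training=True))
    grads = tape.gradient(loss, model.trainable_variables)
    if accumulated is None:
        accumulated = [tf.Variable(tf.zeros_like(g)) for g in grads]
    for acc, g in zip(accumulated, grads):
        acc.assign_add(g)
    if (step + 1) % accum_steps == 0:
        # Average over the accumulation window, then apply and reset.
        optimizer.apply_gradients(
            [(acc / accum_steps, var) for acc, var in zip(accumulated, model.trainable_variables)]
        )
        for acc in accumulated:
            acc.assign(tf.zeros_like(acc))
    return loss

# Example driver with toy data:
# xs, ys = tf.random.normal((32, 3)), tf.random.normal((32, 1))
# for i in range(8):
#     train_step(xs, ys, i)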
| 39.844394
| 119
| 0.608086
|
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
model: TFPreTrainedModel
args: TFTrainingArguments
train_dataset: Optional[tf.data.Dataset]
eval_dataset: Optional[tf.data.Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.gradient_accumulator = GradientAccumulator()
self._setup_training()
def _setup_training(self) -> None:
self._prepare_dataset()
with self.args.strategy.scope():
self._create_optimizer()
_ = self.optimizer.iterations
self._set_loss_and_metric()
self._create_checkpoint_manager()
self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
def _create_summary_writer(self) -> None:
self.writer = tf.summary.create_file_writer(self.args.logging_dir)
def _prepare_dataset(self) -> None:
if self.train_dataset is not None:
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
if self.args.max_steps > 0:
self.train_steps = self.args.max_steps
else:
self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
self.train_dataset = (
self.train_dataset.cache()
.shuffle(self.num_train_examples)
.batch(self.args.train_batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
if self.args.max_steps > 0:
self.train_dataset = self.train_dataset.repeat(-1)
self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
else:
self.train_steps = 0
if self.eval_dataset is not None:
self.eval_dataset = (
self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
)
self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.args.optimizer_name))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
self._run_model, args=(per_replica_features, per_replica_labels, False)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss, per_replica_logits
def _prediction_loop(
self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
logger.info("***** Running %s *****", description)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
step: int = 1
for features, labels in dataset:
step = tf.convert_to_tensor(step, dtype=tf.int64)
loss, logits = self._evaluate_steps(features, labels)
loss = tf.reduce_mean(loss)
if not prediction_loss_only:
if self.args.n_gpu > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
step += 1
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = loss.numpy()
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
def train(self) -> None:
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
iterations = self.optimizer.iterations
if iterations.numpy() > 0:
logger.info("Start the training from the last checkpoint")
start_epoch = (iterations.numpy() // self.train_steps) + 1
else:
start_epoch = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Total optimization steps = %d", self.train_steps)
for epoch in range(start_epoch, int(epochs + 1)):
for training_loss in self._training_steps():
step = iterations.numpy()
if self.args.debug:
with self.writer.as_default():
tf.summary.scalar("loss", training_loss, step=step)
if step == 1 and self.args.debug:
with self.writer.as_default():
tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
logs = {}
results = self.evaluate()
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
if callable(self.optimizer.learning_rate):
logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
else:
logs["learning_rate"] = self.optimizer.learning_rate.numpy()
logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
with self.writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=step)
if step % self.args.logging_steps == 0:
logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
if step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
if step % self.train_steps == 0:
break
def _training_steps(self):
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
@tf.function
def _apply_gradients(self):
self.args.strategy.experimental_run_v2(self._step)
def _step(self):
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
def _accumulate_next_gradients(self):
iterator = iter(self.train_dataset)
@tf.function
def _accumulate_next():
per_replica_features, per_replica_labels = next(iterator)
return self._accumulate_gradients(per_replica_features, per_replica_labels)
while True:
try:
yield _accumulate_next()
except tf.errors.OutOfRangeError:
break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
per_example_loss, _ = self._run_model(features, labels, True)
gradients = tf.gradients(per_example_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
self.gradient_accumulator(gradients)
return per_example_loss
def _run_model(self, features, labels, training):
if self.args.mode == "text-classification" or self.args.mode == "token-classification":
logits = self.model(features, training=training)[0]
else:
logits = self.model(features, training=training)
if self.args.mode == "token-classification":
active_loss = tf.reshape(labels, (-1,)) != -1
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
loss = self.loss(labels, reduced_logits)
elif self.args.mode == "question-answering":
start_loss = self.loss(labels["start_position"], logits[0])
end_loss = self.loss(labels["end_position"], logits[1])
loss = (start_loss + end_loss) / 2.0
else:
loss = self.loss(labels, logits)
loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
| true
| true
|
7907f27f7a8b22d2515ba776646986e401ea3035
| 7,294
|
py
|
Python
|
methods/latent-pp-models-mem-rjmcmc.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-04-18T11:16:02.000Z
|
2020-04-18T11:16:02.000Z
|
methods/latent-pp-models-mem-rjmcmc.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 6
|
2020-04-13T18:38:04.000Z
|
2022-03-12T00:55:56.000Z
|
methods/latent-pp-models-mem-rjmcmc.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-07-02T04:47:00.000Z
|
2020-07-02T04:47:00.000Z
|
#%%
import pymc3 as pm
import arviz as az
import pandas as pd
import numpy as np
from datetime import datetime
from scipy import stats
import os
import pickle
from scipy import special
import theano.tensor as tt
## List down file paths
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
#%%
###############################################################################
# Read in preparation: data_dates data frame
###############################################################################
filename = os.path.join(os.path.realpath(dir_picklejar), 'save_all_dict')
infile = open(filename,'rb')
clean_data = pickle.load(infile)
infile.close()
#%%
'''
Delete all times more than 1 hour before the start time.
Extend the day to cover all remaining times and remove duplicates.
This part of the code should eventually move to pre-processing.
'''
for key in clean_data.keys():
temp = clean_data[key]
for days in temp.keys():
day_temp = temp[days]
if len(day_temp['hours_since_start_day']) > 0:
            ## Drop times more than 1hr before day start or more than 1hr after day end
day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'].iloc[np.where(day_temp['hours_since_start_day'] > -1)]
day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'].iloc[np.where(day_temp['day_length'] - day_temp['hours_since_start_day'] > -1)]
day_min = np.min(day_temp['hours_since_start_day'])
day_min = np.min([day_min,0])
day_max = np.max(day_temp['hours_since_start_day'])
day_max = np.max([day_max, day_temp['day_length']])
day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'] - day_min
day_temp['hours_since_start_day'] = np.unique(day_temp['hours_since_start_day'])
day_temp['day_length'] = day_max - day_min
#%%
###############################################################################
# Estimation using pymc3
###############################################################################
def exponential_log_complementary_cdf(x, lam):
''' log complementary CDF of exponential distribution '''
return -lam*x
def exponential_log_pdf(x, lam):
    ''' log pdf of exponential distribution '''
return np.log(lam)-lam*x
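# Editorial note (not part of the original script): for an Exponential(lam)
# variable the complementary CDF is exp(-lam*x), so its log is -lam*x (first
# helper above), and the density is lam*exp(-lam*x), so its log is
# log(lam) - lam*x (second helper). E.g. lam=2, x=1 gives -2 and ~-1.307.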
def selfreport_mem(observed, latent, dimon):
'''
observed: Observed self report times
latent: Vector of latent smoking events (length is max)
dimon: Integer saying how many of the latent entries are currently included
'''
total = 1.0
temp_latent = latent[tt.arange(dimon)]
if not tt.all(tt.eq(observed,temp_latent)):
total = -1000000
else:
total = tt.prod(tt.eq(temp_latent,observed)*0.9 + (1-tt.eq(temp_latent,observed))*0.1)
return total
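# Editorial example (hypothetical values, not in the original script): with
# observed = [1.0, 2.5] and latent[:dimon] = [1.0, 2.5], the potential is
# 0.9**2 = 0.81; any mismatch instead returns the large penalty -1000000.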
max_events = 0.0 # Defining max number of events
for participants in clean_data.keys():
for days in clean_data[participants].keys():
max_events = np.max([max_events,len(clean_data[participants][days]['hours_since_start_day'])])
max_events = max_events + 10 # Just to be safe let's add a few more
max_events = max_events.astype('int')
#%%
###############################################################################
'''
Estimation using pymc3.
The model is a static graph, so we handle a variable number of events per day
by allocating a fixed maximum number of events (max_events) and tracking which
of them are currently "on".
'''
###############################################################################
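# Illustrative sketch (editorial assumption, not in the original script): each
# day's self-report times are padded out to max_events entries and the Poisson
# draw says how many latent entries are "on", e.g. with max_events = 5:
#   sr = [1.2, 3.4]
#   init = np.append(sr, np.repeat(0, 5 - len(sr)))  # -> [1.2, 3.4, 0, 0, 0]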
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta = pm.Normal('beta', mu=0, sd=10)
loglamb_observed = beta
lamb_observed = np.exp(loglamb_observed)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
for participants in clean_data.keys():
for days in clean_data[participants].keys():
if len(clean_data[participants][days]['hours_since_start_day']) > 0:
pp_rate = lamb_observed*clean_data[participants][days]['day_length']
num_sr = len(clean_data[participants][days]['hours_since_start_day'])
sr = clean_data[participants][days]['hours_since_start_day']
day_length = clean_data[participants][days]['day_length']
init = np.append(sr, np.repeat(0,max_events-num_sr))
smoke_length = pm.Poisson('num_smokes_%d_%d'%(participants, days), mu=pp_rate, testval = num_sr) # Number of Events in Day
smoke_times = pm.Uniform('smoke_times_%d_%d'%(participants, days), lower = 0.0, upper = day_length, shape = max_events, testval = init) # Location of Events in Day
sr_times = pm.Potential('sr_times_%d_%d'%(participants, days), selfreport_mem(observed=sr, latent=smoke_times, dimon = smoke_length))
#%%
# Sample from posterior distribution
with model:
# posterior_samples = pm.sample(draws=5000, tune=5000, cores=1, target_accept=0.80)
posterior_samples = pm.sample(draws = 2000, tune=2000, init='adapt_diag', cores = 1)
#%%
# Calculate 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
# Produce trace plots
pm.traceplot(posterior_samples)
# Collect results
collect_results = {'model':model,
'posterior_samples':posterior_samples,
'model_summary_logscale':model_summary_logscale}
#%%
# Remove variable from workspace
del model, posterior_samples, model_summary_logscale
#%%
###############################################################################
# Print results from all models
###############################################################################
import matplotlib.pyplot as plt
# Model 0
pm.traceplot(collect_results['posterior_samples'])
print(collect_results['model_summary_logscale'])
plt.figure(figsize=(4,8))
pm.forestplot(collect_results['posterior_samples'], var_names=['beta'], credible_interval=0.95)
# pm.forestplot(collect_results['posterior_samples'], var_names=['beta_day'], credible_interval=0.95)  # 'beta_day' is not defined in this model
#pm.forestplot(collect_results['0']['posterior_samples'], var_names=['alpha'], credible_interval=0.95)
# %%
filename = os.path.join(os.path.realpath(dir_picklejar), 'rjmcmc_models')
outfile = open(filename, 'wb')
pickle.dump(collect_results, outfile)
outfile.close()
# %% Residual code for safekeeping
# # Y_hat_latent = pm.Determinist(of Y_diff_latent)
# # Y_observed = pm.Potential('Y_observed', selfreport_mem(Y_hat_latent))
## Y_hat_observed is 'hours_since_start_day'
## Given hours_since_start_day, use smartdumbRJ.py to generate a new latent event times (Y_hat_latent)
## Given Y_hat_latent, take diff sequence and model as exponential holding times
# loglamb_observed = beta
# lamb_observed = np.exp(loglamb_observed)
# # Define Y_hat_latent
# # Take sequence of differences, Y_diff_latent
# Y_diff_latent = pm.Exponential('Y_diff_latent', lam = lamb_observed)
| 40.522222
| 179
| 0.612832
|
import pymc3 as pm
import arviz as az
import pandas as pd
import numpy as np
from datetime import datetime
from scipy import stats
import os
import pickle
from scipy import special
import theano.tensor as tt
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
| true
| true
|
7907f3583ea01d37420da656639526fb3fd56434
| 2,129
|
py
|
Python
|
src/probnum/filtsmooth/filtsmoothposterior.py
|
ralfrost/probnum
|
6b0988009a9dd7ecda87ba28c9d5c0b8019981b6
|
[
"MIT"
] | null | null | null |
src/probnum/filtsmooth/filtsmoothposterior.py
|
ralfrost/probnum
|
6b0988009a9dd7ecda87ba28c9d5c0b8019981b6
|
[
"MIT"
] | 2
|
2020-12-28T19:37:16.000Z
|
2020-12-28T19:37:31.000Z
|
src/probnum/filtsmooth/filtsmoothposterior.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
"""Abstract Base Class for posteriors over states after applying filtering/smoothing"""
from abc import ABC, abstractmethod
class FiltSmoothPosterior(ABC):
"""Posterior Distribution over States after Filtering/Smoothing"""
@abstractmethod
def __call__(self, location):
"""Evaluate the time-continuous posterior for a given location
Parameters
----------
location : float
Location, or time, at which to evaluate the posterior.
Returns
-------
rv : `RandomVariable`
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Length of the discrete-time solution
Corresponds to the number of filtering/smoothing steps
"""
raise NotImplementedError
@abstractmethod
def __getitem__(self, idx):
"""Return the corresponding index/slice of the discrete-time solution"""
raise NotImplementedError
def sample(self, locations=None, size=()):
"""
Draw samples from the filtering/smoothing posterior.
If nothing is specified, a single sample is drawn (supported on self.locations).
If locations are specified, the samples are drawn on those locations.
If size is specified, more than a single sample is drawn.
Parameters
----------
locations : array_like, optional
            Locations on which the samples are wanted. Default is None, which implies that
            self.locations is used.
size : int or tuple of ints, optional
Indicates how many samples are drawn. Default is an empty tuple, in which case
a single sample is returned.
Returns
-------
numpy.ndarray
Drawn samples. If size has shape (A1, ..., Z1), locations have shape (L,),
and the state space model has shape (A2, ..., Z2), the output has
shape (A1, ..., Z1, L, A2, ..., Z2).
For example: size=4, len(locations)=4, dim=3 gives shape (4, 4, 3).
"""
raise NotImplementedError("Sampling not implemented.")
| 33.793651
| 90
| 0.620479
|
from abc import ABC, abstractmethod
class FiltSmoothPosterior(ABC):
@abstractmethod
def __call__(self, location):
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, idx):
raise NotImplementedError
def sample(self, locations=None, size=()):
raise NotImplementedError("Sampling not implemented.")
| true
| true
|
7907f3922b8472ef13adfa2259c4e2a7cd6c0a0f
| 77
|
py
|
Python
|
src/main/python/exceptions/BluetoothException.py
|
jjoyce0510/autonomous-shipping-vessel
|
6757ecd77ad6ef422223413c57f60278b88b543b
|
[
"MIT"
] | 1
|
2017-11-08T15:20:09.000Z
|
2017-11-08T15:20:09.000Z
|
src/main/python/exceptions/BluetoothException.py
|
jjoyce0510/autonomous-shipping-vessel
|
6757ecd77ad6ef422223413c57f60278b88b543b
|
[
"MIT"
] | null | null | null |
src/main/python/exceptions/BluetoothException.py
|
jjoyce0510/autonomous-shipping-vessel
|
6757ecd77ad6ef422223413c57f60278b88b543b
|
[
"MIT"
] | null | null | null |
# Defines a bluetooth exception
class BluetoothException(Exception):
pass
| 25.666667
| 36
| 0.805195
|
class BluetoothException(Exception):
pass
| true
| true
|
7907f44ecfc19c72ef0f9f60e37c7282e3451efc
| 8,091
|
py
|
Python
|
distributed/multi_lock.py
|
edyounis/distributed
|
bb091d5ec7d3ce4eb4a58e0957cba9cdf3da1d6a
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/multi_lock.py
|
edyounis/distributed
|
bb091d5ec7d3ce4eb4a58e0957cba9cdf3da1d6a
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/multi_lock.py
|
edyounis/distributed
|
bb091d5ec7d3ce4eb4a58e0957cba9cdf3da1d6a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
    The approach is to maintain `self.locks`, which maps a lock (the unique name given to
    `MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
    that request the lock. Additionally, `self.requests` maps a user to its requested
    locks and `self.requests_left` maps a user to the number of locks it still needs.
    Every time a user `x` gets to the front of `self.locks[name] = [x, ...]`, it means
    that `x` now holds the lock `name`; once it holds all of its requested locks,
    `acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
            Number of locks from `locks` that must be acquired.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
                    # Since we got all the locks we need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
            Names of the locks to release.
        id: Hashable
            Identifier of the `MultiLock` instance releasing the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
# Initialise new client
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
            include local coroutine time, network transfer time, etc.
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
True or False whether or not it successfully acquired the lock
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
self._locked = True
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
| 33.995798
| 89
| 0.582252
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list)
self.requests = {}
self.requests_left = {}
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1:
self.requests_left[id] -= 1
if self.requests_left[id] == 0:
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
self._locked = True
return result
def release(self):
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
| true
| true
|
7907f4644ab1640bc7cc59c4b8f7b8282a59b140
| 755
|
py
|
Python
|
scripts/knn_voronoi_plot.py
|
Drishttii/pyprobml
|
30b120e7d4f81ade55c10250193d98398040574b
|
[
"MIT"
] | null | null | null |
scripts/knn_voronoi_plot.py
|
Drishttii/pyprobml
|
30b120e7d4f81ade55c10250193d98398040574b
|
[
"MIT"
] | null | null | null |
scripts/knn_voronoi_plot.py
|
Drishttii/pyprobml
|
30b120e7d4f81ade55c10250193d98398040574b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.spatial import KDTree, Voronoi, voronoi_plot_2d
np.random.seed(42)
data = np.random.rand(25, 2)
vor = Voronoi(data)
print('Using scipy.spatial.voronoi_plot_2d, wait...')
voronoi_plot_2d(vor)
xlim = plt.xlim()
ylim = plt.ylim()
pml.savefig('knnVoronoiMesh.pdf')
plt.show()
print('Using scipy.spatial.KDTree, wait a few seconds...')
plt.figure()
tree = KDTree(data)
x = np.linspace(xlim[0], xlim[1], 200)
y = np.linspace(ylim[0], ylim[1], 200)
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
plt.plot(data[:, 0], data[:, 1], 'ko')
plt.pcolormesh(x, y, tree.query(xy)[1].reshape(200, 200), cmap='jet')
pml.savefig('knnVoronoiColor.pdf')
plt.show()
| 25.166667
| 69
| 0.701987
|
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.spatial import KDTree, Voronoi, voronoi_plot_2d
np.random.seed(42)
data = np.random.rand(25, 2)
vor = Voronoi(data)
print('Using scipy.spatial.voronoi_plot_2d, wait...')
voronoi_plot_2d(vor)
xlim = plt.xlim()
ylim = plt.ylim()
pml.savefig('knnVoronoiMesh.pdf')
plt.show()
print('Using scipy.spatial.KDTree, wait a few seconds...')
plt.figure()
tree = KDTree(data)
x = np.linspace(xlim[0], xlim[1], 200)
y = np.linspace(ylim[0], ylim[1], 200)
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
plt.plot(data[:, 0], data[:, 1], 'ko')
plt.pcolormesh(x, y, tree.query(xy)[1].reshape(200, 200), cmap='jet')
pml.savefig('knnVoronoiColor.pdf')
plt.show()
| true
| true
|
7907f7b28f08f68ba5e1078d40db835496770b86
| 21,749
|
py
|
Python
|
tcex/bin/validate.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tcex/bin/validate.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tcex/bin/validate.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
        This method will check only files ending in **.py** and does not handle import
        validation for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use developer defined app version (deprecated) or package_version from InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
                # no need to check the schema if the json is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
        The layout.json file references params.name values from the install.json file. This
        method validates that layout.json does not reference inputs that do not exist in
        install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layouts.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json.'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
                # set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
| 39.400362
| 98
| 0.542186
|
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
import colorama as c
from pydantic import ValidationError
from stdlib_list import stdlib_list
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
import sqlite3
except ModuleNotFoundError:
pass
class Validate(BinABC):
def __init__(self, ignore_validation: bool) -> None:
super().__init__()
self.ignore_validation = ignore_validation
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft()
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
        find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use developer defined app version (deprecated) or package_version from InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
                # no need to check the schema if the json is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
# do not track hidden or serviceConfig inputs as they should not be in layouts.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json.'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
                # set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
return 'passed' if status else 'failed'
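# --- Hedged illustrative sketch (editor addition, not part of the original
# module). It restates the JSON syntax-check pattern used above: parse each
# file with json.load() and record a pass/fail entry per file. The helper name
# and the shape of the result are assumptions made for illustration only.
import json
import pathlib


def _example_check_json_syntax(directory='.'):
    """Return [{'filename': ..., 'status': ...}] for every *.json file found."""
    results = []
    for path in sorted(pathlib.Path(directory).glob('*.json')):
        status = True
        try:
            with path.open() as fh:
                json.load(fh)  # raises a ValueError subclass on bad syntax
        except ValueError:
            status = False
        results.append({'filename': path.name, 'status': status})
    return results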
| true
| true
|
7907f7bfa4aa7da93fcd44748d4064e05c159089
| 4,298
|
py
|
Python
|
tests/functional/test_objects_issues.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_objects_issues.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_objects_issues.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import coreapi
import pytest
from adcm_client.base import ActionHasIssues
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin import utils
from tests.library.errorcodes import UPGRADE_ERROR
def test_action_should_not_be_run_while_cluster_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "cluster")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
with allure.step(f"Run action with error for cluster {cluster.name}"):
with pytest.raises(ActionHasIssues):
cluster.action(name="install").run()
def test_action_should_not_be_run_while_host_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "host")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
host = provider.host_create(fqdn=utils.random_string())
with allure.step(f"Run action with error for host {host.fqdn}"):
with pytest.raises(ActionHasIssues):
host.action(name="install").run()
def test_action_should_not_be_run_while_hostprovider_has_an_issue(
sdk_client_fs: ADCMClient,
):
bundle_path = utils.get_data_dir(__file__, "provider")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
with allure.step(f"Run action with error for provider {provider.name}"):
with pytest.raises(ActionHasIssues):
provider.action(name="install").run()
def test_when_cluster_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create cluster and upload new one bundle"):
old_bundle_path = utils.get_data_dir(__file__, "cluster")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "cluster")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
cluster = old_bundle.cluster_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade cluster"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
cluster.upgrade().do()
with allure.step("Check if cluster has issues"):
UPGRADE_ERROR.equal(e, "cluster ", " has issue: ")
def test_when_hostprovider_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create hostprovider"):
old_bundle_path = utils.get_data_dir(__file__, "provider")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "provider")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
provider = old_bundle.provider_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade provider"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
provider.upgrade().do()
with allure.step("Check if upgrade locked"):
UPGRADE_ERROR.equal(e)
@allure.link("https://jira.arenadata.io/browse/ADCM-487")
def test_when_component_has_no_constraint_then_cluster_doesnt_have_issues(
sdk_client_fs: ADCMClient,
):
with allure.step("Create cluster (component has no constraint)"):
bundle_path = utils.get_data_dir(__file__, "cluster_component_hasnt_constraint")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
cluster.service_add()
with allure.step("Run action: lock cluster"):
cluster.action(name="lock-cluster").run().try_wait()
with allure.step("Check if state is always-locked"):
cluster.reread()
assert cluster.state == "always-locked"
| 45.242105
| 88
| 0.746626
|
import allure
import coreapi
import pytest
from adcm_client.base import ActionHasIssues
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin import utils
from tests.library.errorcodes import UPGRADE_ERROR
def test_action_should_not_be_run_while_cluster_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "cluster")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
with allure.step(f"Run action with error for cluster {cluster.name}"):
with pytest.raises(ActionHasIssues):
cluster.action(name="install").run()
def test_action_should_not_be_run_while_host_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "host")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
host = provider.host_create(fqdn=utils.random_string())
with allure.step(f"Run action with error for host {host.fqdn}"):
with pytest.raises(ActionHasIssues):
host.action(name="install").run()
def test_action_should_not_be_run_while_hostprovider_has_an_issue(
sdk_client_fs: ADCMClient,
):
bundle_path = utils.get_data_dir(__file__, "provider")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
with allure.step(f"Run action with error for provider {provider.name}"):
with pytest.raises(ActionHasIssues):
provider.action(name="install").run()
def test_when_cluster_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create cluster and upload new one bundle"):
old_bundle_path = utils.get_data_dir(__file__, "cluster")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "cluster")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
cluster = old_bundle.cluster_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade cluster"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
cluster.upgrade().do()
with allure.step("Check if cluster has issues"):
UPGRADE_ERROR.equal(e, "cluster ", " has issue: ")
def test_when_hostprovider_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create hostprovider"):
old_bundle_path = utils.get_data_dir(__file__, "provider")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "provider")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
provider = old_bundle.provider_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade provider"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
provider.upgrade().do()
with allure.step("Check if upgrade locked"):
UPGRADE_ERROR.equal(e)
@allure.link("https://jira.arenadata.io/browse/ADCM-487")
def test_when_component_has_no_constraint_then_cluster_doesnt_have_issues(
sdk_client_fs: ADCMClient,
):
with allure.step("Create cluster (component has no constraint)"):
bundle_path = utils.get_data_dir(__file__, "cluster_component_hasnt_constraint")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
cluster.service_add()
with allure.step("Run action: lock cluster"):
cluster.action(name="lock-cluster").run().try_wait()
with allure.step("Check if state is always-locked"):
cluster.reread()
assert cluster.state == "always-locked"
| true
| true
|
7907f80ed26ea8ab1b224f9116d262ea3518a9ec
| 6,875
|
py
|
Python
|
exampledoc/docs/Extractor.py
|
sofiapasquini/Code-Astro-Group-23-Project
|
97dcbaf1b04822d56582e51332666dc5045e1154
|
[
"MIT"
] | null | null | null |
exampledoc/docs/Extractor.py
|
sofiapasquini/Code-Astro-Group-23-Project
|
97dcbaf1b04822d56582e51332666dc5045e1154
|
[
"MIT"
] | null | null | null |
exampledoc/docs/Extractor.py
|
sofiapasquini/Code-Astro-Group-23-Project
|
97dcbaf1b04822d56582e51332666dc5045e1154
|
[
"MIT"
] | null | null | null |
#define functions that will extract the data from SDSS based on an input RA/DEC
from astroquery.sdss import SDSS
from astropy import coordinates as coords
import pandas as pd
from astroquery.ned import Ned
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
import numpy as np
from astropy import units as u
def ra_dec_format(val):
""" Ra/Dec string formatting
Converts the input string format of a right ascension/ declination coordinate
to one recognizable by astroquery
Args:
val (str): string; an ra/dec expression formatted as "005313.81 +130955.0".
Returns:
string: the ra/dec coordinates re-formatted as "00h53m13.81s +13d09m55.0s"
"""
#ra
hour = val[0:2]
min_ = val[2:4]
sec = val[4:9]
ra = hour+'h'+min_+'m'+sec+'s'
#dec
deg = val[9:13]
min_d = val[13:15]
sec_d = val[15:]
dec = deg+'d'+min_d+'m'+sec_d+'s'
return ra+" "+dec
def extractor(position):
"""
This function extracts the information from the SDSS database and returns
a pandas dataframe with the query region. Please ensure that the 'position'
    input is formatted as '005313.81 +130955.0'.
extractor(str) --> pd.DataFrame
"""
# convert the input position argument to the format recognized by astroquery.SDSS
# position=ra_dec_format(position)
# query the region and get the data
position = ra_dec_format(position)
pos = coords.SkyCoord(position, frame='icrs')
data = SDSS.query_region(pos, spectro=True)
return data.to_pandas()
def downloader(data):
"""
    This function uses the extracted information in order to download spectra,
    separating the data from the SDSS and BOSS surveys.
downloader(pd.Dataframe) --> [list(fits)]
"""
    # create an empty list
spec_list=[]
    # iterate over the rows of the pandas DataFrame
for i in range(len(data)):
results = SDSS.query_specobj(plate = data['plate'][i],
mjd = data['mjd'][i],
fiberID = data['fiberID'][i])
        # try to download the data (SDSS)
try:
spec = SDSS.get_spectra(matches=results)[0]
spec_list.append(spec)
        # if the download fails, the spectrum comes from BOSS rather than SDSS
except:
results.remove_column("instrument")
results.add_column(name="instrument", col="eboss") # replace the instrument column
spec = SDSS.get_spectra(matches=results)[0]
spec_list.append(spec)
return spec_list
# test=downloader(result)
# print(test)
# define a function which grabs the object's redshift from the Ned database (better calibration)- needed for plotting in the object's rest-frame
def redshift(position):
# make sure to format the input position argument such that it is recognizable by astroquery.Ned
# position=ra_dec_format(position)
position = ra_dec_format(position)
pos=coords.SkyCoord(position, frame='icrs') # create a position object
ned_results=Ned.query_region(pos,equinox="J2000", radius=2*u.arcsecond) # query the database
z=ned_results[0][6] # grab the redshift value from the query results
return z
# define a function that transforms an objects wavelength array into the object's rest-frame
def redshift_correct(z, wavelengths): # takes as input the redshift and the array of wavelengths
wavelengths_corrected = wavelengths/(z+1)
return wavelengths_corrected
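# Hedged illustrative sketch (editor addition): the rest-frame correction above
# is simply lambda_rest = lambda_obs / (1 + z). For example, an observed
# feature at 7000 Angstrom for an object at z = 0.4 maps to 7000 / 1.4 = 5000
# Angstrom in the rest frame.
def _example_redshift_correct():
    return redshift_correct(0.4, np.array([7000.0]))  # -> array([5000.])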
# define a function that transforms the results of downloader() into an array of data which will be plotted
def transform_data(spec_list, z): # takes as input a list of (I think?) fits files results and the redshift of the object
# iterate over each file and grab the important data
#fluxes={} # containers for each of the data arrays to be plotted ( will be lists of lists/arrays)
#wavelengths={}
#inverse_variances={} # <- dictionaries!
dict={}
for spec in spec_list:
flux_array=[]
wavelength_array=[]
sigma_array=[]
data=spec[1].data # this is the data part of the file
#print(data.shape[0])
#print(data)
# store the appropriate columns in the designated containers- each row is a single spectrum?
# SOFIA- try a nested dictionary?!?!
for j in range(data.shape[0]):
#print(data[j][0])
#smoothedFlux=convolve(data[0],Box1DKernel(9)) # smooth the fluxes using a boxcar
#print(smoothedFlux)
flux_data = data[j][0]
flux_array.append(flux_data)
wavelengths_uncorrected=10**data[j][1] # the wavelengths (transformed from the log scale)
#print(wavelengths_uncorrected)
wavelengths_corrected=redshift_correct(z, wavelengths_uncorrected) # save the wavelengths after they have been scaled to the rest-frame
#print(wavelengths_corrected)
wavelength_array.append(wavelengths_corrected)
inverse_variance=data[j][2] # the inverse variance of the flux
one_over_sigma=inverse_variance**0.5
sigma=1/one_over_sigma # the one-sigma uncertainty associated with the flux array
sigma_array.append(sigma)
smoothedFlux = convolve(flux_array,Box1DKernel(9))
if 'flux' in dict:
dict['flux'].append([smoothedFlux])
else:
dict['flux'] = [smoothedFlux]
if 'wavelength' in dict:
dict['wavelength'].append([wavelength_array])
else:
dict['wavelength'] = [wavelength_array]
if '1sigma' in dict:
dict['1sigma'].append([sigma_array])
else:
dict['1sigma'] = [sigma_array]
# now return the nested dictionary with three keys:(flux, wavelength and sigma)
# each key should have data.shape[0] number of arrays with all fluxes, wavelength and sigmas for every spec in spec_list
return dict
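# Hedged illustrative sketch (editor addition): transform_data() converts the
# SDSS inverse-variance column into a one-sigma flux uncertainty via
# sigma = 1 / sqrt(ivar); e.g. ivar = 4 gives sigma = 0.5. The helper below is
# hypothetical and only restates that conversion for a single value or array.
def _example_sigma_from_ivar(ivar):
    return 1.0 / np.sqrt(ivar)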
def plot_spec(dict, radec, z): # takes as input the dictionary holding the data, the radec, and the redshift
for i in range(len(dict['wavelength'])):
#extract data
wavelength = dict['wavelength'][i]
sigma = dict['1sigma'][i]
flux = dict['flux'][i]
# instantiate a figure object
fig=plt.figure()
plt.title(str(radec)+str('; ')+'z={}'.format(z))
plt.xlabel("Rest-frame Wavelength [$\AA$]")
plt.ylabel("Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\AA^{-1}$]")
plt.plot(wavelength, flux) # plot the actual data
# now create upper and lower bounds on the uncertainty regions
sigmaUpper=np.add(flux,sigma)
sigmaLower=np.subtract(flux,sigma)
plt.fill_between(wavelength, sigmaLower, sigmaUpper, color='grey', alpha=0.5)
plt.show()
#TEST
radec='223812.39 +213203.4'
z=redshift(radec)
data=extractor(radec)
spec_list=downloader(data)
dic = transform_data(spec_list,z)
plot_spec(dic, radec, z)
| 34.722222
| 147
| 0.666764
|
from astroquery.sdss import SDSS
from astropy import coordinates as coords
import pandas as pd
from astroquery.ned import Ned
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
import numpy as np
from astropy import units as u
def ra_dec_format(val):
hour = val[0:2]
min_ = val[2:4]
sec = val[4:9]
ra = hour+'h'+min_+'m'+sec+'s'
deg = val[9:13]
min_d = val[13:15]
sec_d = val[15:]
dec = deg+'d'+min_d+'m'+sec_d+'s'
return ra+" "+dec
def extractor(position):
position = ra_dec_format(position)
pos = coords.SkyCoord(position, frame='icrs')
data = SDSS.query_region(pos, spectro=True)
return data.to_pandas()
def downloader(data):
spec_list=[]
for i in range(len(data)):
results = SDSS.query_specobj(plate = data['plate'][i],
mjd = data['mjd'][i],
fiberID = data['fiberID'][i])
try:
spec = SDSS.get_spectra(matches=results)[0]
spec_list.append(spec)
except:
results.remove_column("instrument")
results.add_column(name="instrument", col="eboss")
spec = SDSS.get_spectra(matches=results)[0]
spec_list.append(spec)
return spec_list
def redshift(position):
position = ra_dec_format(position)
pos=coords.SkyCoord(position, frame='icrs')
ned_results=Ned.query_region(pos,equinox="J2000", radius=2*u.arcsecond)
z=ned_results[0][6]
return z
def redshift_correct(z, wavelengths): # takes as input the redshift and the array of wavelengths
wavelengths_corrected = wavelengths/(z+1)
return wavelengths_corrected
# define a function that transforms the results of downloader() into an array of data which will be plotted
def transform_data(spec_list, z): # takes as input a list of (I think?) fits files results and the redshift of the object
# iterate over each file and grab the important data
#fluxes={} # containers for each of the data arrays to be plotted ( will be lists of lists/arrays)
#wavelengths={}
#inverse_variances={} # <- dictionaries!
dict={}
for spec in spec_list:
flux_array=[]
wavelength_array=[]
sigma_array=[]
data=spec[1].data # this is the data part of the file
#print(data.shape[0])
#print(data)
# store the appropriate columns in the designated containers- each row is a single spectrum?
# SOFIA- try a nested dictionary?!?!
for j in range(data.shape[0]):
#print(data[j][0])
#smoothedFlux=convolve(data[0],Box1DKernel(9)) # smooth the fluxes using a boxcar
#print(smoothedFlux)
flux_data = data[j][0]
flux_array.append(flux_data)
wavelengths_uncorrected=10**data[j][1] # the wavelengths (transformed from the log scale)
#print(wavelengths_uncorrected)
wavelengths_corrected=redshift_correct(z, wavelengths_uncorrected) # save the wavelengths after they have been scaled to the rest-frame
#print(wavelengths_corrected)
wavelength_array.append(wavelengths_corrected)
inverse_variance=data[j][2] # the inverse variance of the flux
one_over_sigma=inverse_variance**0.5
sigma=1/one_over_sigma # the one-sigma uncertainty associated with the flux array
sigma_array.append(sigma)
smoothedFlux = convolve(flux_array,Box1DKernel(9))
if 'flux' in dict:
dict['flux'].append([smoothedFlux])
else:
dict['flux'] = [smoothedFlux]
if 'wavelength' in dict:
dict['wavelength'].append([wavelength_array])
else:
dict['wavelength'] = [wavelength_array]
if '1sigma' in dict:
dict['1sigma'].append([sigma_array])
else:
dict['1sigma'] = [sigma_array]
# now return the nested dictionary with three keys:(flux, wavelength and sigma)
# each key should have data.shape[0] number of arrays with all fluxes, wavelength and sigmas for every spec in spec_list
return dict
def plot_spec(dict, radec, z): # takes as input the dictionary holding the data, the radec, and the redshift
for i in range(len(dict['wavelength'])):
#extract data
wavelength = dict['wavelength'][i]
sigma = dict['1sigma'][i]
flux = dict['flux'][i]
# instantiate a figure object
fig=plt.figure()
plt.title(str(radec)+str('; ')+'z={}'.format(z))
plt.xlabel("Rest-frame Wavelength [$\AA$]")
plt.ylabel("Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\AA^{-1}$]")
plt.plot(wavelength, flux) # plot the actual data
# now create upper and lower bounds on the uncertainty regions
sigmaUpper=np.add(flux,sigma)
sigmaLower=np.subtract(flux,sigma)
plt.fill_between(wavelength, sigmaLower, sigmaUpper, color='grey', alpha=0.5)
plt.show()
#TEST
radec='223812.39 +213203.4'
z=redshift(radec)
data=extractor(radec)
spec_list=downloader(data)
dic = transform_data(spec_list,z)
plot_spec(dic, radec, z)
| true
| true
|
7907f819f2647b99f87d9ca4a74578c3d905cc76
| 26,149
|
py
|
Python
|
jsonpickle/pickler.py
|
cclauss/jsonpickle
|
18c353f7581698a1056a1ad234b1486ffd51758c
|
[
"BSD-3-Clause"
] | null | null | null |
jsonpickle/pickler.py
|
cclauss/jsonpickle
|
18c353f7581698a1056a1ad234b1486ffd51758c
|
[
"BSD-3-Clause"
] | null | null | null |
jsonpickle/pickler.py
|
cclauss/jsonpickle
|
18c353f7581698a1056a1ad234b1486ffd51758c
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
        This is typically what you want if you need to support integers or
objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
    :param fail_safe: If set to a function, exceptions are ignored when
        pickling; if an exception happens, the function is called and its
        return value is used as the value for the object that caused the error.
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
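# Hedged usage sketch (editor addition, not part of the original module). It
# only exercises options documented in the docstring above; the exact JSON
# text depends on the active backend, so no literal output is asserted here.
def _example_encode_options():
    nested = {'foo': [1, 2, [3, 4]]}
    shallow = encode(nested, max_depth=1)             # deeper levels become repr()
    plain = encode(nested, unpicklable=False)         # plain JSON, no py/* tags
    compact = encode(nested, separators=(',', ':'))   # most compact separators
    return shallow, plain, compact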
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
# maximum amount of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
        whether that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
                # A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
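# Hedged illustrative sketch (editor addition): how Pickler handles new-style
# classes that define __slots__ (see _flatten_newstyle_with_slots above). The
# class and helper names are hypothetical; on the Python versions this module
# targets, the expected result is a plain attribute dict such as {'x': 1, 'y': 2}.
class _ExampleSlotted(object):
    __slots__ = ('x', 'y')

    def __init__(self, x, y):
        self.x = x
        self.y = y


def _example_flatten_slots():
    return Pickler(unpicklable=False).flatten(_ExampleSlotted(1, 2))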
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
| 35.005355
| 88
| 0.575548
|
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
self._depth = -1
self._max_depth = max_depth
self._objs = {}
self._seen = []
self._max_iter = max_iter
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
self._depth += 1
def _pop(self, value):
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
is_new = self._log_ref(obj)
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
if reset:
self.reset()
return self._flatten(obj)
    def _flatten(self, obj):
        if PY2 and isinstance(obj, types.FileType):
            return self._flatten_file(obj)
        if util.is_bytes(obj):
            return self._flatten_bytestring(obj)
        if util.is_primitive(obj):
            return obj
        if self._use_decimal and isinstance(obj, decimal.Decimal):
            return obj
        self._push()
        return self._pop(self._flatten_obj(obj))
    def _max_reached(self):
        return self._depth == self._max_depth
    def _flatten_obj(self, obj):
        self._seen.append(obj)
        max_reached = self._max_reached()
        try:
            in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
            if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
if self.unpicklable:
if self._mkref(obj):
                # We've never seen this object, so return its json representation.
return self._flatten_obj_instance(obj)
            # We've seen this object before, so place an object reference tag in
            # the data. This avoids infinite recursion when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
                # A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
return self._flatten(curmod)
except KeyError:
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
if isinstance(string, string_types):
return (string,)
return string
| true
| true
|
7907f92d955db9bf68a50b78eaf8174c038b395e
| 514
|
py
|
Python
|
tools/LevelCreator/ezTypes.py
|
Kronifer/cj8-repo
|
6d0f4a45b16ea184bc429f7e7b10b752595ea65e
|
[
"MIT"
] | 1
|
2021-07-09T17:23:34.000Z
|
2021-07-09T17:23:34.000Z
|
tools/LevelCreator/ezTypes.py
|
Kronifer/cj8-repo
|
6d0f4a45b16ea184bc429f7e7b10b752595ea65e
|
[
"MIT"
] | 3
|
2021-07-18T15:03:49.000Z
|
2021-07-18T15:04:11.000Z
|
tools/LevelCreator/ezTypes.py
|
Kronifer/cj8-repo
|
6d0f4a45b16ea184bc429f7e7b10b752595ea65e
|
[
"MIT"
] | null | null | null |
# List the type colors for the editor
AIR = (0, 0, 0)
GRASS = (100, 200, 40)
ROCK = (106, 106, 106)
LAVA = (252, 144, 3)
WATER = (0, 0, 255)
PLAYER = (155, 191, 250)
PLAYER_END = (40, 30, 100)
SPIKE_UP = (204, 24, 24)
SPIKE_DOWN = (166, 8, 8)
# List all the used types
types = ['GRASS', 'ROCK', 'LAVA', 'WATER', 'PLAYER', 'SPIKE_UP', 'SPIKE_DOWN', 'PLAYER_END', 'AIR']
colorTypes = [GRASS, ROCK, LAVA, WATER, PLAYER, SPIKE_UP, SPIKE_DOWN, PLAYER_END, AIR]
# Set default type
select = 'GRASS'
colorSelect = GRASS
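# Hedged illustrative sketch (editor addition): the two parallel lists above can
# be zipped into a lookup table so the editor can map a type name to its RGB
# colour. The helper name is hypothetical.
def example_color_for(type_name):
    """Return the RGB tuple for a tile type name, defaulting to AIR."""
    return dict(zip(types, colorTypes)).get(type_name, AIR)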
| 27.052632
| 99
| 0.640078
|
AIR = (0, 0, 0)
GRASS = (100, 200, 40)
ROCK = (106, 106, 106)
LAVA = (252, 144, 3)
WATER = (0, 0, 255)
PLAYER = (155, 191, 250)
PLAYER_END = (40, 30, 100)
SPIKE_UP = (204, 24, 24)
SPIKE_DOWN = (166, 8, 8)
types = ['GRASS', 'ROCK', 'LAVA', 'WATER', 'PLAYER', 'SPIKE_UP', 'SPIKE_DOWN', 'PLAYER_END', 'AIR']
colorTypes = [GRASS, ROCK, LAVA, WATER, PLAYER, SPIKE_UP, SPIKE_DOWN, PLAYER_END, AIR]
select = 'GRASS'
colorSelect = GRASS
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.