| column | dtype | values (⌀ = may be null) |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 239 |
| max_stars_repo_name | string | length 5 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 – 239 |
| max_issues_repo_name | string | length 5 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 – 239 |
| max_forks_repo_name | string | length 5 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
hexsha: 4a17b17f55cc70462f3c6da0e9a412dda1d90844 | size: 617 | ext: py | lang: Python
max_stars:  datasource_service/tests/test_main.py | airavata-courses/DCoders @ cbe19a286f7e28abb031d0fa7f3576275b8999e6 | ["Apache-2.0"] | count: 1 | events: 2022-02-03T08:41:39.000Z to 2022-02-03T08:41:39.000Z
max_issues: datasource_service/tests/test_main.py | airavata-courses/DCoders @ cbe19a286f7e28abb031d0fa7f3576275b8999e6 | ["Apache-2.0"] | count: 23 | events: 2022-01-24T04:51:36.000Z to 2022-03-08T19:58:17.000Z
max_forks:  datasource_service/tests/test_main.py | airavata-courses/DCoders @ cbe19a286f7e28abb031d0fa7f3576275b8999e6 | ["Apache-2.0"] | count: null | events: null
content:
""" Pytest for the GET api { Mock the nexrad object so that it doesn't download the radar object for the
unit testing } """
from fastapi.testclient import TestClient
from datasource_service.main import app
test_client = TestClient(app)
def test_nexrad_data():
""" Test GET api """
response = test_client.get('/api/v1/2013/05/31/KTLX')
assert response.status_code == 200
def test_nexrad_error():
""" Test wrong station input """
response = test_client.get('/api/v1/2013/05/31/KTL')
assert response.status_code == 404
assert response.text == '{"detail":"Radar station is not found"}'
avg_line_length: 28.045455 | max_line_length: 104 | alphanum_fraction: 0.705024
hexsha: 4a17b350911884888d6f9fc9266cdac127ef7259 | size: 2,109 | ext: py | lang: Python
max_stars:  datasets/schema.py | inyukwo1/qgm_decoder @ 70e60afec140ec3e2ee04f980a384e1cf28d761c | ["MIT"] | count: null | events: null
max_issues: datasets/schema.py | inyukwo1/qgm_decoder @ 70e60afec140ec3e2ee04f980a384e1cf28d761c | ["MIT"] | count: null | events: null
max_forks:  datasets/schema.py | inyukwo1/qgm_decoder @ 70e60afec140ec3e2ee04f980a384e1cf28d761c | ["MIT"] | count: null | events: null
content:
import random
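# Lightweight, in-memory representation of a Spider-style database schema
# (table names, column names, primary/foreign keys), keyed by integer ids.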
class Schema:
def __init__(self):
self.db_id = ""
self._table_names = dict()
self._table_names_original = dict()
self._col_names = dict()
self._col_names_original = dict()
self._col_parent = dict()
self._foreign_primary_pairs = []
self._primary_keys = []
def import_from_spider(self, spider_schema):
self.db_id = spider_schema["db_id"]
for tab_num, tab_name in enumerate(spider_schema["table_names"]):
self._table_names[tab_num] = tab_name
for tab_num, tab_name_original in enumerate(spider_schema["table_names_original"]):
self._table_names_original[tab_num] = tab_name_original
for col_num, (par_tab, col_name) in enumerate(spider_schema["column_names"]):
if par_tab != -1:
self._col_names[col_num] = col_name
self._col_parent[col_num] = par_tab
for col_num, (par_tab, col_name_original) in enumerate(spider_schema["column_names_original"]):
if par_tab != -1:
self._col_names_original[col_num] = col_name_original
self._foreign_primary_pairs = spider_schema["foreign_keys"]
self._primary_keys = spider_schema["primary_keys"]
def get_parent_table_id(self, col_id):
return self._col_parent[col_id]
def get_random_table_id(self):
table_id_list = list(self._table_names)
return random.choice(table_id_list)
def get_foreign_primary_pairs(self):
return self._foreign_primary_pairs
def is_primary_key(self, col_id):
return col_id in self._primary_keys
def get_all_table_ids(self):
return list(self._table_names)
def get_table_name(self, table_id):
return self._table_names[table_id]
def get_col_name(self, col_id):
return self._col_names[col_id]
def get_child_col_ids(self, table_id):
col_ids = []
for col_id in self._col_parent:
if self.get_parent_table_id(col_id) == table_id:
col_ids.append(col_id)
return col_ids
avg_line_length: 36.362069 | max_line_length: 103 | alphanum_fraction: 0.662399
hexsha: 4a17b3633e7e3ec4bdd78d0cbf186945c8f45e7c | size: 14,627 | ext: py | lang: Python
max_stars:  qriscloud/_vendor/ldap3/utils/config.py | UQ-RCC/uq-globus-tools @ a5191cd223b841fd404eddc90402947247b6504f | ["Apache-2.0"] | count: null | events: null
max_issues: qriscloud/_vendor/ldap3/utils/config.py | UQ-RCC/uq-globus-tools @ a5191cd223b841fd404eddc90402947247b6504f | ["Apache-2.0"] | count: null | events: null
max_forks:  qriscloud/_vendor/ldap3/utils/config.py | UQ-RCC/uq-globus-tools @ a5191cd223b841fd404eddc90402947247b6504f | ["Apache-2.0"] | count: null | events: null
content:
"""
"""
# Created on 2016.08.31
#
# Author: Giovanni Cannata
#
# Copyright 2013 - 2020 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from sys import stdin, getdefaultencoding
from .. import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, NO_ATTRIBUTES, SEQUENCE_TYPES
from ..core.exceptions import LDAPConfigurationParameterError
# checks
_CLASSES_EXCLUDED_FROM_CHECK = ['subschema']
_ATTRIBUTES_EXCLUDED_FROM_CHECK = [ALL_ATTRIBUTES,
ALL_OPERATIONAL_ATTRIBUTES,
NO_ATTRIBUTES,
'ldapSyntaxes',
'matchingRules',
'matchingRuleUse',
'dITContentRules',
'dITStructureRules',
'nameForms',
'altServer',
'namingContexts',
'supportedControl',
'supportedExtension',
'supportedFeatures',
'supportedCapabilities',
'supportedLdapVersion',
'supportedSASLMechanisms',
'vendorName',
'vendorVersion',
'subschemaSubentry',
'ACL']
_UTF8_ENCODED_SYNTAXES = ['1.2.840.113556.1.4.904', # DN String [MICROSOFT]
'1.2.840.113556.1.4.1362', # String (Case) [MICROSOFT]
'1.3.6.1.4.1.1466.115.121.1.12', # DN String [RFC4517]
'1.3.6.1.4.1.1466.115.121.1.15', # Directory String [RFC4517]
                          '1.3.6.1.4.1.1466.115.121.1.41',  # Postal Address [RFC4517]
'1.3.6.1.4.1.1466.115.121.1.58', # Substring Assertion [RFC4517]
'2.16.840.1.113719.1.1.5.1.6', # Case Ignore List [NOVELL]
'2.16.840.1.113719.1.1.5.1.14', # Tagged String [NOVELL]
'2.16.840.1.113719.1.1.5.1.15', # Tagged Name and String [NOVELL]
'2.16.840.1.113719.1.1.5.1.23', # Tagged Name [NOVELL]
'2.16.840.1.113719.1.1.5.1.25'] # Typed Name [NOVELL]
_UTF8_ENCODED_TYPES = []
_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF = ['msds-memberOfTransitive', 'msds-memberTransitive', 'entryDN']
_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF = ['instanceType', 'nTSecurityDescriptor', 'objectCategory']
_CASE_INSENSITIVE_ATTRIBUTE_NAMES = True
_CASE_INSENSITIVE_SCHEMA_NAMES = True
# abstraction layer
_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX = 'OA_'
# communication
_POOLING_LOOP_TIMEOUT = 10 # number of seconds to wait before restarting a cycle to find an active server in the pool
_RESPONSE_SLEEPTIME = 0.05 # seconds to wait while waiting for a response in asynchronous strategies
_RESPONSE_WAITING_TIMEOUT = 3 # waiting timeout for receiving a response in asynchronous strategies
_SOCKET_SIZE = 4096 # socket byte size
_CHECK_AVAILABILITY_TIMEOUT = 2.5 # default timeout for socket connect when checking availability
_RESET_AVAILABILITY_TIMEOUT = 5 # default timeout for resetting the availability status when checking candidate addresses
_RESTARTABLE_SLEEPTIME = 2 # time to wait in a restartable strategy before retrying the request
_RESTARTABLE_TRIES = 30 # number of times to retry in a restartable strategy before giving up. Set to True for unlimited retries
_REUSABLE_THREADED_POOL_SIZE = 5
_REUSABLE_THREADED_LIFETIME = 3600 # 1 hour
_DEFAULT_THREADED_POOL_NAME = 'REUSABLE_DEFAULT_POOL'
_ADDRESS_INFO_REFRESH_TIME = 300 # seconds to wait before refreshing address info from dns
_ADDITIONAL_SERVER_ENCODINGS = ['latin-1', 'koi8-r'] # some broken LDAP implementation may have different encoding than those expected by RFCs
_ADDITIONAL_CLIENT_ENCODINGS = ['utf-8']
_IGNORE_MALFORMED_SCHEMA = False  # some flaky LDAP servers return a malformed schema. If True, no exception is raised and the schema is thrown away
_DEFAULT_SERVER_ENCODING = 'utf-8' # should always be utf-8
_LDIF_LINE_LENGTH = 78 # as stated in RFC 2849
if stdin and hasattr(stdin, 'encoding') and stdin.encoding:
_DEFAULT_CLIENT_ENCODING = stdin.encoding
elif getdefaultencoding():
_DEFAULT_CLIENT_ENCODING = getdefaultencoding()
else:
_DEFAULT_CLIENT_ENCODING = 'utf-8'
PARAMETERS = ['CASE_INSENSITIVE_ATTRIBUTE_NAMES',
'CASE_INSENSITIVE_SCHEMA_NAMES',
'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX',
'POOLING_LOOP_TIMEOUT',
'RESPONSE_SLEEPTIME',
'RESPONSE_WAITING_TIMEOUT',
'SOCKET_SIZE',
'CHECK_AVAILABILITY_TIMEOUT',
'RESTARTABLE_SLEEPTIME',
'RESTARTABLE_TRIES',
'REUSABLE_THREADED_POOL_SIZE',
'REUSABLE_THREADED_LIFETIME',
'DEFAULT_THREADED_POOL_NAME',
'ADDRESS_INFO_REFRESH_TIME',
'RESET_AVAILABILITY_TIMEOUT',
'DEFAULT_CLIENT_ENCODING',
'DEFAULT_SERVER_ENCODING',
'CLASSES_EXCLUDED_FROM_CHECK',
'ATTRIBUTES_EXCLUDED_FROM_CHECK',
'UTF8_ENCODED_SYNTAXES',
'UTF8_ENCODED_TYPES',
'ADDITIONAL_SERVER_ENCODINGS',
'ADDITIONAL_CLIENT_ENCODINGS',
'IGNORE_MALFORMED_SCHEMA',
'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF',
'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF',
'LDIF_LINE_LENGTH'
]
def get_config_parameter(parameter):
if parameter == 'CASE_INSENSITIVE_ATTRIBUTE_NAMES': # Boolean
return _CASE_INSENSITIVE_ATTRIBUTE_NAMES
elif parameter == 'CASE_INSENSITIVE_SCHEMA_NAMES': # Boolean
return _CASE_INSENSITIVE_SCHEMA_NAMES
elif parameter == 'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX': # String
return _ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX
elif parameter == 'POOLING_LOOP_TIMEOUT': # Integer
return _POOLING_LOOP_TIMEOUT
elif parameter == 'RESPONSE_SLEEPTIME': # Integer
return _RESPONSE_SLEEPTIME
elif parameter == 'RESPONSE_WAITING_TIMEOUT': # Integer
return _RESPONSE_WAITING_TIMEOUT
elif parameter == 'SOCKET_SIZE': # Integer
return _SOCKET_SIZE
elif parameter == 'CHECK_AVAILABILITY_TIMEOUT': # Integer
return _CHECK_AVAILABILITY_TIMEOUT
elif parameter == 'RESTARTABLE_SLEEPTIME': # Integer
return _RESTARTABLE_SLEEPTIME
elif parameter == 'RESTARTABLE_TRIES': # Integer
return _RESTARTABLE_TRIES
elif parameter == 'REUSABLE_THREADED_POOL_SIZE': # Integer
return _REUSABLE_THREADED_POOL_SIZE
elif parameter == 'REUSABLE_THREADED_LIFETIME': # Integer
return _REUSABLE_THREADED_LIFETIME
elif parameter == 'DEFAULT_THREADED_POOL_NAME': # String
return _DEFAULT_THREADED_POOL_NAME
elif parameter == 'ADDRESS_INFO_REFRESH_TIME': # Integer
return _ADDRESS_INFO_REFRESH_TIME
elif parameter == 'RESET_AVAILABILITY_TIMEOUT': # Integer
return _RESET_AVAILABILITY_TIMEOUT
elif parameter in ['DEFAULT_CLIENT_ENCODING', 'DEFAULT_ENCODING']: # String - DEFAULT_ENCODING for backward compatibility
return _DEFAULT_CLIENT_ENCODING
elif parameter == 'DEFAULT_SERVER_ENCODING': # String
return _DEFAULT_SERVER_ENCODING
elif parameter == 'CLASSES_EXCLUDED_FROM_CHECK': # Sequence
if isinstance(_CLASSES_EXCLUDED_FROM_CHECK, SEQUENCE_TYPES):
return _CLASSES_EXCLUDED_FROM_CHECK
else:
return [_CLASSES_EXCLUDED_FROM_CHECK]
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_CHECK': # Sequence
if isinstance(_ATTRIBUTES_EXCLUDED_FROM_CHECK, SEQUENCE_TYPES):
return _ATTRIBUTES_EXCLUDED_FROM_CHECK
else:
return [_ATTRIBUTES_EXCLUDED_FROM_CHECK]
elif parameter == 'UTF8_ENCODED_SYNTAXES': # Sequence
if isinstance(_UTF8_ENCODED_SYNTAXES, SEQUENCE_TYPES):
return _UTF8_ENCODED_SYNTAXES
else:
return [_UTF8_ENCODED_SYNTAXES]
elif parameter == 'UTF8_ENCODED_TYPES': # Sequence
if isinstance(_UTF8_ENCODED_TYPES, SEQUENCE_TYPES):
return _UTF8_ENCODED_TYPES
else:
return [_UTF8_ENCODED_TYPES]
elif parameter in ['ADDITIONAL_SERVER_ENCODINGS', 'ADDITIONAL_ENCODINGS']: # Sequence - ADDITIONAL_ENCODINGS for backward compatibility
if isinstance(_ADDITIONAL_SERVER_ENCODINGS, SEQUENCE_TYPES):
return _ADDITIONAL_SERVER_ENCODINGS
else:
return [_ADDITIONAL_SERVER_ENCODINGS]
elif parameter in ['ADDITIONAL_CLIENT_ENCODINGS']: # Sequence
if isinstance(_ADDITIONAL_CLIENT_ENCODINGS, SEQUENCE_TYPES):
return _ADDITIONAL_CLIENT_ENCODINGS
else:
return [_ADDITIONAL_CLIENT_ENCODINGS]
elif parameter == 'IGNORE_MALFORMED_SCHEMA': # Boolean
return _IGNORE_MALFORMED_SCHEMA
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF': # Sequence
if isinstance(_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF, SEQUENCE_TYPES):
return _ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF
else:
return [_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF]
elif parameter == 'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF': # Sequence
if isinstance(_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF, SEQUENCE_TYPES):
return _IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF
else:
return [_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF]
elif parameter == 'LDIF_LINE_LENGTH': # Integer
return _LDIF_LINE_LENGTH
raise LDAPConfigurationParameterError('configuration parameter %s not valid' % parameter)
def set_config_parameter(parameter, value):
if parameter == 'CASE_INSENSITIVE_ATTRIBUTE_NAMES':
global _CASE_INSENSITIVE_ATTRIBUTE_NAMES
_CASE_INSENSITIVE_ATTRIBUTE_NAMES = value
elif parameter == 'CASE_INSENSITIVE_SCHEMA_NAMES':
global _CASE_INSENSITIVE_SCHEMA_NAMES
_CASE_INSENSITIVE_SCHEMA_NAMES = value
elif parameter == 'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX':
global _ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX
_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX = value
elif parameter == 'POOLING_LOOP_TIMEOUT':
global _POOLING_LOOP_TIMEOUT
_POOLING_LOOP_TIMEOUT = value
elif parameter == 'RESPONSE_SLEEPTIME':
global _RESPONSE_SLEEPTIME
_RESPONSE_SLEEPTIME = value
elif parameter == 'RESPONSE_WAITING_TIMEOUT':
global _RESPONSE_WAITING_TIMEOUT
_RESPONSE_WAITING_TIMEOUT = value
elif parameter == 'SOCKET_SIZE':
global _SOCKET_SIZE
_SOCKET_SIZE = value
elif parameter == 'CHECK_AVAILABILITY_TIMEOUT':
global _CHECK_AVAILABILITY_TIMEOUT
_CHECK_AVAILABILITY_TIMEOUT = value
elif parameter == 'RESTARTABLE_SLEEPTIME':
global _RESTARTABLE_SLEEPTIME
_RESTARTABLE_SLEEPTIME = value
elif parameter == 'RESTARTABLE_TRIES':
global _RESTARTABLE_TRIES
_RESTARTABLE_TRIES = value
elif parameter == 'REUSABLE_THREADED_POOL_SIZE':
global _REUSABLE_THREADED_POOL_SIZE
_REUSABLE_THREADED_POOL_SIZE = value
elif parameter == 'REUSABLE_THREADED_LIFETIME':
global _REUSABLE_THREADED_LIFETIME
_REUSABLE_THREADED_LIFETIME = value
elif parameter == 'DEFAULT_THREADED_POOL_NAME':
global _DEFAULT_THREADED_POOL_NAME
_DEFAULT_THREADED_POOL_NAME = value
elif parameter == 'ADDRESS_INFO_REFRESH_TIME':
global _ADDRESS_INFO_REFRESH_TIME
_ADDRESS_INFO_REFRESH_TIME = value
elif parameter == 'RESET_AVAILABILITY_TIMEOUT':
global _RESET_AVAILABILITY_TIMEOUT
_RESET_AVAILABILITY_TIMEOUT = value
elif parameter in ['DEFAULT_CLIENT_ENCODING', 'DEFAULT_ENCODING']:
global _DEFAULT_CLIENT_ENCODING
_DEFAULT_CLIENT_ENCODING = value
elif parameter == 'DEFAULT_SERVER_ENCODING':
global _DEFAULT_SERVER_ENCODING
_DEFAULT_SERVER_ENCODING = value
elif parameter == 'CLASSES_EXCLUDED_FROM_CHECK':
global _CLASSES_EXCLUDED_FROM_CHECK
_CLASSES_EXCLUDED_FROM_CHECK = value
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_CHECK':
global _ATTRIBUTES_EXCLUDED_FROM_CHECK
_ATTRIBUTES_EXCLUDED_FROM_CHECK = value
elif parameter == 'UTF8_ENCODED_SYNTAXES':
global _UTF8_ENCODED_SYNTAXES
_UTF8_ENCODED_SYNTAXES = value
elif parameter == 'UTF8_ENCODED_TYPES':
global _UTF8_ENCODED_TYPES
_UTF8_ENCODED_TYPES = value
elif parameter in ['ADDITIONAL_SERVER_ENCODINGS', 'ADDITIONAL_ENCODINGS']:
global _ADDITIONAL_SERVER_ENCODINGS
_ADDITIONAL_SERVER_ENCODINGS = value if isinstance(value, SEQUENCE_TYPES) else [value]
elif parameter in ['ADDITIONAL_CLIENT_ENCODINGS']:
global _ADDITIONAL_CLIENT_ENCODINGS
_ADDITIONAL_CLIENT_ENCODINGS = value if isinstance(value, SEQUENCE_TYPES) else [value]
elif parameter == 'IGNORE_MALFORMED_SCHEMA':
global _IGNORE_MALFORMED_SCHEMA
_IGNORE_MALFORMED_SCHEMA = value
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF':
global _ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF
_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF = value
elif parameter == 'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF':
global _IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF
_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF = value
elif parameter == 'LDIF_LINE_LENGTH':
global _LDIF_LINE_LENGTH
_LDIF_LINE_LENGTH = value
else:
raise LDAPConfigurationParameterError('unable to set configuration parameter %s' % parameter)
avg_line_length: 48.756667 | max_line_length: 143 | alphanum_fraction: 0.688658
hexsha: 4a17b38d218ab97c6707705d1cf72f299376d5b1 | size: 909 | ext: py | lang: Python
max_stars:  src/cms/utils/file_utils.py | S10MC2015/cms-django @ b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | count: null | events: null
max_issues: src/cms/utils/file_utils.py | S10MC2015/cms-django @ b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | count: null | events: null
max_forks:  src/cms/utils/file_utils.py | S10MC2015/cms-django @ b08f2be60a9db6c8079ee923de2cd8912f550b12 | ["Apache-2.0"] | count: null | events: null
content:
"""
This module contains helpers for file handling.
"""
from ..forms.media import DocumentForm
def save_file(request):
"""
This function accepts uploaded files, checks if they are valid in respect to the
:class:`~cms.forms.media.document_form.DocumentForm` and stores them to disk if so.
Example usage: :class:`cms.views.media.media_edit_view.MediaEditView`
:param request: The current request submitting the file(s)
:type request: ~django.http.HttpRequest
:return: A dictionary containing the :class:`~cms.forms.media.document_form.DocumentForm` object and the boolean return status
:rtype: dict
"""
status = 0
if request.method == "POST":
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
status = 1
else:
form = DocumentForm()
return {"form": form, "status": status}
avg_line_length: 28.40625 | max_line_length: 130 | alphanum_fraction: 0.674367
hexsha: 4a17b4963eb60cdd8ff48cf4bde3b52050c02b55 | size: 15,753 | ext: py | lang: Python
max_stars:  utils/models.py | cgvvv/MouseTracking @ fc6cceefccfa4f070246fae64b9993ded76421cd | ["MIT"] | count: null | events: null
max_issues: utils/models.py | cgvvv/MouseTracking @ fc6cceefccfa4f070246fae64b9993ded76421cd | ["MIT"] | count: null | events: null
max_forks:  utils/models.py | cgvvv/MouseTracking @ fc6cceefccfa4f070246fae64b9993ded76421cd | ["MIT"] | count: null | events: null
content:
# My collection of available network models...
import tensorflow as tf
import tf_slim as slim
from tf_slim.nets import resnet_v2
from tf_slim.nets import resnet_utils
from tf_slim.nets import inception
from tf_slim.nets import vgg
from .readers import means, scale, atan2
import scipy.ndimage.morphology as morph
import numpy as np
# Concats x/y gradients along the depth dimension
# Used in coordconvs
# Note: You should call with tf.map_fn(lambda by_batch: concat_xygrad_2d(by_batch), input_tensor)
def concat_xygrad_2d(input_tensor):
input_shape = [int(x) for i,x in enumerate(input_tensor.get_shape())]
xgrad = tf.reshape(tf.tile([tf.lin_space(0.0,1.0,input_shape[-2])],[input_shape[-3],1]),np.concatenate([input_shape[0:-1],[1]]))
ygrad = tf.reshape(tf.tile(tf.reshape([tf.lin_space(0.0,1.0,input_shape[-3])],[input_shape[-3],1]),[1,input_shape[-2]]),np.concatenate([input_shape[0:-1],[1]]))
return tf.concat([input_tensor, xgrad, ygrad], axis=-1)
# Fits an ellipse from a mask
# Assumes that the mask is of size [?,3], where [:,0] are x indices and [:,1] are y indices
def fitEll(mask):
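    # Ellipse parameters are recovered from the first and second moments of the
    # mask pixel coordinates: the mean gives the centre, the eigenvalues of the
    # 2x2 covariance give the axis lengths (scaled by 4), and atan2 gives the angle.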
locs = tf.cast(tf.slice(mask,[0,0],[-1,2]),tf.float32)
translations = tf.reduce_mean(locs, 0)
sqlocs = tf.square(locs)
variance = tf.reduce_mean(sqlocs,0)-tf.square(translations)
variance_xy = tf.reduce_mean(tf.reduce_prod(locs, 1),0)-tf.reduce_prod(translations,0)
translations = tf.reverse(translations,[0]) # Note: Moment across X-values gives you y location, so need to reverse
tmp1 = tf.reduce_sum(variance)
tmp2 = tf.sqrt(tf.multiply(4.0,tf.pow(variance_xy,2))+tf.pow(tf.reduce_sum(tf.multiply(variance,[1.0,-1.0])),2))
eigA = tf.multiply(tf.sqrt((tmp1+tmp2)/2.0),4.0)
eigB = tf.multiply(tf.sqrt((tmp1-tmp2)/2.0),4.0)
angle = 0.5*atan2(2.0*variance_xy,tf.reduce_sum(tf.multiply(variance,[1.0,-1.0]))) # Radians
ellfit = tf.stack([tf.slice(translations,[0],[1]),tf.slice(translations,[1],[1]),[eigB],[eigA],[tf.sin(angle)],[tf.cos(angle)]],1)
return tf.reshape(tf.divide(tf.subtract(ellfit,means),scale),[-1])
# It appears that the issue for running this is due to nested loops in the optimizer (cannot train).
# https://github.com/tensorflow/tensorflow/issues/3726
# Both tf.where and tf.gather_nd use loops
# This can be used during inference to get slightly better results (by changing the line in the fitEllFromSeg definition).
def fitEll_weighted(mask, seg):
locs_orig = tf.cast(tf.slice(mask,[0,0],[-1,2]),tf.float32)
weights = tf.gather_nd(seg, mask)
# Normalize to sum of 1
weights_orig = tf.exp(tf.divide(weights,tf.reduce_sum(weights)))
weights_orig = tf.divide(weights_orig,tf.reduce_sum(weights_orig))
weights = tf.reshape(tf.tile(weights_orig,[2]),[-1,2])
# This is the line that breaks it:
locs = tf.multiply(locs_orig,weights)
translations = tf.reduce_sum(locs, 0) # Note: Moment across X-values gives you y location, so need to reverse. This is changed on the return values (index 1, then index 0)
sqlocs = tf.multiply(tf.square(locs_orig),weights)
variance = tf.reduce_sum(sqlocs,0)-tf.square(translations)
variance_xy = tf.reduce_sum(tf.reduce_prod(locs_orig, 1)*weights_orig,0)-tf.reduce_prod(translations,0)
tmp1 = tf.reduce_sum(variance)
tmp2 = tf.sqrt(tf.multiply(4.0,tf.pow(variance_xy,2))+tf.pow(tf.reduce_sum(tf.multiply(variance,[1.0,-1.0])),2))
eigA = tf.multiply(tf.sqrt((tmp1+tmp2)/2.0),4.0)
eigB = tf.multiply(tf.sqrt((tmp1-tmp2)/2.0),4.0)
angle = 0.5*atan2(2.0*variance_xy,tf.reduce_sum(tf.multiply(variance,[1.0,-1.0]))) # Radians
ellfit = tf.stack([tf.slice(translations,[1],[1]),tf.slice(translations,[0],[1]),[eigB],[eigA],[tf.sin(angle)],[tf.cos(angle)]],1)
return tf.reshape(tf.divide(tf.subtract(ellfit,means),scale),[-1])
# Safely applies the threshold to the mask and returns default values if no indices are classified as mouse
def fitEllFromSeg(seg, node_act):
mask = tf.where(tf.greater(seg,node_act))
# NOTE: See note on fitEll_weighted function definition
#return tf.cond(tf.shape(mask)[0]>0, lambda: fitEll_weighted(mask, seg), lambda: tf.to_float([-1.0,-1.0,-1.0,-1.0,-1.0,-1.0]))
return tf.cond(tf.shape(mask)[0]>0, lambda: fitEll(mask), lambda: tf.to_float([-1.0,-1.0,-1.0,-1.0,-1.0,-1.0]))
##########################################################################
# Begin defining all available models
##########################################################################
def construct_segellreg_v8(images, is_training):
batch_norm_params = {'is_training': is_training, 'decay': 0.999, 'updates_collections': None, 'center': True, 'scale': True, 'trainable': True}
# Normalize the image inputs (map_fn used to do a "per batch" calculation)
norm_imgs = tf.map_fn(lambda img: tf.image.per_image_standardization(img), images)
kern_size = [5,5]
filter_size = 8
with tf.variable_scope('SegmentEncoder'):
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
padding='SAME',
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
c1 = slim.conv2d(norm_imgs, filter_size, kern_size)
p1 = slim.max_pool2d(c1, [2,2], scope='pool1') #240x240
c2 = slim.conv2d(p1, filter_size*2, kern_size)
p2 = slim.max_pool2d(c2, [2,2], scope='pool2') #120x120
c3 = slim.conv2d(p2, filter_size*4, kern_size)
p3 = slim.max_pool2d(c3, [2,2], scope='pool3') #60x60
c4 = slim.conv2d(p3, filter_size*8, kern_size)
p4 = slim.max_pool2d(c4, [2,2], scope='pool4') # 30x30
c5 = slim.conv2d(p4, filter_size*16, kern_size)
p5 = slim.max_pool2d(c5, [2,2], scope='pool5') # 15x15
c6 = slim.conv2d(p5, filter_size*32, kern_size)
p6 = slim.max_pool2d(c6, [3,3], stride=3, scope='pool6') # 5x5
c7 = slim.conv2d(p6, filter_size*64, kern_size)
with tf.variable_scope('SegmentDecoder'):
upscale = 2 # Undo the pools once at a time
mynet = slim.conv2d_transpose(c7, filter_size*32, kern_size, stride=[3, 3], activation_fn=None)
mynet = tf.add(mynet, c6)
mynet = slim.conv2d_transpose(mynet, filter_size*16, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c5)
mynet = slim.conv2d_transpose(mynet, filter_size*8, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c4)
mynet = slim.conv2d_transpose(mynet, filter_size*4, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c3)
mynet = slim.conv2d_transpose(mynet, filter_size*2, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c2)
mynet = slim.conv2d_transpose(mynet, filter_size, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c1)
seg = slim.conv2d(mynet, 2, [1,1], scope='seg')
with tf.variable_scope('Ellfit'):
seg_morph = tf.slice(tf.nn.softmax(seg,-1),[0,0,0,0],[-1,-1,-1,1])-tf.slice(tf.nn.softmax(seg,-1),[0,0,0,1],[-1,-1,-1,1])
# And was kept here to just assist in the ellipse-fit for any unwanted noise
filter1 = tf.expand_dims(tf.constant(morph.iterate_structure(morph.generate_binary_structure(2,1),4),dtype=tf.float32),-1)
seg_morph = tf.nn.dilation2d(tf.nn.erosion2d(seg_morph,filter1,[1,1,1,1],[1,1,1,1],"SAME"),filter1,[1,1,1,1],[1,1,1,1],"SAME")
filter2 = tf.expand_dims(tf.constant(morph.iterate_structure(morph.generate_binary_structure(2,1),5),dtype=tf.float32),-1)
seg_morph = tf.nn.erosion2d(tf.nn.dilation2d(seg_morph,filter2,[1,1,1,1],[1,1,1,1],"SAME"),filter2,[1,1,1,1],[1,1,1,1],"SAME")
node_act = tf.constant(0.0,dtype=tf.float32)
# Fit the ellipse from the segmentation mask algorithmically
ellfit = tf.map_fn(lambda mask: fitEllFromSeg(mask, node_act), seg_morph)
with tf.variable_scope('AngleFix'):
mynet = slim.conv2d(c7, 128, kern_size, activation_fn=tf.nn.relu, padding='SAME', weights_initializer=tf.truncated_normal_initializer(0.0, 0.01), weights_regularizer=slim.l2_regularizer(0.0005), normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params)
mynet = slim.conv2d(mynet, 64, kern_size, activation_fn=tf.nn.relu, padding='SAME', weights_initializer=tf.truncated_normal_initializer(0.0, 0.01), weights_regularizer=slim.l2_regularizer(0.0005), normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params)
mynet = slim.flatten(mynet)
angle_bins = slim.fully_connected(mynet, 4, activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='angle_bin')
angles = tf.add(tf.multiply(tf.slice(ellfit, [0,4], [-1,2]), scale[4:5]), means[4:5]) # Extract angles to fix them
sin_angles = tf.slice(angles,[0,0],[-1,1]) # Unmorph the sin(angles)
ang_bins_max = tf.argmax(angle_bins,1) # Note: This is from 0-3, not 1-4
angles = tf.where(tf.equal(ang_bins_max,2), -angles, angles) # Bin 3 always wrong
angles = tf.where(tf.logical_and(tf.equal(ang_bins_max,1), tf.squeeze(tf.less(sin_angles, 0.0))), -angles, angles) # Bin 2 is wrong when sin(ang) < np.sin(np.pi/4.) ... Some bleedover, so < 0.0
angles = tf.where(tf.logical_and(tf.equal(ang_bins_max,3), tf.squeeze(tf.greater(sin_angles, 0.0))), -angles, angles) # Bin 4 is wrong when sin(ang) > -np.sin(np.pi/4.) ... Some bleedover, so > 0.0
angles = tf.divide(tf.subtract(angles, means[4:5]), scale[4:5])
original = tf.slice(ellfit,[0,0],[-1,4])
ellfit = tf.concat([original, angles],1)
return seg, ellfit, angle_bins
# XY binning for 480 x and 480 y bins
def construct_xybin_v1(images, is_training, n_bins):
batch_norm_params = {'is_training': is_training, 'decay': 0.8, 'updates_collections': None, 'center': True, 'scale': True, 'trainable': True}
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
padding='SAME',
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
mynet = slim.repeat(images, 2, slim.conv2d, 16, [3,3], scope='conv1')
mynet = slim.max_pool2d(mynet, [2,2], scope='pool1')
mynet = slim.repeat(mynet, 2, slim.conv2d, 32, [3,3], scope='conv2')
mynet = slim.max_pool2d(mynet, [2,2], scope='pool2')
mynet = slim.repeat(mynet, 2, slim.conv2d, 64, [3,3], scope='conv3')
mynet = slim.max_pool2d(mynet, [2,2], scope='pool3')
mynet = slim.repeat(mynet, 2, slim.conv2d, 128, [3,3], scope='conv4')
mynet = slim.max_pool2d(mynet, [2,2], scope='pool4')
mynet = slim.repeat(mynet, 2, slim.conv2d, 256, [3,3], scope='conv5')
mynet = slim.max_pool2d(mynet, [2,2], scope='pool5')
features = slim.flatten(mynet, scope='flatten')
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
# To add additional fully connected layers...
# Our tests showed no substantial difference
#mynet = slim.fully_connected(mynet, 4096, scope='fc5')
#mynet = slim.dropout(mynet, 0.5, scope='dropout5')
#mynet = slim.fully_connected(mynet, 4096, scope='fc6')
#mynet = slim.dropout(mynet, 0.5, scope='dropout6')
xbins = slim.fully_connected(features, n_bins, activation_fn=None, scope='xbins')
xbins = slim.softmax(xbins, scope='smx')
ybins = slim.fully_connected(features, n_bins, activation_fn=None, scope='ybins')
ybins = slim.softmax(ybins, scope='smy')
mynet = tf.stack([xbins, ybins])
return mynet, features
# Attempt to predict the ellipse-regression directly (using resnet_v2_200)
def construct_ellreg_v3_resnet(images, is_training):
batch_norm_params = {'is_training': is_training, 'decay': 0.8, 'updates_collections': None, 'center': True, 'scale': True, 'trainable': True}
mynet, _ = resnet_v2.resnet_v2_200(images, None, is_training=is_training)
features = tf.reshape(mynet, [-1, 2048])
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
mynet = slim.fully_connected(features, 6, activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='outlayer')
return mynet, features
# Attempt to predict the ellipse-regression directly with coordinate convs
def construct_ellreg_v4_resnet(images, is_training):
batch_norm_params = {'is_training': is_training, 'decay': 0.8, 'updates_collections': None, 'center': True, 'scale': True, 'trainable': True}
input_imgs = tf.map_fn(lambda by_batch: concat_xygrad_2d(by_batch), images)
mynet, _ = resnet_v2.resnet_v2_200(input_imgs, None, is_training=is_training)
features = tf.reshape(mynet, [-1, 2048])
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
mynet = slim.fully_connected(features, 6, activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='outlayer')
return mynet, features
# Segmentation Only Network (no angle prediction)
def construct_segsoft_v5(images, is_training):
batch_norm_params = {'is_training': is_training, 'decay': 0.999, 'updates_collections': None, 'center': True, 'scale': True, 'trainable': True}
# Normalize the image inputs (map_fn used to do a "per batch" calculation)
norm_imgs = tf.map_fn(lambda img: tf.image.per_image_standardization(img), images)
kern_size = [5,5]
filter_size = 8
# Run the segmentation net without pooling
with tf.variable_scope('SegmentEncoder'):
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
padding='SAME',
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
c1 = slim.conv2d(norm_imgs, filter_size, kern_size)
p1 = slim.max_pool2d(c1, [2,2], scope='pool1') #240x240
c2 = slim.conv2d(p1, filter_size*2, kern_size)
p2 = slim.max_pool2d(c2, [2,2], scope='pool2') #120x120
c3 = slim.conv2d(p2, filter_size*4, kern_size)
p3 = slim.max_pool2d(c3, [2,2], scope='pool3') #60x60
c4 = slim.conv2d(p3, filter_size*8, kern_size)
p4 = slim.max_pool2d(c4, [2,2], scope='pool4') # 30x30
c5 = slim.conv2d(p4, filter_size*16, kern_size)
p5 = slim.max_pool2d(c5, [2,2], scope='pool5') # 15x15
c6 = slim.conv2d(p5, filter_size*32, kern_size)
p6 = slim.max_pool2d(c6, [3,3], stride=3, scope='pool6') # 5x5
c7 = slim.conv2d(p6, filter_size*64, kern_size)
with tf.variable_scope('SegmentDecoder'):
upscale = 2 # Undo the pools once at a time
mynet = slim.conv2d_transpose(c7, filter_size*32, kern_size, stride=[3, 3], activation_fn=None)
mynet = tf.add(mynet, c6)
mynet = slim.conv2d_transpose(mynet, filter_size*16, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c5)
mynet = slim.conv2d_transpose(mynet, filter_size*8, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c4)
mynet = slim.conv2d_transpose(mynet, filter_size*4, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c3)
mynet = slim.conv2d_transpose(mynet, filter_size*2, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c2)
mynet = slim.conv2d_transpose(mynet, filter_size, kern_size, stride=[upscale, upscale], activation_fn=None)
mynet = tf.add(mynet, c1)
seg = slim.conv2d(mynet, 2, [1,1], scope='seg')
return seg
avg_line_length: 60.588462 | max_line_length: 266 | alphanum_fraction: 0.718466
hexsha: 4a17b4dbe64c9f537ad434ce34bcf4637d83970a | size: 13,128 | ext: py | lang: Python
max_stars:  test/functional/feature_dbcrash.py | proteanx/Bitcorn-Test @ 87e0245c1cbbb1a662ae0f3a3a9411bbe308ab0f | ["MIT"] | count: 25 | events: 2019-01-05T05:00:11.000Z to 2021-05-03T03:54:07.000Z
max_issues: test/functional/feature_dbcrash.py | Mattras007/BITCORN-1 @ 47a5cdf7fa559aeeacf23f7d0191ba832561260b | ["MIT"] | count: 17 | events: 2019-07-12T22:10:09.000Z to 2021-04-07T17:15:26.000Z
max_forks:  test/functional/feature_dbcrash.py | Mattras007/BITCORN-1 @ 47a5cdf7fa559aeeacf23f7d0191ba832561260b | ["MIT"] | count: 17 | events: 2019-06-09T20:46:37.000Z to 2021-12-31T08:44:19.000Z
content:
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import time
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, ToHex
from test_framework.test_framework import BitCornTestFramework
from test_framework.util import assert_equal, create_confirmed_utxos, hex_str_to_bytes
class ChainstateWriteCrashTest(BitCornTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = False
# Need a bit of extra time for the nodes to start up for this test
self.rpc_timeout = 90
# Set -maxmempool=0 to turn off mempool memory sharing with dbcache
# Set -rpcservertimeout=900 to reduce socket disconnects in this
# long-running test
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]
# Set different crash ratios and cache sizes. Note that not all of
# -dbcache goes to pcoinsTip.
self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
# Node3 is a normal node with default args, except will mine full blocks
self.node3_args = ["-blockmaxweight=4000000"]
self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
# Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
        after 120 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If bitcornd exits, then try again. wait_for_node_exit()
# should raise an exception if bitcornd doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, bitcornd isn't coming back up on restart. Could be a
# bug in bitcornd, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
def submit_block_catch_error(self, node_index, block):
"""Try submitting a block to the given node.
Catch any exceptions that indicate the node has crashed.
Returns true if the block was submitted successfully; false otherwise."""
try:
self.nodes[node_index].submitblock(block)
return True
except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
except OSError as e:
self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
def sync_node3blocks(self, block_hashes):
"""Use submitblock to sync node3's chain with the other nodes
If submitblock fails, restart the node and get the new utxo hash.
If any nodes crash while updating, we'll compare utxo hashes to
ensure recovery was successful."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
"""Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for i in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
output_amount = (input_amount - FEE) // 3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for i in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
def run_test(self):
# Track test coverage statistics
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
self.log.info("Prepped %d utxo entries", len(utxo_list))
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
# Syncing the blocks could cause nodes to crash, so the test begins here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Main test loop:
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized reorg.
for i in range(40):
self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
random_height = random.randint(starting_tip_height, current_height)
self.log.debug("At height %d, considering height %d", current_height, random_height)
if random_height > starting_tip_height:
# Randomly reorg from this point with some probability (1/4 for
# tip, 1/5 for tip-1, ...)
if random.random() < 1.0 / (current_height + 4 - random_height):
self.log.debug("Invalidating block at height %d", random_height)
self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = []
while current_height + 1 > self.nodes[3].getblockcount():
block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
self.log.debug("Syncing %d new blocks...", len(block_hashes))
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
self.log.debug("Node3 utxo count: %d", len(utxo_list))
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
# won't get crashes on shutdown at the end of the test.
self.verify_utxo_hash()
# Check the test coverage
self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
# If no nodes were restarted, we didn't test anything.
assert self.restart_counts != [0, 0, 0]
# Make sure we tested the case of crash-during-recovery.
assert self.crashed_on_restart > 0
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
self.log.warn("Node %d never crashed during utxo flush!", i)
if __name__ == "__main__":
ChainstateWriteCrashTest().main()
avg_line_length: 47.912409 | max_line_length: 120 | alphanum_fraction: 0.642824
hexsha: 4a17b4e4e3feecbed6ebdd61d39feab3bfc5a0dc | size: 233 | ext: py | lang: Python
max_stars:  Python/code case/code case 220.py | amazing-2020/pdf @ 8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d | ["Apache-2.0"] | count: 3 | events: 2021-01-01T13:08:24.000Z to 2021-02-03T09:27:56.000Z
max_issues: Python/code case/code case 220.py | amazing-2020/pdf @ 8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d | ["Apache-2.0"] | count: null | events: null
max_forks:  Python/code case/code case 220.py | amazing-2020/pdf @ 8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d | ["Apache-2.0"] | count: null | events: null
content:
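# Prints a centred diamond of asterisks: rows of width 1, 3, 5, 7, then 5, 3, 1.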
for i in range(1, 5):
print(" "*(4-i), end="")
for j in range(1, 2*i):
print("*", end="")
print()
for i in range(1, 4):
print(" "*(i), end="")
for j in range(7-2*i):
print("*", end="")
print()
avg_line_length: 21.181818 | max_line_length: 28 | alphanum_fraction: 0.424893
hexsha: 4a17b5dd4bdfe68ac498a4bcfe2717f17da615ec | size: 2,051 | ext: py | lang: Python
max_stars:  venv/lib/python3.6/site-packages/astroid/__pkginfo__.py | albixhafa/recipry @ bcf89f6982645f99bcd93fdd95999872d8dc0b89 | ["MIT"] | count: 10 | events: 2020-07-21T21:59:54.000Z to 2021-07-19T11:01:47.000Z
max_issues: Thonny/Lib/site-packages/astroid/__pkginfo__.py | Pydiderot/pydiderotIDE @ a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | count: 51 | events: 2019-10-08T01:53:02.000Z to 2021-06-04T22:02:21.000Z
max_forks:  Thonny/Lib/site-packages/astroid/__pkginfo__.py | Pydiderot/pydiderotIDE @ a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | count: 7 | events: 2020-08-06T17:10:26.000Z to 2022-03-13T02:15:59.000Z
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radosław Ganczarek <radoslaw@ganczarek.in>
# Copyright (c) 2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2017 Hugo <hugovk@users.noreply.github.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Calen Pennington <cale@edx.org>
# Copyright (c) 2018 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid packaging information"""
version = "2.3.3"
numversion = tuple(int(elem) for elem in version.split(".") if elem.isdigit())
extras_require = {}
install_requires = [
"lazy_object_proxy==1.4.*",
"six~=1.12",
"wrapt==1.11.*",
'typed-ast>=1.4.0,<1.5;implementation_name== "cpython" and python_version<"3.8"',
]
# pylint: disable=redefined-builtin; why license is a builtin anyway?
license = "LGPL"
author = "Python Code Quality Authority"
author_email = "code-quality@python.org"
mailinglist = "mailto://%s" % author_email
web = "https://github.com/PyCQA/astroid"
description = "An abstract syntax tree for Python with inference support."
classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
avg_line_length: 39.442308 | max_line_length: 85 | alphanum_fraction: 0.703072
hexsha: 4a17b68fb05a7c2e986f1e6b969212af3ea6081e | size: 3,539 | ext: py | lang: Python
max_stars:  contrib/buildbot/test/test_endpoint_buildDiff.py | ryan763/bitcoin-abc @ d61983e9c7e4eb591ebd1d16711a917d4c70fcc3 | ["MIT"] | count: null | events: null
max_issues: contrib/buildbot/test/test_endpoint_buildDiff.py | ryan763/bitcoin-abc @ d61983e9c7e4eb591ebd1d16711a917d4c70fcc3 | ["MIT"] | count: null | events: null
max_forks:  contrib/buildbot/test/test_endpoint_buildDiff.py | ryan763/bitcoin-abc @ d61983e9c7e4eb591ebd1d16711a917d4c70fcc3 | ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
#
# Copyright (c) 2020 The Bitcoin ABC developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import mock
import requests
import unittest
from unittest.mock import call
from build import Build, BuildStatus
from test.abcbot_fixture import ABCBotFixture
import test.mocks.teamcity
from testutil import AnyWith
class buildDiffRequestQuery():
def __init__(self):
self.stagingRef = "refs/tags/phabricator/diff/1234"
self.targetPHID = "PHID-HMBT-123456"
def __str__(self):
return "?{}".format("&".join("{}={}".format(key, value)
for key, value in self.__dict__.items()))
class EndpointBuildDiffTestCase(ABCBotFixture):
def test_buildDiff(self):
data = buildDiffRequestQuery()
def set_build_configuration(builds):
config = {
"builds": {
}
}
for build in builds:
config["builds"][build.name] = {
"runOnDiff": True
}
self.phab.get_file_content_from_master = mock.Mock()
self.phab.get_file_content_from_master.return_value = json.dumps(
config)
def call_buildDiff(builds):
self.teamcity.session.send.side_effect = [
test.mocks.teamcity.buildInfo(build_id=build.build_id, buildqueue=True) for build in builds
]
response = self.app.post(
'/buildDiff{}'.format(data),
headers=self.headers)
self.assertEqual(response.status_code, 200)
self.phab.get_file_content_from_master.assert_called()
expected_calls = [
call(AnyWith(requests.PreparedRequest, {
"url": "https://teamcity.test/app/rest/buildQueue",
"body": json.dumps({
"branchName": data.stagingRef,
"buildType": {
"id": "BitcoinABC_BitcoinAbcStaging",
},
'properties': {
'property': [
{
'name': 'env.ABC_BUILD_NAME',
'value': build.name,
},
{
'name': 'env.harborMasterTargetPHID',
'value': data.targetPHID,
},
],
},
}),
}))
for build in builds
]
self.teamcity.session.send.assert_has_calls(
expected_calls, any_order=True)
self.teamcity.session.send.reset_mock()
# No diff to run
builds = []
set_build_configuration(builds)
call_buildDiff(builds)
self.teamcity.session.send.assert_not_called()
# Single diff
builds.append(Build(1, BuildStatus.Queued, "build-1"))
set_build_configuration(builds)
call_buildDiff(builds)
# Lot of builds
builds = [Build(i, BuildStatus.Queued, "build-{}".format(i))
for i in range(10)]
set_build_configuration(builds)
call_buildDiff(builds)
if __name__ == '__main__':
unittest.main()
avg_line_length: 33.074766 | max_line_length: 107 | alphanum_fraction: 0.517943
hexsha: 4a17b69dbf7096e8d7988fbef11d7ac2c93cb3ec | size: 5,891 | ext: py | lang: Python
max_stars:  ewh/ewh/environment.py | ryanordille/ewh-simulation @ a4bde94688aacdaa228db63597cac052903eaf96 | ["MIT"] | count: 1 | events: 2016-12-20T23:13:19.000Z to 2016-12-20T23:13:19.000Z
max_issues: ewh/ewh/environment.py | ryanordille/ewh-simulation @ a4bde94688aacdaa228db63597cac052903eaf96 | ["MIT"] | count: null | events: null
max_forks:  ewh/ewh/environment.py | ryanordille/ewh-simulation @ a4bde94688aacdaa228db63597cac052903eaf96 | ["MIT"] | count: 2 | events: 2016-12-21T16:52:41.000Z to 2021-07-22T17:58:58.000Z
content:
import csv
import itertools
import os
import pprint
import math
import config
class Environment(object):
def __init__(self, mapping, time_scaling_factor, reactivation_hours, start_time_step=0):
self._mapping = mapping
self._current_hour = 0
self._tsf = time_scaling_factor
self._current_timestep = start_time_step
self._reactivation_hours = reactivation_hours
self.sync_timestep(start_time_step)
@property
def current_tuple(self):
"""(demand [L/h], ambient [deg C], inlet [deg C])"""
return self._mapping[self._current_hour]
@property
def demand(self):
"""Water demand (in litres per hour)"""
return self.current_tuple[0]
@property
def ambient_temperature(self):
"""Temperature (in degrees C) of area outside of the EWH population"""
return self.current_tuple[1]
@property
def inlet_temperature(self):
"""Temperature (in degrees C) of inlet water"""
return self.current_tuple[2]
@property
def time_scaling_factor(self):
"""Integer number of time steps that comprise exactly one hour"""
return self._tsf
@property
def current_hour(self):
"""Current hour of the simulation, floored"""
return self._current_hour
@property
def time_tuple(self):
"""(Current day, current hour of day, current minute of hour), zero-indexed"""
minutes_since_hour_start = (self._current_timestep * (60/self._tsf)) - (self.current_hour * 60)
return (math.floor(self._current_hour / 24), self._current_hour % 24, math.floor(minutes_since_hour_start))
def is_at_non_peak_boundary(self):
"""Return True if environment is exactly at 10am or 8pm"""
_, hours, minutes = self.time_tuple
during_non_peak = hours in [10, 20]
at_boundary = minutes < (60/self._tsf)
return during_non_peak and at_boundary
def is_at_peak_boundary(self):
"""Return True if environment is exactly at 6am or 4pm"""
_, hours, minutes = self.time_tuple
during_peak = hours in [6, 16]
at_boundary = minutes < (60/self._tsf)
return during_peak and at_boundary
def is_in_reactivation_period(self):
morning = list(range(10, 10 + self._reactivation_hours))
evening = list(range(20, 20 + self._reactivation_hours))
return self.time_tuple[1] in (morning + evening)
def is_at_zone_boundary(self):
minutes = self._reactivation_hours * 60
return self.time_tuple[2] in (0,minutes/4,minutes/2, 3*minutes/4)
def reactivation_zone(self):
minutes_per_zone = (self._reactivation_hours * 60) / 4
_, hour, minute = self.time_tuple
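        # For windows beginning at 10:00 and 20:00, hour % 10 gives the number of
        # whole hours elapsed since the current reactivation window started.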
minutes_since_reactivation_started = ((hour % 10) * 60) + minute
return math.floor(minutes_since_reactivation_started / minutes_per_zone)
@property
def reactivation_hours(self):
return self._reactivation_hours
def sync_timestep(self, time_step_index):
"""Set the hour of the simulation according to the given time step"""
self._current_timestep = time_step_index
self._current_hour = math.floor(self._current_timestep / self._tsf)
def info(self):
return {
'current_hour': self._current_hour,
'demand': self.demand,
'ambient_temperature': self.ambient_temperature,
'inlet_temperature': self.inlet_temperature,
'time_scaling_factor': self._tsf,
}
_environment_singleton = None
def environment():
"""Return the environment used over the whole simulation."""
return _environment_singleton
def setup_temperature_csv(csv_location):
"""Return a list of CSV values containing day & temperature data. The index
of the list represents the day of the year, and the value at that index represents
the temperature at that day.
"""
with open(csv_location) as csvfile:
reader = csv.DictReader(csvfile)
rows = [list(itertools.repeat(float(row['Celsius']), 24)) for row in reader]
return rows
def setup_demand(csv_location):
"""Return a list of CSV values containing hour & temperature data. The index
of the list represents the hour of the day, and the value at that index represents
the demand (in L/h) at that hour.
"""
with open(csv_location) as csvfile:
reader = csv.DictReader(csvfile)
rows = [float(row['Litres/Hour']) for row in reader]
return rows
def setup_environment(csv_directory, time_scaling_factor, reactivation_hours, start_time_step=0):
"""Build up an environment from the time/temperature/demand mappings in the
given CSV directory. The given TSF is also included.
"""
ambient = setup_temperature_csv(os.path.join(csv_directory, 'AirTemperature.csv'))
inlet = setup_temperature_csv(os.path.join(csv_directory, 'IncomingWaterTemperature.csv'))
daily_demand = setup_demand(os.path.join(csv_directory, 'WaterUse.csv'))
yearly_demand = list(itertools.repeat(daily_demand, 365)) # copy for every day
# now we want a mapping of demand/ambient/inlet for every hour
# [(demand for hour 0, ambient 0, inlet 0), (demand 1, ambient 1, inlet 1), ...]
mapping = zipper(yearly_demand, ambient, inlet)
global _environment_singleton  # update the module-level singleton returned by environment()
_environment_singleton = Environment(mapping, time_scaling_factor, reactivation_hours, start_time_step=start_time_step)
return _environment_singleton
def zipper(demand, ambient, inlet):
"""Make the mapping between demand/ambient/inlet temperatures."""
mapping = []
for day_index in range(365):
daily_demand = demand[day_index]
daily_ambient = ambient[day_index]
daily_inlet = inlet[day_index]
for hour_index in range(24):
mapping.append([daily_demand[hour_index], daily_ambient[hour_index], daily_inlet[hour_index]])
return mapping
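# Usage sketch (an illustrative addition, not part of the original module;
# the CSV directory path below is hypothetical):
# env = setup_environment('data/csv', time_scaling_factor=4, reactivation_hours=2)
# env.sync_timestep(4 * 10)                # four steps per hour -> hour 10 of day 0
# print(env.time_tuple)                    # (0, 10, 0)
# print(env.is_at_non_peak_boundary())     # True: exactly 10am, minute 0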
| 38.756579
| 123
| 0.691733
|
4a17b6d592a3893fb3595a860160111792451450
| 390
|
py
|
Python
|
socceraction/__init__.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 371
|
2019-07-25T07:35:00.000Z
|
2022-03-25T11:13:56.000Z
|
socceraction/__init__.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 145
|
2019-08-29T12:49:55.000Z
|
2022-03-31T09:35:05.000Z
|
socceraction/__init__.py
|
pientist/socceraction
|
7f8e666ee5da7c1890c72a2c72042d4c73b90fda
|
[
"MIT"
] | 101
|
2019-08-20T21:07:34.000Z
|
2022-03-26T10:00:00.000Z
|
# -*- coding: utf-8 -*-
"""
SoccerAction
~~~~~~~~~~~~
SoccerAction is a Python package for objectively quantifying the impact of the
individual actions performed by soccer players using event stream data.
Full documentation is at <https://ml-kuleuven.github.io/socceraction/>.
:copyright: (c) 2020 by DTAI KU Leuven.
:license: MIT, see LICENSE for more details.
"""
__version__ = '1.1.1'
| 27.857143
| 78
| 0.723077
|
4a17b6f86b7f3c1aa0a92be0c54aaee73616fe69
| 49,251
|
py
|
Python
|
m2p/polymaker.py
|
NREL/m2p
|
821a6b884439057d44889b0ad97eabb09f7c3e54
|
[
"BSD-3-Clause"
] | 5
|
2021-06-16T17:05:48.000Z
|
2021-11-01T13:41:46.000Z
|
m2p/polymaker.py
|
NREL/m2p
|
821a6b884439057d44889b0ad97eabb09f7c3e54
|
[
"BSD-3-Clause"
] | null | null | null |
m2p/polymaker.py
|
NREL/m2p
|
821a6b884439057d44889b0ad97eabb09f7c3e54
|
[
"BSD-3-Clause"
] | 3
|
2021-07-12T16:10:04.000Z
|
2022-02-08T01:52:32.000Z
|
import pandas as pd
import numpy as np
import re
import random
import ast
import warnings
import itertools
import time
from rdkit import Chem, rdBase
from rdkit.Chem import AllChem
from rdkit import RDLogger
from rdkit.Chem import Descriptors
from ast import literal_eval as leval
from copy import deepcopy
from tqdm import tqdm
import casadi as cas
from casadi import SX,integrator,vertcat
tqdm.pandas()
lg = RDLogger.logger()
lg.setLevel(RDLogger.ERROR)
class PolyMaker():
def __init__ (self):
self.smiles_req = {'ols':'[C,c;!$(C=O)][OH]',
'aliphatic_ols':'[C;!$(C=O);!$([a])][OH]',
'acids':'[#6][#6](=[#8:4])([F,Cl,Br,I,#8H,O-])',
'prime_amines':'[#6;!$(C=O)][NH2;!$([NH2+])]',
'carbonates':'[O]=[C]([F,Cl,Br,I,O])([F,Cl,Br,I,O])',
'acidanhydrides':'[#8]([#6](=[#8]))([#6](=[#8]))',
'prime_thiols':'[#6;!$(C=O)][SH]'}
self.reactions = { 'ester':
{'diols_acids':'[C;!$(C=O);!$([a]):6][OH:1].[#6:2][#6:3](=[O:4])([F,Cl,Br,I,#8H,O-:5])>>'
'[C:6][O:1][#6:3](=[O:4])([#6:2])',
'diacids_ols':'[#6:2][#6:3](=[O:4])([F,Cl,Br,I,#8H,O-:5]).[C;!$(C=O);!$([a]):6][OH:1]>>'
'[C:6][O:1][#6:3](=[O:4])([#6:2])',
'infinite_chain':'([C;!$(C=O);!$([a]):1][OH:2].[#6:3][#6:4](=[O:5])([F,Cl,Br,I,OH,O-:6]))>>'
'[*:1][*:2][*:4](=[*:5])[*:3]'},
'amide':
{'diamines_acids':'[#6;!$(C=O):0][NH2;!$([NH2+]):1].[#6:2][#6:3](=[O:4])([#8H,O-:5])>>'
'[#6:0][NH:1][#6:3](=[O:4])([#6:2])',
'diacids_amines':'[#6:2][#6:3](=[O:4])([#8H,O-:5]).[#6;!$(C=O):0][NH2;!$([NH2+]):1]>>'
'[#6:0][NH:1][#6:3](=[O:4])([#6:2])',
'infinite_chain':'([#6;!$(C=O):1][NH2;!$([NH2+]):2].[#6:3][#6:4](=[O:5])([#8H,O-:6]))>>'
'[*:1][*:2][*:4](=[*:5])[*:3]'},
'carbonate':{
'phosgene':{'diols_carbonates':'[C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5])>>'
'[O:2]=[C:3]([O:1][C,c:0])[X:4]',
'carbonates_diols':'[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5]).[C,c;!$(C=O):0][OH:1]>>'
'[O:2]=[C:3]([O:1][C,c:0])[X:4]',
'infinite_chain':'([C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5]))>>'
'[O:2]=[C:3]([O:4])([O:1][C,c:0])'},
'nonphosgene':{'diols_carbonates':'[C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c])>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]',
'carbonates_diols':'[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c]).[C,c;!$(C=O):0][OH:1]>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]',
'infinite_chain':'([C,c;!$(C=O):0][OH:1].[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c]))>>'
'[O:2]=[C:3]([O:1][C,c:0])[O:4][C,c:6]'}},
'imide':
{'diacidanhydrides_amines':'[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7])).[#6;!$(C=O):0][NH2:1]>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))',
'diamines_acidanhydrides':'[#6;!$(C=O):0][NH2:1].[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7]))>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))',
'infinite_chain':'([#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7])).[#6;!$(C=O):0][NH2:1])>>'
'[#6:0][N:1]([#6:4](=[#8:5]))([#6:6](=[#8:7]))'},
'open_acidanhydrides':
{'add_OH':'[#8:3]([#6:4](=[#8:5]))([#6:6](=[#8:7]))>>'
'[#8:3]([#6:4](=[#8:5])(O))([#6:6](=[#8:7]))'}
}
self.__version__ = '0.1.3.2'
def checksmile(self,s):
'''checks to make sure the monomer is readable by RDKit and
returns the canonical SMILES (an empty string on failure)
Input: string
Returns: string
'''
rdBase.DisableLog('rdApp.error')
try:
mol = Chem.MolFromSmiles(s)
mol = Chem.MolToSmiles(mol)
except:
mol = ''
rdBase.EnableLog('rdApp.error')
return mol
def get_monomers(self,s,stereochemistry=False):
'''parses a string into a list of monomers
the string is separated by '.' and each monomer is checked
for validity
Input: string
Returns: list of strings
'''
try:s=ast.literal_eval(s)
except:pass
if type(s)==str:
s = s.split('.')
if not stereochemistry:s = [s_i.replace('/','').replace('@','') for s_i in s]
monomers = tuple([self.checksmile(s_i) for s_i in s])
if np.any(np.array(monomers)==''): monomers = None
if type(s)==tuple:
monomers=s
return monomers
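# Illustrative example (a sketch; the exact canonical SMILES can vary by RDKit version):
# PolyMaker().get_monomers('OCCO.OC(=O)CCC(=O)O')
#   -> ('OCCO', 'O=C(O)CCC(=O)O')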
def thermoset(self,reactants,mechanism,crosslinker=[],distribution=[],DP=10,replicate_structures=1,verbose=True):
''' Inputs:
reactants: contains smiles strings for reactants used in the polymer for both backbone and crosslinks
a tuple
or a string of monomers
or a pandas dataframe containing a list of monomers as strings with column title 'monomers'
crosslinker: a list of 0's and 1's
each value will correspond to the monomers in reactants
0's will indicate the corresponding monomer is part of the backbone
1's will indicate the corresponding monomer is part of the crosslink
a list of integers
or a column in dataframe that is named 'crosslinker'
example: [0,0,0,1]
distribution: number of mols for each monomer in the reaction. values should be in the same order as reactants
list of floats
or column in dataframe that is named 'mols'
example: [10,10,3,1]
DP: degree of polymerization which is the number of monomer units in the polymer
an integer, if an integer the same DP will be used for the backbone and the crosslinks
a tuple, will contain only 2 values, the first value will be for the backbone and the second
for the crosslinks
mechanism: one of the following strings,
UPE: unsaturated polyester; the backbone will be a polyester with unsaturated bonds and the crosslinks will be vinyls, olefins, or acrylates
replicate_structures: integer, number of replicate structures which will be generated
Returns:
polymer: string
'''
returnpoly = pd.DataFrame()
#converts monomers to tuple if reactants is a dataframe
if type(reactants)==pd.DataFrame:
try: reactants.loc[:,'monomers'] = reactants.apply(lambda row: self.get_monomers(row.monomers),axis=1)
except:pass
for rep in range(0,replicate_structures):
returnpoly_i = pd.DataFrame()
# reactants, crosslinks, etc. should be tuples but arrive as strings in the polymerization methods;
# this puts everything into a dataframe before generating structures
#fixing reactants and building the dataframe
if type(reactants)==pd.DataFrame:
returnpoly_i = reactants
if 'mechanism' not in reactants.columns: returnpoly_i.loc[:,'mechanism'] = mechanism
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
elif type(reactants)==str:
try:
reactants_i = ast.literal_eval(reactants)
except:
reactants_i = self.get_monomers(reactants)
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants_i))
returnpoly_i.loc[:,'distribution']=pd.Series(str(distribution))
returnpoly_i.loc[:,'crosslinker']=pd.Series(str(crosslinker))
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
elif type(reactants)==tuple:
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants))
returnpoly_i.loc[:,'distribution']=pd.Series(str(distribution))
returnpoly_i.loc[:,'crosslinker']=pd.Series(str(crosslinker))
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly_i.loc[:,'mechanism'] = mechanism
else:
raise ValueError('Data type not recognized')
#building dataframe
returnpoly = pd.concat([returnpoly,returnpoly_i])
# build polymers
if verbose:
returnpoly[['polymer','mechanism']] = returnpoly.progress_apply(
lambda row:
self.__polymerizemechanism_thermoset(
leval(row.monomers),
row.mechanism,
leval(row.crosslinker),
leval(row.distribution),
DP),
axis=1)
else:
returnpoly[['polymer','mechanism']] = returnpoly.apply(
lambda row:
self.__polymerizemechanism_thermoset(
leval(row.monomers),
row.mechanism,
leval(row.crosslinker),
leval(row.distribution),
DP),
axis=1)
returnpoly = returnpoly.sort_index().sort_values('replicate_structure')
# BUILD STRUCTURE
return returnpoly
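# Usage sketch (an illustrative addition; the monomer SMILES are hypothetical examples):
# pm = PolyMaker()
# df = pm.thermoset(('OCCO', 'OC(=O)/C=C/C(=O)O', 'C=Cc1ccccc1'),
#                   mechanism='UPE', crosslinker=[0, 0, 1],
#                   distribution=[10, 10, 3], DP=4)
# print(df.polymer.iloc[0])  # SMILES of one sampled network, or an ERROR:* string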
def thermoplastic(self,reactants,DP=2,mechanism='',replicate_structures=1,distribution=[],pm=None,infinite_chain=False,verbose=True):
'''Polymerization method for building thermoplastics
Inputs:
reactants: a tuple
or a string of monomers
or a pandas dataframe containing a list of monomers as strings with column title monomers
DP: integer, degree of polymerization which is the number of monomer units in the polymer
mechanism: string,
vinyl: performs polymerization along vinyl groups
ester: performs condensation reaction on dicarboxylic acid + diol
ester_stereo: performs condensation reaction on dicarboxylic acid + diol where stereoregularity is also specified
amide: performs condensation reaction on dicarboxylic acid + diamine
carbonate: performs condensation reaction on carbonate + diol
replicate_structures: integer, number of replicate structures which will be generated
Returns:
polymer: dataframe
'''
returnpoly = pd.DataFrame()
for rep in range(0,replicate_structures):
returnpoly_i = pd.DataFrame()
# reactants should be a tuple but arrives as a string in the polymerization methods;
# this puts everything into a dataframe before generating structures
if type(reactants)==str:
try:
reactants_i = ast.literal_eval(reactants)
except:
reactants_i = self.get_monomers(reactants)
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants_i))
returnpoly_i.loc[:,'distribution']=str(distribution)
elif type(reactants)==tuple:
returnpoly_i.loc[:,'monomers']=pd.Series(str(reactants))
returnpoly_i.loc[:,'distribution']=str(distribution)
elif type(reactants)==pd.DataFrame:
returnpoly_i = reactants
if 'distribution' in returnpoly_i:
returnpoly_i['distribution'] = returnpoly_i['distribution'].astype(str)
else:
returnpoly_i.loc[:,'distribution'] = str(distribution)
else:
raise ValueError('Data type not recognized')
returnpoly_i.loc[:,'replicate_structure']=rep
returnpoly_i.loc[:,'monomers'] = returnpoly_i.monomers.astype(str)
returnpoly = pd.concat([returnpoly,returnpoly_i])
# Ensure valid pm for df
if isinstance(pm, (float, int)):
if not (0 <= pm and pm <= 1):
warnings.warn(f"pm must be between 0 and 1, value of {pm} given. Setting pm to 0.5.")
pm = 0.5
# if valid user pm then use that for everything
returnpoly['pm'] = pm
else:
pm = 0.5
if 'pm' not in returnpoly:
returnpoly['pm'] = pm
else:
# Ensure that all rows have numerical pm (chained indexing would not write back)
non_numeric = ~returnpoly["pm"].map(lambda i: isinstance(i, (int, float)))
returnpoly.loc[non_numeric, "pm"] = pm
# Ensure that all rows have pm between 0 and 1; out-of-range values fall back to the default
out_of_range = ~((0 <= returnpoly["pm"]) & (returnpoly["pm"] <= 1))
returnpoly.loc[out_of_range, "pm"] = pm
if verbose:
returnpoly[['polymer','mechanism']] = returnpoly.progress_apply(
lambda row:
self.__polymerizemechanism_thermoplastic(
ast.literal_eval(row.monomers),
DP,
mechanism,
ast.literal_eval(row.distribution),
row.pm,
infinite_chain),
axis=1)
else:
returnpoly[['polymer','mechanism']] = returnpoly.apply(
lambda row:
self.__polymerizemechanism_thermoplastic(
ast.literal_eval(row.monomers),
DP,
mechanism,
ast.literal_eval(row.distribution),
row.pm,
infinite_chain),
axis=1)
returnpoly = returnpoly.sort_index().sort_values('replicate_structure')
return returnpoly
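# Usage sketch (an illustrative addition; the diol/diacid SMILES are hypothetical examples):
# pm = PolyMaker()
# df = pm.thermoplastic(('OCCO', 'OC(=O)CCCCC(=O)O'), DP=8, mechanism='ester')
# print(df.polymer.iloc[0])  # SMILES of one sampled chain, or an ERROR:* string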
def get_functionality(self,reactants,distribution=[]):
'''gets the functional groups from a list of reactants
inputs: list of smiles
output: dataframe with count of functional groups
'''
def id_functionality(r):
mol = Chem.MolFromSmiles(r.name)
r.ols = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['ols'])))
r.aliphatic_ols = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['aliphatic_ols'])))
r.acids = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['acids'])))
r.prime_amines = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['prime_amines'])))
r.carbonates = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['carbonates'])))
r.acidanhydrides = len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['acidanhydrides'])))
return r
df_func = pd.DataFrame(data = 0,index=reactants,columns=['ols','acids','prime_amines','carbonates','aliphatic_ols','acidanhydrides'])
df_func = df_func.apply(lambda r: id_functionality(r),axis=1)
#appends distribution to dataframe
if len(distribution)==0:df_func['distribution'] = [1]*df_func.shape[0]
else:df_func['distribution'] = list(distribution)
return df_func
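# Illustrative example (a sketch): for a diol and a diacid the returned table has one
# row per monomer, e.g. ols=2/acids=0 for 'OCCO' and ols=0/acids=2 for
# 'OC(=O)CCCCC(=O)O', plus the distribution column appended above.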
def __returnvalid(self,prodlist):
'''verifies molecule is valid
Input: list of strings
Return: list of strings
'''
returnlist = []
rdBase.DisableLog('rdApp.error')
for x in prodlist:
try:
Chem.SanitizeMol(Chem.MolFromSmiles(x))
returnlist.append(x)
except:
pass
rdBase.EnableLog('rdApp.error')
return returnlist
def __get_distributed_reactants(self,reactants,distribution=[]):
if len(distribution)!=0:
distribution = self.__integerize_distribution(distribution)
smiles_list = []
for reactant,mol in zip(reactants,distribution):
smiles_list = smiles_list+[reactant]*mol
return_reactants = self.get_monomers('.'.join(smiles_list))
else:return_reactants=reactants
return return_reactants
def __integerize_distribution(self,distribution):
numdecimals = max([str(d)[::-1].find('.') for d in distribution])
if numdecimals==-1:numdecimals=0
distribution = [int(d*10**numdecimals) for d in distribution]
try:distribution=distribution/np.gcd.reduce(distribution)
except:pass
return [int(d) for d in distribution]
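# Worked example (an illustrative comment):
#   [0.5, 0.25, 0.25] -> max 2 decimals -> scale by 10**2 -> [50, 25, 25]
#   -> divide by gcd(50, 25, 25) = 25 -> [2, 1, 1]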
def __polymerizemechanism_thermoplastic(self,reactants,DP,mechanism,distribution=[],pm=0,infinite_chain=False,rep=None):
'''directs polymerization to correct method for mechanism'''
returnpoly = ''
#reactants = self.__get_distributed_reactants(reactants,distribution=distribution)
if (mechanism=='vinyl')|(mechanism=='acrylate'):
polydata = self.__poly_vinyl(reactants,DP,distribution,infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='ester':
polydata = self.__poly_ester(reactants,DP,distribution,infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='ester_stereo':
polydata = self.__poly_ester_stereo(reactants, DP, pm, distribution, infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='amide':
polydata = self.__poly_amide(reactants,DP,distribution,infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='carbonate':
polydata = self.__poly_carbonate(reactants,DP,distribution,infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='imide':
polydata = self.__poly_imide(reactants,DP,distribution,infinite_chain)
returnpoly = polydata[0]
mechanism = polydata[1]
elif mechanism=='all':
polylist = [self.__poly_vinyl(reactants,DP,distribution,infinite_chain),
self.__poly_ester(reactants,DP,distribution,infinite_chain),
self.__poly_amide(reactants,DP,distribution,infinite_chain),
self.__poly_carbonate(reactants,DP,distribution,infinite_chain),
self.__poly_imide(reactants,DP,distribution,infinite_chain)]
polylist = [p for p in polylist if p[0] not in ['ERROR:Vinyl_ReactionFailed',
'ERROR:Ester_ReactionFailed',
'ERROR:Amide_ReactionFailed',
'ERROR:Carbonate_ReactionFailed',
'ERROR:Imide_ReactionFailed',
'']]
if len(polylist)==1:
returnpoly = polylist[0][0]
mechanism = polylist[0][1]
elif len(polylist) > 1:
returnpoly = 'ERROR_02:MultiplePolymerizations'
else:
returnpoly = 'ERROR_01:NoReaction'
else:
returnpoly='ERROR_03:MechanismNotRecognized'
return pd.Series([returnpoly,mechanism])
def __polymerizemechanism_thermoset(self,reactants,mechanism,crosslinker,distribution,DP):
'''directs polymerization to correct method for mechanism'''
returnpoly = ''
if (mechanism=='UPE'):
polydata = self.__poly_upe(reactants,crosslinker,distribution,DP)
returnpoly = polydata[0]
mechanism = polydata[1]
else:
returnpoly='ERROR_03:MechanismNotRecognized'
return pd.Series([returnpoly,mechanism])
def __poly_vinyl_init(self,mola,molb):
'''performs initiation rxn of vinyl polymer'''
#rxn definition
rxn = AllChem.ReactionFromSmarts('[C:1]=[C:2].[C:3]=[C:4]>>[Kr][C:1][C:2][C:3][C:4][Xe]')
#product creation and validation
prod = rxn.RunReactants((mola,molb))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
molprodlist = [Chem.MolFromSmiles(p) for p in self.__returnvalid(prodlist)]
return molprodlist
def __poly_vinyl_prop(self,mola,molb):
'''performs propagation rxn of vinyl polymer'''
#rxn definition
rxn = AllChem.ReactionFromSmarts('[C:0][C:1][C:2][C:3][Xe].[C:4]=[C:5]>>[C:0][C:1][C:2][C:3][C:4][C:5][Xe]')
#product creation and validation
prod = rxn.RunReactants((mola,molb))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
molprodlist = [Chem.MolFromSmiles(p) for p in self.__returnvalid(prodlist)]
return molprodlist
def __poly_vinyl_term(self,mola,molb,infinite_chain=False,single_rxn=False):
'''performs termination rxn of vinyl polymer'''
#rxn definition
if single_rxn:
rxn1 = AllChem.ReactionFromSmarts('[C:0]=[C:1].[C:2]=[C:3]>>[C:0][C:1][C:2][C:3]')
prod = rxn1.RunReactants((mola,molb))
elif infinite_chain:
#terminates and removes Xe
rxn1 = AllChem.ReactionFromSmarts('[C:0][C:1][C:2][C:3][Xe].[C:4]=[C:5]>>[C:0][C:1][C:2][C:3][C:4][C:5][Xe]')
prod = rxn1.RunReactants((mola,molb))
#ring closes
rxn2 = AllChem.ReactionFromSmarts('([Kr][C:0][C:1].[C:2][C:3][Xe])>>[C:1][C:0][C:3][C:2]')
prod = [rxn2.RunReactants((r,)) for r in list(itertools.chain(*prod))]
prod = list(itertools.chain(*prod))
else:
#terminates and removes Xe
rxn1 = AllChem.ReactionFromSmarts('[C:0][C:1][C:2][C:3][Xe].[C:4]=[C:5]>>[C:0][C:1][C:2][C:3][C:4][C:5]')
prod = rxn1.RunReactants((mola,molb))
#removes Kr
rxn2 = AllChem.ReactionFromSmarts('[C:0][C:1][C:2][C:3][Kr]>>[C:0][C:1][C:2][C:3]')
prod = [rxn2.RunReactants((r,)) for r in list(itertools.chain(*prod))]
prod = list(itertools.chain(*prod))
#preps for return
prod = list(itertools.chain(*prod))
prodlist = [Chem.MolToSmiles(p) for p in prod]
molprodlist = [Chem.MolFromSmiles(p) for p in self.__returnvalid(prodlist)]
return molprodlist
def __poly_vinyl(self,reactants,DP=3, distribution = [],infinite_chain=False,crosslink=False):
''' performs vinyl polymerization'''
try:
if len(distribution)==0:distribution = [1]*len(reactants)
#mol conversion and parsing
mols = [Chem.MolFromSmiles(r) for r in reactants]
if crosslink:
distribution = [1,1]+list(distribution)
dfreactants = pd.DataFrame(data=[reactants,mols,distribution],index=['reactants','mols','distribution']).T
dfmolA = pd.DataFrame(dfreactants.iloc[0]).T
dfmolB = pd.DataFrame(dfreactants.iloc[1]).T
dfreactants = dfreactants.iloc[2:]
else:
dfreactants = pd.DataFrame(data=[reactants,mols,distribution],index=['reactants','mols','distribution']).T
dfmolA = dfreactants
dfmolB = dfreactants
#polymerization
assert DP>1
if DP>2:
# initiate
molA = dfmolA.sample(1,weights=dfmolA.distribution,replace=True).iloc[0].loc['mols']
mol = dfreactants.sample(1,weights=dfreactants.distribution,replace=True).iloc[0].loc['mols']
polymer = self.__poly_vinyl_init(molA,mol)
# propagate
for r in range(0,DP-3):
assert len(polymer)>=1
polymer = random.choice(polymer)
mol = dfreactants.sample(1,weights=dfreactants.distribution,replace=True).iloc[0].loc['mols']
polymer = self.__poly_vinyl_prop(polymer,mol)
#terminate
polymer = random.choice(polymer)
molB = dfmolB.sample(1,weights=dfmolB.distribution,replace=True).iloc[0].loc['mols']
polymer = self.__poly_vinyl_term(polymer,molB,infinite_chain)
if DP==2:
molA = dfmolA.sample(1,weights=dfmolA.distribution,replace=True).iloc[0].loc['mols']
molB = dfmolB.sample(1,weights=dfmolB.distribution,replace=True).iloc[0].loc['mols']
polymer = self.__poly_vinyl_term(molA,molB,single_rxn=True)
polymer = Chem.MolToSmiles(random.choice(polymer))
except:
polymer = 'ERROR:Vinyl_ReactionFailed'
return polymer, 'vinyl'
def __protect_substructure(self,mol,substructure,n_unprotected=0):
''' protects atoms in the group identified
mol: rdkit mol object
substructure: SMARTS string to match to
n_unprotected: number of substructures that will not be protected'''
if type(mol)==str:
mol=Chem.MolFromSmiles(mol)
mol = deepcopy(mol)
protect = list(mol.GetSubstructMatches(Chem.MolFromSmarts(substructure)))
random.shuffle(protect)
protect = protect[n_unprotected:]
protect = list(itertools.chain(*protect))
for atom in mol.GetAtoms():
if atom.GetIdx() in protect: atom.SetProp('_protected','1')
else: pass
return [mol, len(protect)]
def __unprotect_atoms(self,mol):
'''unprotects all atoms in molecule'''
mol = deepcopy(mol)
for atom in mol.GetAtoms():
try:atom.ClearProp('_protected')
except:pass
return mol
def __poly_ester(self,reactants,DP=2, distribution = [],infinite_chain=False):
'''performs condensation reaction on dicarboxylic acids and diols'''
try:
#open acid anhydrides
def replace_acidanhydrides(reactant):
mol = Chem.MolFromSmiles(reactant)
if len(mol.GetSubstructMatches(Chem.MolFromSmarts(self.smiles_req['acidanhydrides'])))>0:
reactant = self.__openacidanhydride(reactant)
else:
pass
return reactant
reactants = pd.Series(reactants).apply(replace_acidanhydrides).tolist()
rxn_dic = self.reactions['ester']
df_func = self.get_functionality(reactants,distribution=distribution)
#select initial monomer as polymer chain
df_poly = df_func.sample(1)
df_func.loc['polymer'] = df_poly.sample(1).values[0]
poly = df_poly.index[0]
molpoly = Chem.MolFromSmiles(poly)
DP_count=1
DP_actual = 1
while DP_count<DP:
#select rxn rule and reactant
if (df_func.loc['polymer','aliphatic_ols']>=1)&(df_func.loc['polymer','acids']>=1):
msk =((df_func.acids>=1)|(df_func.aliphatic_ols>=1))&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
if df_func.loc[a].aliphatic_ols>=1:rxn_selector = 'diacids_ols'
if df_func.loc[a].acids >=1: rxn_selector = 'diols_acids'
elif df_func.loc['polymer','aliphatic_ols'] >=2:
msk = (df_func.acids>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diols_acids'
elif df_func.loc['polymer','acids']>=2:
msk = (df_func.aliphatic_ols>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diacids_ols'
else:
assert False
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic[rxn_selector])
#update df_func table
df_func.loc['polymer']=df_func.loc['polymer']+df_func.loc[a] # adding polymer and a
for column_name in ['aliphatic_ols','ols','acids']:
df_func.loc['polymer',column_name] += -1 # subtracting off functionality
assert df_func.loc['polymer'][df_func.loc['polymer']>-1].shape==df_func.loc['polymer'].shape
#React and select product
mola = Chem.MolFromSmiles(a)
prod = rxn.RunReactants((molpoly,mola))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
# manage loop and ring close
if (infinite_chain)&(DP_count==DP-1):
# logic for closing ring
if (df_func.loc['polymer','aliphatic_ols']>0)&(df_func.loc['polymer','acids']>0):
#case for when the chain can ring close
DP_count+=1
DP_actual+=1
else:
#case for when the chain has the same terminal ends so it can't ring close
DP_count = DP_count
DP_actual+=1
else:
DP_count+=1
DP_actual+=1
if infinite_chain: #closes ring
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic['infinite_chain'])
prod = rxn.RunReactants((molpoly,))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
except:
poly='ERROR:Ester_ReactionFailed'
return poly, 'ester'
def __poly_ester_stereo(
self, reactants, DP=2, pm=0.5, distribution=[1], infinite_chain=False
):
"""performs condenstation reaction on dicarboxyl and diols
A poly ester generator that incorporates stereochemistry. There are three polymer properties that are controlled:
(1) DP, degree of polymerization. Counts each monomer provided as a repeat unit (each adds 1 to DP)
(2) pm, which is the probability for meso addition. 0 = syndiotactic, 0.5 = atactic, 1 = isotactic
(3) the distribution of monomers in the polymer (fraction composition)
In order to properly construct the polymer, the inputs must follow these conventions:
(1) reactants must be smiles in a tuple. Unique monomer order is determined by stripping stereochemistry
from monomers. For each monomer it is assumed that two provided monomers are the R and S enantiomers in that
order.
(2) pm must be a single value, which is applied to all monomers equally, or a list that corresponds to the
order of monomers in the reactants tuple.
(3) distribution must be a single value, which is applied to all monomers equally, or a list that
corresponds to the order of the monomers in the reactants tuple.
Example inputs, where monomer letters represent their SMILES representation.
(1) reacting one monomer, A, with two enantiomers, A(R) and A(S) in an isotactic fashion (pm=1).
reactants = (A(R), A(S))
pm = 1
(2) reacting two monomers: A, with two enantiomers, A(R) and A(S), and B, with two enantiomers,
B(R) and B(S), with A adding isotactically and B adding atactically, and a ratio of A:B of 80:20
reactants = (A(R), A(S), B(R), B(S))
pm = [1, 0.5]
distribution = [80, 20]
(3) reacting two monomers: A, with two enantiomers, A(R) and A(S) and B, with no enantiomers.
A is syndiotactic. A pm value must be supplied to every monomer, even if it can't display tacticity.
This can be done with either a list, or just using one pm value.
reactants = (A(R), A(S), B)
pm = 0
reactants: Tuple[str]
A tuple containing the reactant smiles
DP: int
Degree of polymerization. Each monomer added contributes to this value, by default 2
pm: Union[float, List[float]]
The probability for meso addition for the monomer species. 0 = syndiotactic, 0.5 = atactic, 1 = isotactic.
Specifying one value sets the value for all monomers, by default 0.5
distribution: List[float]
The distribution for the monomer species in the resultant polymer. Specifying one value sets the value for
all monomers, by default [1]
infinite_chain: bool
Whether or not to use an infinite chain, by default False
"""
# Define helper functions
def replace_acidanhydrides(reactant):
mol = Chem.MolFromSmiles(reactant)
if (
len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(self.smiles_req["acidanhydrides"])
)
)
> 0
):
reactant = self.__openacidanhydride(reactant)
else:
pass
return reactant
def sample_by_pm(monomers, last_stereo):
"""Sample monomer to react based on pm values
monomers: df
Dataframe containing monomers to sample from
last_stereo: int
The stereochemistry of the last monomer
"""
# Select id of monomer to add
monomer_id = sample_monomer_id(distribution, ids=monomers.monomer_id.unique())
# Get any monomers that match
monomers = monomers[
(monomers.index != "polymer") & (monomers["monomer_id"] == monomer_id)
].reset_index(drop=False)
if last_stereo == 2: # No stereochemistry, sample randomly
new_monomer = monomers.sample(1)
elif len(monomers) == 1: # Only one monomer to select
new_monomer = monomers
else:
# Generate weights based on pm and get sample monomer
monomer_pm = pm[monomer_id]
if last_stereo == 0:
weights = [monomer_pm, 1 - monomer_pm]
else:
weights = [1 - monomer_pm, monomer_pm]
new_monomer = monomers.sample(1, weights=weights)
# Return new monomer SMILES and new stereo_id
return new_monomer.iloc[0]["index"], new_monomer.iloc[0]["stereo_id"]
def sample_monomer_id(distribution, ids=[]):
# Get a monomer ID from the distribution
# ids dictates which polymers to draw from, empty list indicates all
p_distribution = np.array(distribution)
ids = np.array(ids)
if len(ids) > 0:
p_distribution = p_distribution[ids]
else:
ids = range(len(distribution))
# normalize for the choice function
p_distribution = np.array(p_distribution, dtype=float)
p_distribution /= sum(p_distribution)
monomer_id = np.random.choice(ids, p=p_distribution)
return monomer_id
try:
# open acid anhydrides
reactants = pd.Series(reactants).apply(replace_acidanhydrides).tolist()
# Load reaction info and get functionalities
rxn_dict = self.reactions["ester"]
func_df = self.get_functionality(reactants)
# Assign each monomer a unique ID independent of stereochemistry to aid in sampling
func_df["nonstereo_smiles"] = func_df.index.map(
lambda s: s.replace("@", "").replace("\\", "")
)
func_df["monomer_id"] = func_df.groupby(
["nonstereo_smiles"], sort=False
).ngroup()
n_unique_monomers = max(func_df["monomer_id"]) + 1
# if distribution is wrong shape, make uniform
if len(distribution) != n_unique_monomers:
distribution = [1] * n_unique_monomers
# Verify pm is correct shape
if type(pm) != list:
pm = [pm] * n_unique_monomers
else:
assert len(pm) == len(reactants)
# Give each monomer a stereo ID, assumes R is first and S is second
# R = 0, S = 1, one monomer = 2
for monomer_id in set(func_df["monomer_id"]):
n_monomers = len(func_df[func_df["monomer_id"] == monomer_id])
assert n_monomers < 3 # Only allowing for 2 monomers currently
if n_monomers == 2:
func_df.loc[func_df["monomer_id"] == monomer_id, "stereo_id"] = [0, 1]
elif n_monomers == 1:
func_df.loc[func_df["monomer_id"] == monomer_id, "stereo_id"] = 2
func_df = func_df.drop(columns=["nonstereo_smiles"])
# Grab a random polymer from func_df based on the distribution and initialize poly entry in df
poly_df = func_df[
func_df["monomer_id"] == sample_monomer_id(distribution)
].sample(1)
molpoly = Chem.MolFromSmiles(poly_df.index[0]) # index is SMILES
poly_df.index.values[0] = "polymer"
func_df = pd.concat([func_df, poly_df])
last_stereo = poly_df["stereo_id"][0]
DP_count = 1
DP_actual = 1
while DP_count < DP:
# select rxn rule and reactant
if (func_df.loc["polymer", "aliphatic_ols"] >= 1) & (
func_df.loc["polymer", "acids"] >= 1
):
mask = ((func_df.acids >= 1) | (func_df.aliphatic_ols >= 1)) & (
func_df.index != "polymer"
)
func_df_select = func_df.loc[mask]
monomer, last_stereo = sample_by_pm(func_df_select, last_stereo)
if func_df.loc[monomer].aliphatic_ols >= 1:
rxn_selector = "diacids_ols"
if func_df.loc[monomer].acids >= 1:
rxn_selector = "diols_acids"
elif func_df.loc["polymer", "aliphatic_ols"] >= 2:
msk = (func_df.acids >= 1) & (func_df.index != "polymer")
func_df_select = func_df.loc[msk]
monomer, last_stereo = sample_by_pm(func_df_select, last_stereo)
rxn_selector = "diols_acids"
elif func_df.loc["polymer", "acids"] >= 2:
msk = (func_df.aliphatic_ols >= 1) & (func_df.index != "polymer")
func_df_select = func_df.loc[msk]
monomer, last_stereo = sample_by_pm(func_df_select, last_stereo)
rxn_selector = "diacids_ols"
else:
assert False
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dict[rxn_selector])
# update func_df table
func_df.loc["polymer"] = (
func_df.loc["polymer"] + func_df.loc[monomer]
) # adding polymer and a
for column_name in ["aliphatic_ols", "ols", "acids"]:
func_df.loc[
"polymer", column_name
] += -1 # subtracting off functionality
assert (
func_df.loc["polymer"][func_df.loc["polymer"] > -1].shape
== func_df.loc["polymer"].shape
)
# React and select product
mola = Chem.MolFromSmiles(monomer)
prod = rxn.RunReactants((molpoly, mola))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
# manage loop and ring close
if (infinite_chain) & (DP_count == DP - 1):
# logic for closing ring
if (func_df.loc["polymer", "aliphatic_ols"] > 0) & (
func_df.loc["polymer", "acids"]
) > 0:
# case for when has can ring close
DP_count += 1
DP_actual += 1
else:
# case for when the chain has the same terminal ends so it can't ring close
DP_count = DP_count
DP_actual += 1
else:
DP_count += 1
DP_actual += 1
if infinite_chain: # closes ring
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dict["infinite_chain"])
prod = rxn.RunReactants((molpoly,))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
except BaseException as e:
# print(e) # Can give reason why it fails?
poly = "ERROR:Ester_Stereo_ReactionFailed"
return poly, "ester_stereo"
def __poly_amide(self,reactants,DP=2, distribution = [],infinite_chain=False):
'''performs condensation reaction on dicarboxylic acids and diamines'''
# function
try:
# initial
rxn_dic = self.reactions['amide']
df_func = self.get_functionality(reactants,distribution=distribution)
#select initial monomer as polymer chain
df_poly = df_func.sample(1)
df_func.loc['polymer'] = df_poly.sample(1).values[0]
poly = df_poly.index[0]
molpoly = Chem.MolFromSmiles(poly)
DP_count=1
DP_actual = 1
while DP_count<DP:
#select rxn rule and reactant
if (df_func.loc['polymer','prime_amines']>=1)&(df_func.loc['polymer','acids']>=1):
msk =((df_func.acids>=1)|(df_func.prime_amines>=1))&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
if df_func.loc[a].prime_amines>=1:rxn_selector = 'diacids_amines'
if df_func.loc[a].acids >=1: rxn_selector = 'diamines_acids'
elif df_func.loc['polymer','prime_amines'] >=2:
msk = (df_func.acids>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diamines_acids'
elif df_func.loc['polymer','acids']>=2:
msk = (df_func.prime_amines>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diacids_amines'
else: assert False
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic[rxn_selector])
#update df_func table
df_func.loc['polymer']=df_func.loc['polymer']+df_func.loc[a]# adding polymer and a
for column_name in ['prime_amines','acids']:
df_func.loc['polymer',column_name] += -1
assert df_func.loc['polymer'][df_func.loc['polymer']>-1].shape==df_func.loc['polymer'].shape
#React and select product
mola = Chem.MolFromSmiles(a)
prod = rxn.RunReactants((molpoly,mola))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
# manage loop and ring close
if (infinite_chain)&(DP_count==DP-1):
# logic for closing ring
if (df_func.loc['polymer','prime_amines']>0)&(df_func.loc['polymer','acids']>0):
#case for when the chain can ring close
DP_count+=1
DP_actual+=1
else:
#case for when the chain has the same terminal ends so it can't ring close
DP_count = DP_count
DP_actual+=1
else:
DP_count+=1
DP_actual+=1
if infinite_chain: #closes ring
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic['infinite_chain'])
prod = rxn.RunReactants((molpoly,))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
except:
poly='ERROR:Amide_ReactionFailed'
return poly, 'amide'
def __poly_carbonate(self,reactants,DP=2, distribution = [],infinite_chain=False):
def choose_carbonyltype(reactants):
#this chooses the right rxn scheme depending on the carbonate monomer
template_phosgene = '[O:2]=[C:3]([F,Cl,Br,I,O:4])([F,Cl,Br,I:5])'
template_nonphosgene = '[O:2]=[C:3]([O:4][C,c:6])([O:5][C,c])'
if np.any([len(Chem.MolFromSmiles(r).GetSubstructMatch(Chem.MolFromSmarts(template_phosgene))) for r in reactants]):carbonyltype='phosgene'
if np.any([len(Chem.MolFromSmiles(r).GetSubstructMatch(Chem.MolFromSmarts(template_nonphosgene))) for r in reactants]):carbonyltype='nonphosgene'
return carbonyltype
def get_prods_matching_mw(molpoly,mola,prodlist,leavegroup_MW,infinite_chain=False):
returnlist = []
if not infinite_chain:
mwexpected = np.round(Descriptors.MolWt(molpoly)+Descriptors.MolWt(mola)-leavegroup_MW,2)
else:
mwexpected = np.round(Descriptors.MolWt(molpoly)-leavegroup_MW,2)
for prod in prodlist:
mprod = Chem.MolFromSmiles(prod)
mwprod = np.round(Descriptors.MolWt(mprod),2)
if (mwexpected-.1<mwprod<mwexpected+.1):
returnlist.append(prod)
return returnlist
try:
# initial
carbonyltype = choose_carbonyltype(reactants)
rxn_dic = self.reactions['carbonate'][carbonyltype]
df_func = self.get_functionality(reactants,distribution=distribution)
#select initial monomer as polymer chain
df_poly = df_func.sample(1)
df_func.loc['polymer'] = df_poly.sample(1).values[0]
poly = df_poly.index[0]
molpoly = Chem.MolFromSmiles(poly)
DP_count=1
DP_actual = 1
while DP_count<DP:
# print(df_func)
#select rxn rule and reactant
if (df_func.loc['polymer','ols']>=1)&(df_func.loc['polymer','carbonates']>=0.5):
msk =((df_func.ols>=1)|(df_func.carbonates>=0.5))&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
if np.all(df_func.loc[a].ols>=1): rxn_selector ='carbonates_diols'
if np.all(df_func.loc[a].carbonates >=0.5):rxn_selector = 'diols_carbonates'
elif df_func.loc['polymer','ols'] >=2:
msk = (df_func.carbonates>=0.5)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diols_carbonates'
elif df_func.loc['polymer','carbonates']>=1:
msk = (df_func.ols>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector ='carbonates_diols'
else:
assert False
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic[rxn_selector])
#update df_func table
df_func.loc['polymer']=df_func.loc['polymer']+df_func.loc[a]# adding polymer and a
for column_name,adder in zip(['ols','carbonates'],[-1,-0.5]):
df_func.loc['polymer',column_name] += adder
assert df_func.loc['polymer'][df_func.loc['polymer']>-1].shape==df_func.loc['polymer'].shape
mola = Chem.MolFromSmiles(a)
if ((DP_count-1 == 0)&(rxn_selector=='diols_carbonates')):
leavegroup_MW = (Descriptors.MolWt(mola)-Descriptors.MolWt(Chem.MolFromSmiles('C=O'))+4)/2
if ((DP_count-1 == 0)&(rxn_selector=='carbonates_diols')):
leavegroup_MW = (Descriptors.MolWt(molpoly)-Descriptors.MolWt(Chem.MolFromSmiles('C=O'))+4)/2
prods = rxn.RunReactants((molpoly,mola))
allprodlist = [Chem.MolToSmiles(x[0]) for x in prods]
prodlist = pd.Series(self.__returnvalid(allprodlist)).unique().tolist()
prodlist = get_prods_matching_mw(molpoly,mola,prodlist,leavegroup_MW)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
# manage loop and ring close
if (infinite_chain)&(DP_count==DP-1):
# logic for closing ring
if (df_func.loc['polymer','ols']>0)&(df_func.loc['polymer','carbonates']>0):
#case for when the chain can ring close
DP_count+=1
DP_actual+=1
else:
#case for when the chain has the same terminal ends so it can't ring close
DP_count = DP_count
DP_actual+=1
else:
DP_count+=1
DP_actual+=1
if infinite_chain: #closes ring
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic['infinite_chain'])
prod = rxn.RunReactants((molpoly,))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
prodlist = get_prods_matching_mw(molpoly,mola,prodlist,leavegroup_MW,infinite_chain=True)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
except:
poly='ERROR:Carbonate_ReactionFailed'
return poly, 'carbonate'
def __poly_imide(self,reactants,DP=2, distribution = [],infinite_chain=False):
'''performs condensation reaction on dianhydrides and diamines'''
# function
try:
# initial
rxn_dic = self.reactions['imide']  # the inline dict previously defined here was dead code, immediately overwritten
df_func = self.get_functionality(reactants,distribution=distribution)
#select initial monomer as polymer chain
df_poly = df_func.sample(1)
df_func.loc['polymer'] = df_poly.sample(1).values[0]
poly = df_poly.index[0]
molpoly = Chem.MolFromSmiles(poly)
DP_count=1
DP_actual = 1
while DP_count<DP:
#select rxn rule and reactant
if (df_func.loc['polymer','prime_amines']>=1)&(df_func.loc['polymer','acidanhydrides']>=1):
msk = ((df_func.acids>=1)|(df_func.prime_amines>=1))&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
if np.all(df_func.loc[a].prime_amines>=1): rxn_selector ='diacidanhydrides_amines'
if np.all(df_func.loc[a].acidanhydrides >=1): rxn_selector = 'diamines_acidanhydrides'
elif df_func.loc['polymer','prime_amines'] >=2:
msk = (df_func.acidanhydrides>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diamines_acidanhydrides'
elif df_func.loc['polymer','acidanhydrides']>=2:
msk = (df_func.prime_amines>=1)&(df_func.index!='polymer')
df_func_select = df_func.loc[msk]
a = df_func_select.sample(1,weights=df_func.distribution,replace=True).index.values[0]
rxn_selector = 'diacidanhydrides_amines'
else:
assert False
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic[rxn_selector])
#update df_func table
df_func.loc['polymer']=df_func.loc['polymer']+df_func.loc[a]# adding polymer and a
for column_name,adder in zip(['prime_amines','acidanhydrides'],[-1,-1]):
df_func.loc['polymer',column_name] += adder
assert df_func.loc['polymer'][df_func.loc['polymer']>-1].shape==df_func.loc['polymer'].shape
#React and select product
mola = Chem.MolFromSmiles(a)
prod = rxn.RunReactants((molpoly,mola))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
# manage loop and ring close
if (infinite_chain)&(DP_count==DP-1):
# logic for closing ring
if (df_func.loc['polymer','prime_amines']>0)&(df_func.loc['polymer','acidanhydrides']>0):
#case for when the chain can ring close
DP_count+=1
DP_actual+=1
else:
#case for when the chain has the same terminal ends so it can't ring close
DP_count = DP_count
DP_actual+=1
else:
DP_count+=1
DP_actual+=1
if infinite_chain: #closes ring
rxn = Chem.AllChem.ReactionFromSmarts(rxn_dic['infinite_chain'])
prod = rxn.RunReactants((molpoly,))
prodlist = [Chem.MolToSmiles(x[0]) for x in prod]
prodlist = self.__returnvalid(prodlist)
poly = random.choice(prodlist)
molpoly = Chem.MolFromSmiles(poly)
except:
poly='ERROR:Imide_ReactionFailed'
return poly, 'imide'
def __poly_upe(self,reactants,crosslinker,distribution,DP):
''' generates 2 ringed thermoset
reactants: list of smiles
crosslinker: boolean list indicating which reactants are in the ring structure and which are in the crosslink
distribution: number of mols for each monomer in the reaction; these should be actual mols, not just the molar ratio
DP: integer, degree of polymerization
'''
#getting distributed reactants and parsing monomers
reactants = np.array(reactants)
crosslinker = np.array(crosslinker,dtype=bool)
distribution = np.array(distribution)
reactants_backbone = reactants[~crosslinker]
reactants_backbone = tuple(reactants[np.isin(reactants,reactants_backbone)])
distribution_backbone = tuple(distribution[np.isin(reactants,reactants_backbone)])
reactants_crosslinker = reactants[crosslinker]
reactants_crosslinker = tuple(reactants[np.isin(reactants,reactants_crosslinker)])
distribution_crosslinker = tuple(distribution[np.isin(reactants,reactants_crosslinker)])
# parse DP
# to be inserted
#make rings by generating ring structures; makes 20 attempts to get a ring with a reaction site and protects any other reaction sites
ring1=ring2='ERROR'
i=0
while ring1=='ERROR' or ring2=='ERROR':
dfrings = self.thermoplastic(reactants_backbone,
mechanism='ester',
DP=DP,
replicate_structures=1,
infinite_chain=True,
verbose=False)
if dfrings.polymer.str.contains('ERROR').any():#makes sure the ester reaction worked before trying to protect
pass
else:
mol,p = dfrings.apply(lambda row: self.__protect_substructure(row.polymer,'C=C',n_unprotected=1),axis=1)[0]
if p>0:
if ring1=='ERROR':
ring1=mol
else:
ring2=mol
i+=1
if i>20:break
if type(ring1)==str or type(ring2)==str: #makes sure rings have been assigned; if not, either a ring didn't have a rxn site or the ester rxn failed
poly='ERROR:Ester_ReactionFailed'
else:
rings = [Chem.MolToSmiles(s) for s in [ring1,ring2]]
## connect rings
reactant_ringclose = rings+list(reactants_crosslinker)
poly = self.__poly_vinyl(reactant_ringclose,DP=DP,distribution=distribution_crosslinker,crosslink=True)[0]
if 'ERROR' in poly:poly='ERROR:Vinyl_ReactionFailed'
return poly,'UPE'
def __openacidanhydride(self,reactant):
rxn = Chem.AllChem.ReactionFromSmarts(self.reactions['open_acidanhydrides']['add_OH'])
mol = Chem.MolFromSmiles(reactant)
prod = rxn.RunReactants((mol,))
prod = random.choice(prod)[0]
mol = Chem.RWMol(prod)
mol.RemoveBond(0,1)
return Chem.MolToSmiles(mol)
| 37.65367
| 154
| 0.669854
|
4a17b71d3d7ff94b4b066c885e51c17bbff9b329
| 1,115
|
py
|
Python
|
test/test_econdings.py
|
mspronesti/qlkit
|
2bb4dabcf88e63c54f7c57e2e80ad2ca77a04b40
|
[
"Apache-2.0"
] | 5
|
2021-12-26T15:45:00.000Z
|
2022-01-12T10:31:57.000Z
|
test/test_econdings.py
|
mspronesti/qlkit
|
2bb4dabcf88e63c54f7c57e2e80ad2ca77a04b40
|
[
"Apache-2.0"
] | null | null | null |
test/test_econdings.py
|
mspronesti/qlkit
|
2bb4dabcf88e63c54f7c57e2e80ad2ca77a04b40
|
[
"Apache-2.0"
] | 2
|
2022-01-28T22:05:50.000Z
|
2022-02-27T18:50:33.000Z
|
from numpy.lib.scimath import sqrt
from qlearnkit.encodings import AmplitudeEncoding
import pytest
import numpy as np
@pytest.fixture
def classical_data():
return [
[1, 1, 1, 1],
[-1, -1, -1, -1],
[1 / 2, -1 / 2, 0, 0]
]
@pytest.fixture
def expected_normalised_data():
return [
[1 / 2, 1 / 2, 1 / 2, 1 / 2],
[-1 / 2, -1 / 2, -1 / 2, -1 / 2],
[1 / sqrt(2), -1 / sqrt(2), 0, 0]
]
def test_fidelity(classical_data, tolerance=1.0e-5):
amp_enc = AmplitudeEncoding(n_features=4)
normalised_data = np.array([amp_enc.state_vector(data)
for data in classical_data
])
assert (normalised_data ** 2).sum(axis=1) == pytest.approx(1.0, tolerance)
def test_amplitude_encoding(classical_data, expected_normalised_data):
amp_enc = AmplitudeEncoding(n_features=4)
normalised_data = np.array([amp_enc.state_vector(data)
for data in classical_data
])
assert (normalised_data == expected_normalised_data).all()
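def test_state_vector_norm():
    # A small additional check (a sketch that relies only on the API used above):
    # any nonzero input should map to a unit-norm state vector.
    amp_enc = AmplitudeEncoding(n_features=4)
    vec = np.array(amp_enc.state_vector([3, 4, 0, 0]))
    assert np.linalg.norm(vec) == pytest.approx(1.0)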
| 27.875
| 78
| 0.574888
|
4a17b9628ee4ee63f8df88d7ab1085809d662e7f
| 869
|
py
|
Python
|
Ch3/magic_8ball.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
Ch3/magic_8ball.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
Ch3/magic_8ball.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
import random # import random module
def get_answer(answer_number): #define function
if answer_number == 1:
return "It is certain"
elif answer_number == 2:
return "It is decidedly so"
elif answer_number == 3:
return "Yes"
elif answer_number == 4:
return "Reply hazy try again"
elif answer_number == 5:
return "Ask again later"
elif answer_number == 6:
return "Concentrate and ask again"
elif answer_number == 7:
return "My reply is no"
elif answer_number == 8:
return "Outlook not so good"
elif answer_number == 9:
return "Very doubtful"
# rand = random.randint(1,9) # calling random int function
# fortune = get_answer(rand) # setting fortune to returned string of fn
# print(fortune) # print to console
# in one line
print(get_answer(random.randint(1,9)))
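# A table-driven alternative (a sketch, not part of the original script):
# answers = ("It is certain", "It is decidedly so", "Yes", "Reply hazy try again",
#            "Ask again later", "Concentrate and ask again", "My reply is no",
#            "Outlook not so good", "Very doubtful")
# print(random.choice(answers))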
| 29.965517
| 71
| 0.652474
|
4a17b9a991fcc18fb4ad6306fa54119d511664dc
| 3,773
|
py
|
Python
|
obsei/source/website_crawler_source.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
obsei/source/website_crawler_source.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
obsei/source/website_crawler_source.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from abc import abstractmethod
from typing import List, Optional
import mmh3
from trafilatura import extract, feeds, fetch_url, sitemaps
from obsei.analyzer.base_analyzer import AnalyzerRequest
from obsei.source.base_source import BaseSource, BaseSourceConfig
logger = logging.getLogger(__name__)
class BaseCrawlerConfig(BaseSourceConfig):
TYPE: str = "BaseCrawler"
@abstractmethod
def extract_url(self, url: str, url_id: str = None):
pass
@abstractmethod
def find_urls(self, url: str):
pass
class TrafilaturaCrawlerConfig(BaseCrawlerConfig):
# To understand about these configuration params refer:
# https://trafilatura.readthedocs.io/
_output_format: str = "json"
TYPE: str = "Crawler"
urls: List[str]
include_comments: bool = False
include_tables: bool = True
no_fallback: bool = False
include_images: bool = False
include_formatting: bool = False
deduplicate: bool = True
no_ssl: bool = False
is_feed: bool = False
is_sitemap: bool = False
include_links: bool = True
target_language: Optional[str] = None
url_blacklist: Optional[List[str]] = None
def extract_url(self, url: str, url_id: str = None):
url_id = url_id or "{:02x}".format(mmh3.hash(url, signed=False))
url_content = fetch_url(
url=url,
no_ssl=self.no_ssl,
)
extracted_dict = None
if url_content is not None:
extracted_data = extract(
filecontent=url_content,
record_id=url_id,
no_fallback=self.no_fallback,
output_format=self._output_format,
include_comments=self.include_comments,
include_tables=self.include_tables,
include_images=self.include_images,
include_formatting=self.include_formatting,
include_links=self.include_links,
deduplicate=self.deduplicate,
url_blacklist=self.url_blacklist,
target_language=self.target_language,
)
if extracted_data:
extracted_dict = json.loads(extracted_data)
if "raw-text" in extracted_dict:
del extracted_dict["raw-text"]
return extracted_dict
def find_urls(self, url: str):
urls: List[str] = []
if self.is_sitemap:
urls = sitemaps.sitemap_search(url=url, target_lang=self.target_language)
elif self.is_feed:
urls = feeds.find_feed_urls(url=url, target_lang=self.target_language)
return urls
class TrafilaturaCrawlerSource(BaseSource):
NAME: Optional[str] = "Crawler"
def lookup(
self, config: TrafilaturaCrawlerConfig, **kwargs
) -> List[AnalyzerRequest]:
source_responses: List[AnalyzerRequest] = []
final_urls = []
if config.is_sitemap or config.is_feed:
for url in config.urls:
final_urls.extend(config.find_urls(url=url))
else:
final_urls = config.urls
for url in final_urls:
extracted_data = config.extract_url(url=url)
if extracted_data is None:
logger.warning(f"Unable to crawl {url}, hence skipping it")
continue
comments = (
"" if "comments" not in extracted_data else extracted_data["comments"]
)
source_responses.append(
AnalyzerRequest(
processed_text=f"{extracted_data['text']}. {comments}",
meta=extracted_data,
source_name=self.NAME,
)
)
return source_responses
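# Usage sketch (an illustrative addition; the URL below is hypothetical):
# config = TrafilaturaCrawlerConfig(urls=["https://example.com/blog/post"])
# source = TrafilaturaCrawlerSource()
# for request in source.lookup(config):
#     print(request.meta.get("title"), len(request.processed_text))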
| 31.974576
| 86
| 0.619136
|
4a17ba3b67633286fdc300b638dfbc6529cf9629
| 330
|
py
|
Python
|
GUI/LoginPage.py
|
vips28/Aid-for-Dysgraphia
|
632f00534003cec94357c08743ee9af81b2ca742
|
[
"Apache-2.0"
] | null | null | null |
GUI/LoginPage.py
|
vips28/Aid-for-Dysgraphia
|
632f00534003cec94357c08743ee9af81b2ca742
|
[
"Apache-2.0"
] | 4
|
2021-01-07T09:51:55.000Z
|
2021-01-07T09:54:30.000Z
|
GUI/LoginPage.py
|
vips28/Aid-for-Dysgraphia
|
632f00534003cec94357c08743ee9af81b2ca742
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 17:29:53 2021
@author: myanacondadont
"""
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
class MyGrid(Widget):
pass
class MyApp(App):
def build(self):
return MyGrid()
if __name__ == '__main__':
MyApp().run()
| 16.5
| 36
| 0.60303
|
4a17bc05f33ff4ea866d6a8112dc9646ede1572e
| 374
|
py
|
Python
|
main.py
|
abhisheks008/Digital-Clock
|
9a307d759df85ed550e54bd3ce0894f915fa9eb6
|
[
"MIT"
] | null | null | null |
main.py
|
abhisheks008/Digital-Clock
|
9a307d759df85ed550e54bd3ce0894f915fa9eb6
|
[
"MIT"
] | null | null | null |
main.py
|
abhisheks008/Digital-Clock
|
9a307d759df85ed550e54bd3ce0894f915fa9eb6
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter.ttk import *
from time import strftime
root = Tk()
root.title ("My Clock")
def time ():
string = strftime('%H:%M:%S %p')
label.config(text = string)
label.after(1000, time)
label = Label(root, font=("Calibri",80), background = 'black', foreground = 'cyan')
label.pack(anchor = 'center')
time()
mainloop()
| 20.777778
| 84
| 0.625668
|
4a17bc581bccac081bc43345d1f567ae5a7b7676
| 23,835
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
silveryfu/ray
|
4fa2a6006c305694a682086b1b52608cc3b7b8ee
|
[
"Apache-2.0"
] | 1
|
2020-01-20T07:28:19.000Z
|
2020-01-20T07:28:19.000Z
|
python/ray/tune/ray_trial_executor.py
|
JunpingDu/ray
|
214f09d969480279930994cabbcc2a75535cc6ca
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
JunpingDu/ray
|
214f09d969480279930994cabbcc2a75535cc6ca
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import random
import time
import traceback
import ray
from ray.tune.error import AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.tune.util import warn_if_slow
logger = logging.getLogger(__name__)
RESOURCE_REFRESH_PERIOD = 0.5 # Refresh resources every 500 ms
BOTTLENECK_WARN_PERIOD_S = 60
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3
class _LocalWrapper(object):
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
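# Note (an added explanatory comment): in Ray local mode train.remote()
# returns a plain dict rather than an object ID; _train below wraps it in
# _LocalWrapper so it can still serve as a key in self._running.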
class RayTrialExecutor(TrialExecutor):
"""An implemention of TrialExecutor based on Ray."""
def __init__(self,
queue_trials=False,
reuse_actors=False,
ray_auto_init=False,
refresh_period=RESOURCE_REFRESH_PERIOD):
super(RayTrialExecutor, self).__init__(queue_trials)
self._running = {}
# Resuming a trial after it was paused should not call
# trial.train.remote() again (no new remote object ID is generated),
# so self._paused stores the in-flight results of paused trials.
self._paused = {}
self._reuse_actors = reuse_actors
self._cached_actor = None
self._avail_resources = Resources(cpu=0, gpu=0)
self._committed_resources = Resources(cpu=0, gpu=0)
self._resources_initialized = False
self._refresh_period = refresh_period
self._last_resource_refresh = float("-inf")
self._last_nontrivial_wait = time.time()
if not ray.is_initialized() and ray_auto_init:
logger.info("Initializing Ray automatically."
"For cluster usage or custom Ray initialization, "
"call `ray.init(...)` before `tune.run`.")
ray.init()
if ray.is_initialized():
self._update_avail_resources()
def _setup_runner(self, trial, reuse_allowed):
if (self._reuse_actors and reuse_allowed
and self._cached_actor is not None):
logger.debug("Reusing cached runner {} for {}".format(
self._cached_actor, trial.trial_id))
existing_runner = self._cached_actor
self._cached_actor = None
else:
if self._cached_actor:
logger.debug(
"Cannot reuse cached runner {} for new trial".format(
self._cached_actor))
self._cached_actor.stop.remote()
self._cached_actor.__ray_terminate__.remote()
self._cached_actor = None
existing_runner = None
cls = ray.remote(
num_cpus=trial.resources.cpu,
num_gpus=trial.resources.gpu,
resources=trial.resources.custom_resources)(
trial._get_trainable_cls())
trial.init_logger()
# We checkpoint metadata here to try mitigating logdir duplication
self.try_checkpoint_metadata(trial)
remote_logdir = trial.logdir
if existing_runner:
trial.runner = existing_runner
if not self.reset_trial(trial, trial.config, trial.experiment_tag):
raise AbortTrialExecution(
"Trial runner reuse requires reset_trial() to be "
"implemented and return True.")
return existing_runner
def logger_creator(config):
# Set the working dir in the remote process, for user file writes
if not os.path.exists(remote_logdir):
os.makedirs(remote_logdir)
if not ray.worker._mode() == ray.worker.LOCAL_MODE:
os.chdir(remote_logdir)
return NoopLogger(config, remote_logdir)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
return cls.remote(config=trial.config, logger_creator=logger_creator)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._running[remote] = trial
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails.
"""
prior_status = trial.status
self.set_status(trial, Trial.RUNNING)
trial.runner = self._setup_runner(
trial,
reuse_allowed=checkpoint is not None
or trial._checkpoint.value is not None)
if not self.restore(trial, checkpoint):
if trial.status == Trial.ERROR:
raise RuntimeError(
"Restore from checkpoint failed for Trial {}.".format(
str(trial)))
previous_run = self._find_item(self._paused, trial)
if (prior_status == Trial.PAUSED and previous_run):
# If Trial was in flight when paused, self._paused stores result.
self._paused.pop(previous_run[0])
self._running[previous_run[0]] = trial
else:
self._train(trial)
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
        Stops this trial, releasing all allocated resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
"""
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None
def start_trial(self, trial, checkpoint=None):
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
"""
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return # don't retry fatal Tune errors
try:
# This forces the trial to not start from checkpoint.
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
# note that we don't return the resources, since they may
# have been lost
def _find_item(self, dictionary, item):
out = [rid for rid, t in dictionary.items() if t is item]
return out
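    # Sketch: _find_item inverts the {object_id: trial} mapping by identity,
    # e.g. with self._running == {oid1: t1, oid2: t2},
    # _find_item(self._running, t2) returns [oid2].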
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
if prior_status == Trial.RUNNING:
logger.debug("Returning resources for Trial %s.", str(trial))
self._return_resources(trial.resources)
out = self._find_item(self._running, trial)
for result_id in out:
self._running.pop(result_id)
def continue_training(self, trial):
"""Continues the training of this trial."""
self._train(trial)
def pause_trial(self, trial):
"""Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which is restored when Trial is resumed.
"""
trial_future = self._find_item(self._running, trial)
if trial_future:
self._paused[trial_future[0]] = trial
super(RayTrialExecutor, self).pause_trial(trial)
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val
def get_running_trials(self):
"""Returns the running trials."""
return list(self._running.values())
def get_alive_node_ips(self):
nodes = ray.state.nodes()
ip_addresses = set()
for node in nodes:
if node["alive"]:
ip_addresses.add(node["NodeManagerAddress"])
return ip_addresses
def get_current_trial_ips(self):
return {t.node_ip for t in self.get_running_trials()}
def get_next_available_trial(self):
if ray.worker._mode() != ray.worker.LOCAL_MODE:
live_cluster_ips = self.get_alive_node_ips()
if live_cluster_ips - self.get_current_trial_ips():
for trial in self.get_running_trials():
if trial.node_ip and trial.node_ip not in live_cluster_ips:
logger.warning(
"{} (ip: {}) detected as stale. This is likely "
"because the node was lost. Processing this "
"trial first.".format(trial, trial.node_ip))
return trial
shuffled_results = list(self._running.keys())
random.shuffle(shuffled_results)
# Note: We shuffle the results because `ray.wait` by default returns
# the first available result, and we want to guarantee that slower
# trials (i.e. trials that run remotely) also get fairly reported.
# See https://github.com/ray-project/ray/issues/4211 for details.
start = time.time()
[result_id], _ = ray.wait(shuffled_results)
wait_time = time.time() - start
if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
self._last_nontrivial_wait = time.time()
if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
            logger.warning(
"Over the last {} seconds, the Tune event loop has been "
"backlogged processing new results. Consider increasing your "
"period of result reporting to improve performance.".format(
BOTTLENECK_WARN_PERIOD_S))
self._last_nontrivial_wait = time.time()
return self._running[result_id]
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run."""
trial_future = self._find_item(self._running, trial)
if not trial_future:
raise ValueError("Trial was not running.")
self._running.pop(trial_future[0])
with warn_if_slow("fetch_result"):
result = ray.get(trial_future[0])
# For local mode
if isinstance(result, _LocalWrapper):
result = result.unwrap()
return result
def _commit_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) + resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu + resources.cpu_total(),
committed.gpu + resources.gpu_total(),
custom_resources=custom_resources)
def _return_resources(self, resources):
committed = self._committed_resources
all_keys = set(resources.custom_resources).union(
set(committed.custom_resources))
custom_resources = {
k: committed.get(k) - resources.get_res_total(k)
for k in all_keys
}
self._committed_resources = Resources(
committed.cpu - resources.cpu_total(),
committed.gpu - resources.gpu_total(),
custom_resources=custom_resources)
assert self._committed_resources.is_nonnegative(), (
"Resource invalid: {}".format(resources))
def _update_avail_resources(self, num_retries=5):
for i in range(num_retries):
try:
resources = ray.cluster_resources()
except Exception:
# TODO(rliaw): Remove this when local mode is fixed.
# https://github.com/ray-project/ray/issues/4147
logger.debug("Using resources for local machine.")
resources = ray.services.check_and_update_resources(
None, None, None)
if not resources:
logger.warning(
"Cluster resources not detected or are 0. Retrying...")
time.sleep(0.5)
if not resources:
# NOTE: This hides the possibility that Ray may be waiting for
# clients to connect.
resources.setdefault("CPU", 0)
resources.setdefault("GPU", 0)
logger.warning("Cluster resources cannot be detected or are 0. "
"You can resume this experiment by passing in "
"`resume=True` to `run`.")
resources = resources.copy()
num_cpus = resources.pop("CPU", 0)
num_gpus = resources.pop("GPU", 0)
custom_resources = resources
self._avail_resources = Resources(
int(num_cpus), int(num_gpus), custom_resources=custom_resources)
self._last_resource_refresh = time.time()
self._resources_initialized = True
def has_resources(self, resources):
"""Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
"""
if time.time() - self._last_resource_refresh > self._refresh_period:
self._update_avail_resources()
currently_available = Resources.subtract(self._avail_resources,
self._committed_resources)
have_space = (
resources.cpu_total() <= currently_available.cpu
and resources.gpu_total() <= currently_available.gpu and all(
resources.get_res_total(res) <= currently_available.get(res)
for res in resources.custom_resources))
if have_space:
return True
can_overcommit = self._queue_trials
if (resources.cpu_total() > 0 and currently_available.cpu <= 0) or \
(resources.gpu_total() > 0 and currently_available.gpu <= 0) or \
any((resources.get_res_total(res_name) > 0
and currently_available.get(res_name) <= 0)
for res_name in resources.custom_resources):
can_overcommit = False # requested resource is already saturated
if can_overcommit:
logger.warning(
"Allowing trial to start even though the "
"cluster does not have enough free resources. Trial actors "
"may appear to hang until enough resources are added to the "
"cluster (e.g., via autoscaling). You can disable this "
"behavior by specifying `queue_trials=False` in "
"ray.tune.run().")
return True
return False
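    # Numeric sketch of the check above (illustrative values): with 8 CPUs
    # available and 6 committed, currently_available.cpu == 2, so a trial
    # requesting cpu_total() == 4 has no space but may still start when
    # queue_trials=True (the CPU pool is not yet saturated). Once all 8 CPUs
    # are committed, currently_available.cpu <= 0 disables overcommitting.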
def debug_string(self):
"""Returns a human readable message for printing to the console."""
if self._resources_initialized:
status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
self._committed_resources.cpu, self._avail_resources.cpu,
self._committed_resources.gpu, self._avail_resources.gpu)
customs = ", ".join([
"{}/{} {}".format(
self._committed_resources.get_res_total(name),
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources
])
if customs:
status += " ({})".format(customs)
return status
else:
return "Resources requested: ?"
def resource_string(self):
"""Returns a string describing the total resources available."""
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
self._avail_resources.gpu)
if self._avail_resources.custom_resources:
custom = ", ".join(
"{} {}".format(
self._avail_resources.get_res_total(name), name)
for name in self._avail_resources.custom_resources)
res_str += " ({})".format(custom)
return res_str
else:
return "? CPUs, ? GPUs"
def on_step_begin(self):
"""Before step() called, update the available resources."""
self._update_avail_resources()
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint."""
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.value = trial.runner.save_to_object.remote()
else:
# Keeps only highest performing checkpoints if enabled
if trial.keep_checkpoints_num:
try:
last_attr_val = trial.last_result[
trial.checkpoint_score_attr]
if (trial.compare_checkpoints(last_attr_val)
and not math.isnan(last_attr_val)):
trial.best_checkpoint_attr_value = last_attr_val
self._checkpoint_and_erase(trial)
except KeyError:
logger.warning(
"Result dict has no key: {}. keep"
"_checkpoints_num flag will not work".format(
trial.checkpoint_score_attr))
else:
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(
trial.runner.save.remote())
return trial._checkpoint.value
def _checkpoint_and_erase(self, trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
"""
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(trial.runner.save.remote())
if len(trial.history) >= trial.keep_checkpoints_num:
ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
trial.history.pop()
trial.history.insert(0, trial._checkpoint.value)
def restore(self, trial, checkpoint=None):
"""Restores training state from a given model checkpoint.
This will also sync the trial results to a new location
if restoring on a different node.
"""
if checkpoint is None or checkpoint.value is None:
checkpoint = trial._checkpoint
if checkpoint is None or checkpoint.value is None:
return True
if trial.runner is None:
logger.error("Unable to restore - no runner.")
self.set_status(trial, Trial.ERROR)
return False
try:
value = checkpoint.value
if checkpoint.storage == Checkpoint.MEMORY:
assert type(value) != Checkpoint, type(value)
trial.runner.restore_from_object.remote(value)
else:
# TODO: Somehow, the call to get the current IP on the
# remote actor can be very slow - a better fix would
# be to use an actor table to detect the IP of the Trainable
# and rsync the files there.
# See https://github.com/ray-project/ray/issues/5168
with warn_if_slow("get_current_ip"):
worker_ip = ray.get(trial.runner.current_ip.remote())
with warn_if_slow("sync_to_new_location"):
trial.sync_logger_to_new_location(worker_ip)
with warn_if_slow("restore_from_disk"):
ray.get(trial.runner.restore.remote(value))
trial.last_result = checkpoint.last_result
return True
except Exception:
logger.exception("Error restoring runner for Trial %s.", trial)
self.set_status(trial, Trial.ERROR)
return False
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
return ray.get(
trial.runner.export_model.remote(trial.export_formats))
return {}
| 40.126263
| 79
| 0.595804
|
4a17bd05725dd283ec848d54b43e7bf66a5d65f0
| 2,036
|
py
|
Python
|
compiler/tests/20_psram_1bank_2mux_1rw_1w_wmask_test.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 335
|
2018-03-13T21:05:22.000Z
|
2022-03-30T07:53:25.000Z
|
compiler/tests/20_psram_1bank_2mux_1rw_1w_wmask_test.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 87
|
2018-03-06T00:55:51.000Z
|
2022-03-30T19:38:29.000Z
|
compiler/tests/20_psram_1bank_2mux_1rw_1w_wmask_test.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 95
|
2018-03-14T16:22:55.000Z
|
2022-03-24T00:34:37.000Z
|
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys, os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class psram_1bank_2mux_1rw_1w_wmask_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
from sram_config import sram_config
OPTS.bitcell = "pbitcell"
OPTS.num_rw_ports = 1
OPTS.num_w_ports = 1
OPTS.num_r_ports = 0
globals.setup_bitcell()
c = sram_config(word_size=8,
write_size=4,
num_words=32,
num_banks=1)
c.num_words = 32
c.words_per_row = 2
c.recompute_sizes()
debug.info(1, "Layout test for {}rw,{}r,{}w psram "
"with {} bit words, {} words, {} words per "
"row, {} banks".format(OPTS.num_rw_ports,
OPTS.num_r_ports,
OPTS.num_w_ports,
c.word_size,
c.num_words,
c.words_per_row,
c.num_banks))
a = factory.create(module_type="sram", sram_config=c)
self.local_check(a, final_verification=True)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| 33.933333
| 81
| 0.565324
|
4a17be08c2dfdfc4476533a892ac794f4383b4b1
| 313
|
py
|
Python
|
week4/fib.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
week4/fib.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
week4/fib.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
import time
def fib(num):
    # The first two Fibonacci numbers are 1
if num == 1 or num == 2:
return 1
    # Return the sum of the two preceding Fibonacci numbers
return fib(num-1) + fib(num-2)
start = time.time()
for n in range(1, 36):
print("fib(", n, ") =", fib(n))
print(time.time() - start)
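
# A hedged optimization sketch (not part of the original exercise): the naive
# recursion above recomputes subproblems and takes exponential time; memoizing
# with the standard-library functools.lru_cache makes it linear.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_cached(num):
    if num == 1 or num == 2:
        return 1
    return fib_cached(num - 1) + fib_cached(num - 2)

start = time.time()
print("fib_cached(35) =", fib_cached(35))
print(time.time() - start)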
| 19.5625
| 54
| 0.587859
|
4a17be7be8674b2fc91d520ec974a243386d621b
| 277
|
py
|
Python
|
tests/test_setting_attr.py
|
gnud/django-translation-flags
|
8efaa59e4af377fce542b97e66b416acb144b5d1
|
[
"MIT"
] | 11
|
2018-11-29T17:00:44.000Z
|
2022-02-24T18:49:36.000Z
|
tests/test_setting_attr.py
|
gnud/django-translation-flags
|
8efaa59e4af377fce542b97e66b416acb144b5d1
|
[
"MIT"
] | 6
|
2019-04-28T09:26:50.000Z
|
2021-06-10T21:01:21.000Z
|
tests/test_setting_attr.py
|
silviolleite/django-translation-flags
|
c09cc0781c8692318d35e4159d8b135fe4dde4c7
|
[
"MIT"
] | 3
|
2019-08-05T15:52:55.000Z
|
2020-03-12T13:22:48.000Z
|
from django.test import TestCase
from django_translation_flags import app_settings
class AppSettingsTest(TestCase):
def test_has_defined(self):
"""Must have the MIDDLEWARE defined in app_settings.py"""
self.assertTrue(hasattr(app_settings, 'MIDDLEWARE'))
| 30.777778
| 65
| 0.765343
|
4a17bfcbcfdebf42a527ca63a8e33ef71ed67e7c
| 186
|
py
|
Python
|
tests/data/config/l.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | null | null | null |
tests/data/config/l.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | null | null | null |
tests/data/config/l.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
def func(x):
return x
_base_ = ['./l1.py', './l2.yaml', './l3.json', './l4.py']
item3 = False
item4 = 'test'
| 16.909091
| 57
| 0.607527
|
4a17c05e07f3f6971070d283193beef05340fab6
| 5,903
|
py
|
Python
|
doc/source/conf.py
|
ucl-exoplanets/taurex-cuda_public
|
1588815217ca0b3019d8a438207633ba07f2b5f2
|
[
"BSD-3-Clause"
] | null | null | null |
doc/source/conf.py
|
ucl-exoplanets/taurex-cuda_public
|
1588815217ca0b3019d8a438207633ba07f2b5f2
|
[
"BSD-3-Clause"
] | null | null | null |
doc/source/conf.py
|
ucl-exoplanets/taurex-cuda_public
|
1588815217ca0b3019d8a438207633ba07f2b5f2
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'TauREx-CUDA'
copyright = '2020, Ahmed F. Al-Refaie'
author = 'Ahmed F. Al-Refaie'
# The full version, including alpha/beta/rc tags
release = '0.8'
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'taurexdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'taurex.tex', 'taurex Documentation',
'Ahmed Al-Refaie', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'TauREx-CUDA', 'TauREx-CUDA Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'taurex_cuda', 'TauREx-CUDA Documentation',
author, 'taurex_cuda', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Add mappings
intersphinx_mapping = {
'taurex': ('https://taurex3-public.readthedocs.io/en/latest/', None),
}
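# With the mapping above, pages in this documentation can cross-reference
# objects in the TauREx docs through intersphinx roles. A hypothetical example
# (the target name is illustrative, not verified against the TauREx API):
#
#     :class:`taurex.model.ForwardModel`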
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
add_module_names = False
nbsphinx_execute = 'never'
| 29.813131
| 79
| 0.646282
|
4a17c0983a76dc672c68e1fdbb8ce234d8ff48e3
| 28,867
|
py
|
Python
|
src/saml2/assertion.py
|
clokep/pysaml2
|
e910240a3e4a208ad088693406712fdfe196b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/saml2/assertion.py
|
clokep/pysaml2
|
e910240a3e4a208ad088693406712fdfe196b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/saml2/assertion.py
|
clokep/pysaml2
|
e910240a3e4a208ad088693406712fdfe196b2ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import importlib
import logging
import re
import six
from warnings import warn as _warn
from saml2 import saml
from saml2 import xmlenc
from saml2.attribute_converter import from_local, ac_factory
from saml2.attribute_converter import get_local_name
from saml2.s_utils import assertion_factory
from saml2.s_utils import factory
from saml2.s_utils import sid
from saml2.s_utils import MissingValue
from saml2.saml import NAME_FORMAT_URI
from saml2.time_util import instant
from saml2.time_util import in_a_while
logger = logging.getLogger(__name__)
def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
    :param vlist: the allowed (required or optional) values
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals
if isinstance(vlist, six.string_types):
vlist = [vlist]
res = []
for val in vlist:
if val in vals:
res.append(val)
if must:
if res:
return res
else:
raise MissingValue("Required attribute value missing")
else:
return res
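# Hedged usage sketch for _filter_values (values are illustrative):
#     _filter_values(['a', 'b', 'c'], vlist=['b', 'x'])    -> ['b']
#     _filter_values(['a', 'b', 'c'])                      -> ['a', 'b', 'c']
#     _filter_values(['a'], vlist=['x'], must=True)        -> raises MissingValue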
def _match(attr, ava):
if attr in ava:
return attr
_la = attr.lower()
if _la in ava:
return _la
for _at in ava.keys():
if _at.lower() == _la:
return _at
return None
def filter_on_attributes(ava, required=None, optional=None, acs=None,
fail_on_unfulfilled_requirements=True):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
    :param fail_on_unfulfilled_requirements: Whether to fail when
        required attributes are missing.
:return: The modified attribute value assertion
"""
def _match_attr_name(attr, ava):
local_name = None
for a in ['name_format', 'friendly_name']:
_val = attr.get(a)
if _val:
if a == 'name_format':
local_name = get_local_name(acs, attr['name'], _val)
else:
local_name = _val
break
if local_name:
_fn = _match(local_name, ava)
else:
_fn = None
if not _fn: # In the unlikely case that someone has provided us with
# URIs as attribute names
_fn = _match(attr["name"], ava)
return _fn
def _apply_attr_value_restrictions(attr, res, must=False):
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
# ignore duplicate RequestedAttribute entries
val = _filter_values(ava[_fn], values)
res[_fn] = val if val is not None else []
return _filter_values(ava[_fn], values, must)
res = {}
if required is None:
required = []
for attr in required:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, True)
elif fail_on_unfulfilled_requirements:
desc = "Required attribute missing: '%s'" % (attr["name"])
raise MissingValue(desc)
if optional is None:
optional = []
for attr in optional:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, False)
return res
def filter_on_demands(ava, required=None, optional=None):
""" Never return more than is needed. Filters out everything
the server is prepared to return but the receiver doesn't ask for
:param ava: Attribute value assertion as a dictionary
:param required: Required attributes
:param optional: Optional attributes
:return: The possibly reduced assertion
"""
# Is all what's required there:
if required is None:
required = {}
lava = dict([(k.lower(), k) for k in ava.keys()])
for attr, vals in required.items():
attr = attr.lower()
if attr in lava:
if vals:
for val in vals:
if val not in ava[lava[attr]]:
raise MissingValue(
"Required attribute value missing: %s,%s" % (attr,
val))
else:
raise MissingValue("Required attribute missing: %s" % (attr,))
if optional is None:
optional = {}
oka = [k.lower() for k in required.keys()]
oka.extend([k.lower() for k in optional.keys()])
    # OK, so I can imagine releasing values that are not absolutely necessary
# but not attributes that are not asked for.
for attr in lava.keys():
if attr not in oka:
del ava[lava[attr]]
return ava
def filter_on_wire_representation(ava, acs, required=None, optional=None):
"""
:param ava: A dictionary with attributes and values
:param acs: List of tuples (Attribute Converter name,
Attribute Converter instance)
:param required: A list of saml.Attributes
:param optional: A list of saml.Attributes
:return: Dictionary of expected/wanted attributes and values
"""
acsdic = dict([(ac.name_format, ac) for ac in acs])
if required is None:
required = []
if optional is None:
optional = []
res = {}
for attr, val in ava.items():
done = False
for req in required:
try:
_name = acsdic[req.name_format]._to[attr]
if _name == req.name:
res[attr] = val
done = True
except KeyError:
pass
if done:
continue
for opt in optional:
try:
_name = acsdic[opt.name_format]._to[attr]
if _name == opt.name:
res[attr] = val
break
except KeyError:
pass
return res
def filter_attribute_value_assertions(ava, attribute_restrictions=None):
""" Will weed out attribute values and values according to the
rules defined in the attribute restrictions. If filtering results in
an attribute without values, then the attribute is removed from the
assertion.
:param ava: The incoming attribute value assertion (dictionary)
:param attribute_restrictions: The rules that govern which attributes
and values that are allowed. (dictionary)
:return: The modified attribute value assertion
"""
if not attribute_restrictions:
return ava
for attr, vals in list(ava.items()):
_attr = attr.lower()
try:
_rests = attribute_restrictions[_attr]
except KeyError:
del ava[attr]
else:
if _rests is None:
continue
if isinstance(vals, six.string_types):
vals = [vals]
rvals = []
for restr in _rests:
for val in vals:
if restr.match(val):
rvals.append(val)
if rvals:
ava[attr] = list(set(rvals))
else:
del ava[attr]
return ava
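# Hedged usage sketch (illustrative values): with
#     attribute_restrictions = {'mail': [re.compile(r'.*@example\.org$')]}
# an ava of {'mail': ['a@example.org', 'b@other.net'], 'sn': ['Doe']} is
# reduced to {'mail': ['a@example.org']}: 'sn' is dropped because it has no
# entry in the restrictions, and non-matching values are filtered out.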
def restriction_from_attribute_spec(attributes):
restr = {}
for attribute in attributes:
restr[attribute.name] = {}
for val in attribute.attribute_value:
if not val.text:
restr[attribute.name] = None
break
else:
restr[attribute.name] = re.compile(val.text)
return restr
def compile(restrictions):
""" This is only for IdPs or AAs, and it's about limiting what
is returned to the SP.
In the configuration file, restrictions on which values that
can be returned are specified with the help of regular expressions.
This function goes through and pre-compiles the regular expressions.
:param restrictions: policy configuration
:return: The assertion with the string specification replaced with
a compiled regular expression.
"""
for who, spec in restrictions.items():
spec = spec or {}
entity_categories = spec.get("entity_categories", [])
ecs = []
for cat in entity_categories:
try:
_mod = importlib.import_module(cat)
except ImportError:
_mod = importlib.import_module("saml2.entity_category.%s" % cat)
_ec = {}
for key, items in _mod.RELEASE.items():
alist = [k.lower() for k in items]
_only_required = getattr(_mod, "ONLY_REQUIRED", {}).get(key, False)
_ec[key] = (alist, _only_required)
ecs.append(_ec)
spec["entity_categories"] = ecs or None
attribute_restrictions = spec.get("attribute_restrictions") or {}
_attribute_restrictions = {}
for key, values in attribute_restrictions.items():
lkey = key.lower()
values = [] if not values else values
_attribute_restrictions[lkey] = (
[re.compile(value) for value in values] or None
)
spec["attribute_restrictions"] = _attribute_restrictions or None
return restrictions
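# A minimal, hypothetical policy configuration before compile() (entity ID,
# attribute and pattern names are illustrative):
#
#     restrictions = {
#         "default": {
#             "attribute_restrictions": {"mail": [r".*@example\.org$"]},
#         },
#     }
#
# compile() lower-cases the attribute names and replaces each pattern string
# with a pre-compiled regular expression object.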
class Policy(object):
"""Handles restrictions on assertions."""
def __init__(self, restrictions=None, mds=None):
self.metadata_store = mds
self._restrictions = self.setup_restrictions(restrictions)
logger.debug("policy restrictions: %s", self._restrictions)
self.acs = []
def setup_restrictions(self, restrictions=None):
if restrictions is None:
return None
restrictions = copy.deepcopy(restrictions)
restrictions = compile(restrictions)
return restrictions
def get(self, attribute, sp_entity_id, default=None):
"""
:param attribute:
:param sp_entity_id:
:param default:
:return:
"""
if not self._restrictions:
return default
ra_info = (
self.metadata_store.registration_info(sp_entity_id) or {}
if self.metadata_store is not None
else {}
)
ra_entity_id = ra_info.get("registration_authority")
sp_restrictions = self._restrictions.get(sp_entity_id)
ra_restrictions = self._restrictions.get(ra_entity_id)
default_restrictions = (
self._restrictions.get("default")
or self._restrictions.get("")
)
restrictions = (
sp_restrictions
if sp_restrictions is not None
else ra_restrictions
if ra_restrictions is not None
else default_restrictions
if default_restrictions is not None
else {}
)
attribute_restriction = restrictions.get(attribute)
restriction = (
attribute_restriction
if attribute_restriction is not None
else default
)
return restriction
def get_nameid_format(self, sp_entity_id):
""" Get the NameIDFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("nameid_format", sp_entity_id, saml.NAMEID_FORMAT_TRANSIENT)
def get_name_form(self, sp_entity_id):
""" Get the NameFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("name_form", sp_entity_id, default=NAME_FORMAT_URI)
def get_lifetime(self, sp_entity_id):
""" The lifetime of the assertion
:param sp_entity_id: The SP entity ID
:param: lifetime as a dictionary
"""
        # default is one hour
return self.get("lifetime", sp_entity_id, {"hours": 1})
def get_attribute_restrictions(self, sp_entity_id):
""" Return the attribute restriction for SP that want the information
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("attribute_restrictions", sp_entity_id)
def get_fail_on_missing_requested(self, sp_entity_id):
""" Return the whether the IdP should should fail if the SPs
requested attributes could not be found.
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("fail_on_missing_requested", sp_entity_id, default=True)
def get_sign(self, sp_entity_id):
"""
Possible choices
"sign": ["response", "assertion", "on_demand"]
:param sp_entity_id:
:return:
"""
return self.get("sign", sp_entity_id, default=[])
def get_entity_categories(self, sp_entity_id, mds=None, required=None):
"""
:param sp_entity_id:
:param required: required attributes
:return: A dictionary with restrictions
"""
if mds is not None:
warn_msg = (
"The mds parameter for saml2.assertion.Policy.get_entity_categories "
"is deprecated; "
"instead, initialize the Policy object setting the mds param."
)
logger.warning(warn_msg)
_warn(warn_msg, DeprecationWarning)
def post_entity_categories(maps, sp_entity_id=None, mds=None, required=None):
restrictions = {}
required = [d['friendly_name'].lower() for d in (required or [])]
if mds:
ecs = mds.entity_categories(sp_entity_id)
for ec_map in maps:
for key, (atlist, only_required) in ec_map.items():
if key == "": # always released
attrs = atlist
elif isinstance(key, tuple):
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
for _key in key:
if _key not in ecs:
attrs = []
break
elif key in ecs:
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
else:
attrs = []
for attr in attrs:
restrictions[attr] = None
return restrictions
sentinel = object()
result1 = self.get("entity_categories", sp_entity_id, default=sentinel)
if result1 is sentinel:
return {}
result2 = post_entity_categories(
result1,
sp_entity_id=sp_entity_id,
mds=(mds or self.metadata_store),
required=required,
)
return result2
def not_on_or_after(self, sp_entity_id):
""" When the assertion stops being valid, should not be
used after this time.
:param sp_entity_id: The SP entity ID
:return: String representation of the time
"""
return in_a_while(**self.get_lifetime(sp_entity_id))
def filter(self, ava, sp_entity_id, mdstore=None, required=None, optional=None):
""" What attribute and attribute values returns depends on what
the SP or the registration authority has said it wants in the request
or in the metadata file and what the IdP/AA wants to release.
An assumption is that what the SP or the registration authority
asks for overrides whatever is in the metadata. But of course the
IdP never releases anything it doesn't want to.
:param ava: The information about the subject as a dictionary
:param sp_entity_id: The entity ID of the SP
:param required: Attributes that the SP requires in the assertion
:param optional: Attributes that the SP regards as optional
:return: A possibly modified AVA
"""
if mdstore is not None:
warn_msg = (
"The mdstore parameter for saml2.assertion.Policy.filter "
"is deprecated; "
"instead, initialize the Policy object setting the mds param."
)
logger.warning(warn_msg)
_warn(warn_msg, DeprecationWarning)
# acs MUST have a value, fall back to default.
if not self.acs:
self.acs = ac_factory()
subject_ava = ava.copy()
# entity category restrictions
_ent_rest = self.get_entity_categories(sp_entity_id, mds=mdstore, required=required)
if _ent_rest:
subject_ava = filter_attribute_value_assertions(subject_ava, _ent_rest)
elif required or optional:
logger.debug("required: %s, optional: %s", required, optional)
subject_ava = filter_on_attributes(
subject_ava,
required,
optional,
self.acs,
self.get_fail_on_missing_requested(sp_entity_id),
)
# attribute restrictions
_attr_rest = self.get_attribute_restrictions(sp_entity_id)
subject_ava = filter_attribute_value_assertions(subject_ava, _attr_rest)
return subject_ava or {}
def restrict(self, ava, sp_entity_id, metadata=None):
""" Identity attribute names are expected to be expressed as FriendlyNames
:return: A filtered ava according to the IdPs/AAs rules and
the list of required/optional attributes according to the SP.
If the requirements can't be met an exception is raised.
"""
if metadata is not None:
warn_msg = (
"The metadata parameter for saml2.assertion.Policy.restrict "
"is deprecated and ignored; "
"instead, initialize the Policy object setting the mds param."
)
logger.warning(warn_msg)
_warn(warn_msg, DeprecationWarning)
metadata_store = metadata or self.metadata_store
spec = (
metadata_store.attribute_requirement(sp_entity_id) or {}
if metadata_store
else {}
)
return self.filter(
ava,
sp_entity_id,
required=spec.get("required"),
optional=spec.get("optional"),
)
def conditions(self, sp_entity_id):
""" Return a saml.Condition instance
:param sp_entity_id: The SP entity ID
:return: A saml.Condition instance
"""
return factory(
saml.Conditions,
not_before=instant(),
# How long might depend on who's getting it
not_on_or_after=self.not_on_or_after(sp_entity_id),
audience_restriction=[
factory(
saml.AudienceRestriction,
audience=[factory(saml.Audience, text=sp_entity_id)],
),
],
)
class EntityCategories(object):
pass
def _authn_context_class_ref(authn_class, authn_auth=None):
"""
    Construct the authn context with an authn context class reference
:param authn_class: The authn context class reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
cntx_class = factory(saml.AuthnContextClassRef, text=authn_class)
if authn_auth:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
else:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class)
def _authn_context_decl(decl, authn_auth=None):
"""
    Construct the authn context with an authn context declaration
:param decl: The authn context declaration
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl=decl,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _authn_context_decl_ref(decl_ref, authn_auth=None):
"""
    Construct the authn context with an authn context declaration reference
:param decl_ref: The authn context declaration reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl_ref=decl_ref,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def authn_statement(authn_class=None, authn_auth=None,
authn_decl=None, authn_decl_ref=None, authn_instant="",
subject_locality="", session_not_on_or_after=None):
"""
Construct the AuthnStatement
:param authn_class: Authentication Context Class reference
:param authn_auth: Authenticating Authority
:param authn_decl: Authentication Context Declaration
:param authn_decl_ref: Authentication Context Declaration reference
:param authn_instant: When the Authentication was performed.
Assumed to be seconds since the Epoch.
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:return: An AuthnContext instance
"""
if authn_instant:
_instant = instant(time_stamp=authn_instant)
else:
_instant = instant()
if authn_class:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_class_ref(
authn_class, authn_auth))
elif authn_decl:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl(authn_decl, authn_auth))
elif authn_decl_ref:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl_ref(authn_decl_ref,
authn_auth))
else:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after)
if subject_locality:
res.subject_locality = saml.SubjectLocality(text=subject_locality)
return res
def do_subject_confirmation(not_on_or_after, key_info=None, **treeargs):
"""
:param not_on_or_after: not_on_or_after policy
:param subject_confirmation_method: How was the subject confirmed
:param address: The network address/location from which an attesting entity
can present the assertion.
:param key_info: Information of the key used to confirm the subject
:param in_response_to: The ID of a SAML protocol message in response to
which an attesting entity can present the assertion.
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:param not_before: A time instant before which the subject cannot be
confirmed. The time value MUST be encoded in UTC.
:return:
"""
_sc = factory(saml.SubjectConfirmation, **treeargs)
_scd = _sc.subject_confirmation_data
_scd.not_on_or_after = not_on_or_after
if _sc.method == saml.SCM_HOLDER_OF_KEY:
_scd.add_extension_element(key_info)
return _sc
def do_subject(not_on_or_after, name_id, **farg):
specs = farg['subject_confirmation']
if isinstance(specs, list):
res = [do_subject_confirmation(not_on_or_after, **s) for s in specs]
else:
res = [do_subject_confirmation(not_on_or_after, **specs)]
return factory(saml.Subject, name_id=name_id, subject_confirmation=res)
class Assertion(dict):
""" Handles assertions about subjects """
def __init__(self, dic=None):
dict.__init__(self, dic)
self.acs = []
def construct(self, sp_entity_id, attrconvs, policy, issuer, farg,
authn_class=None, authn_auth=None, authn_decl=None,
encrypt=None, sec_context=None, authn_decl_ref=None,
authn_instant="", subject_locality="", authn_statem=None,
name_id=None, session_not_on_or_after=None):
""" Construct the Assertion
:param sp_entity_id: The entityid of the SP
:param in_response_to: An identifier of the message, this message is
a response to
:param name_id: An NameID instance
:param attrconvs: AttributeConverters
:param policy: The policy that should be adhered to when replying
:param issuer: Who is issuing the statement
:param authn_class: The authentication class
:param authn_auth: The authentication instance
:param authn_decl: An Authentication Context declaration
:param encrypt: Whether to encrypt parts or all of the Assertion
:param sec_context: The security context used when encrypting
:param authn_decl_ref: An Authentication Context declaration reference
:param authn_instant: When the Authentication was performed
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:param authn_statem: A AuthnStatement instance
:return: An Assertion instance
"""
_name_format = policy.get_name_form(sp_entity_id)
attr_statement = saml.AttributeStatement(
attribute=from_local(attrconvs, self, _name_format)
)
if encrypt == "attributes":
for attr in attr_statement.attribute:
enc = sec_context.encrypt(text="%s" % attr)
encd = xmlenc.encrypted_data_from_string(enc)
encattr = saml.EncryptedAttribute(encrypted_data=encd)
attr_statement.encrypted_attribute.append(encattr)
attr_statement.attribute = []
# start using now and for some time
conds = policy.conditions(sp_entity_id)
if authn_statem:
_authn_statement = authn_statem
elif authn_auth or authn_class or authn_decl or authn_decl_ref:
_authn_statement = authn_statement(authn_class, authn_auth,
authn_decl, authn_decl_ref,
authn_instant,
subject_locality,
session_not_on_or_after=session_not_on_or_after)
else:
_authn_statement = None
subject = do_subject(
policy.not_on_or_after(sp_entity_id), name_id, **farg['subject']
)
_ass = assertion_factory(issuer=issuer, conditions=conds, subject=subject)
if _authn_statement:
_ass.authn_statement = [_authn_statement]
if not attr_statement.empty():
_ass.attribute_statement = [attr_statement]
return _ass
def apply_policy(self, sp_entity_id, policy):
""" Apply policy to the assertion I'm representing
:param sp_entity_id: The SP entity ID
:param policy: The policy
:return: The resulting AVA after the policy is applied
"""
policy.acs = self.acs
ava = policy.restrict(self, sp_entity_id)
for key, val in list(self.items()):
if key in ava:
self[key] = ava[key]
else:
del self[key]
return ava
| 34.001178
| 95
| 0.606921
|
4a17c0b45295aca2f3d538e2a3fa67e325956dc7
| 4,617
|
py
|
Python
|
dataprocess/inout_points.py
|
AyakashiQ/PCGCv1
|
d01673bc5dd18afd24872f35ab52f20758aa1a18
|
[
"Apache-2.0"
] | null | null | null |
dataprocess/inout_points.py
|
AyakashiQ/PCGCv1
|
d01673bc5dd18afd24872f35ab52f20758aa1a18
|
[
"Apache-2.0"
] | null | null | null |
dataprocess/inout_points.py
|
AyakashiQ/PCGCv1
|
d01673bc5dd18afd24872f35ab52f20758aa1a18
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Nanjing University, Vision Lab.
# Last update: 2019.09.17
import numpy as np
import os
def load_ply_data(filename):
'''
load data from ply file.
'''
f = open(filename)
#1.read all points
points = []
for line in f:
#only x,y,z
wordslist = line.split(' ')
try:
x, y, z = float(wordslist[0]),float(wordslist[1]),float(wordslist[2])
except ValueError:
continue
points.append([x,y,z])
points = np.array(points)
    points = points.astype(np.int32)  # np.uint8
# print(filename,'\n','length:',points.shape)
f.close()
return points
def write_ply_data(filename, points):
'''
write data to ply file.
'''
    if os.path.exists(filename):
        os.remove(filename)  # portable; avoids shelling out to 'rm'
f = open(filename,'a+')
#print('data.shape:',data.shape)
f.writelines(['ply\n','format ascii 1.0\n'])
f.write('element vertex '+str(points.shape[0])+'\n')
f.writelines(['property float x\n','property float y\n','property float z\n'])
f.write('end_header\n')
for _, point in enumerate(points):
f.writelines([str(point[0]), ' ', str(point[1]), ' ',str(point[2]), '\n'])
f.close()
return
def load_points(filename, cube_size=64, min_num=20):
"""Load point cloud & split to cubes.
    Args: point cloud file; cube size; minimum number of points in a cube.
Return: cube positions & points in each cube.
"""
# load point clouds
point_cloud = load_ply_data(filename)
# partition point cloud to cubes.
    cubes = {}  # {cube start position: points in cube}
for _, point in enumerate(point_cloud):
cube_index = tuple((point//cube_size).astype("int"))
local_point = point % cube_size
if not cube_index in cubes.keys():
cubes[cube_index] = local_point
else:
cubes[cube_index] = np.vstack((cubes[cube_index] ,local_point))
# filter by minimum number.
k_del = []
for _, k in enumerate(cubes.keys()):
if cubes[k].shape[0] < min_num:
k_del.append(k)
for _, k in enumerate(k_del):
del cubes[k]
# get points and cube positions.
cube_positions = np.array(list(cubes.keys()))
set_points = []
    # ordered
step = cube_positions.max() + 1
cube_positions_n = cube_positions[:,0:1] + cube_positions[:,1:2]*step + cube_positions[:,2:3]*step*step
cube_positions_n = np.sort(cube_positions_n, axis=0)
x = cube_positions_n % step
y = (cube_positions_n // step) % step
z = cube_positions_n // step // step
    cube_positions_ordered = np.concatenate((x,y,z), -1)
    for _, k in enumerate(cube_positions_ordered):
        set_points.append(cubes[tuple(k)].astype("int16"))
return set_points, cube_positions
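# Ordering sketch for the linearization above (illustrative numbers): with
# step = 4, cube position (x, y, z) = (1, 2, 3) maps to 1 + 2*4 + 3*16 = 57;
# sorting these scalars orders cubes by z, then y, then x, and the modulo /
# integer-division lines recover (x, y, z) from each sorted scalar.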
def save_points(set_points, cube_positions, filename, cube_size=64):
"""Combine & save points."""
# order cube positions.
step = cube_positions.max() + 1
cube_positions_n = cube_positions[:,0:1] + cube_positions[:,1:2]*step + cube_positions[:,2:3]*step*step
cube_positions_n = np.sort(cube_positions_n, axis=0)
x = cube_positions_n % step
y = (cube_positions_n // step) % step
z = cube_positions_n // step // step
    cube_positions_ordered = np.concatenate((x,y,z), -1)
# combine points.
point_cloud = []
    for k, v in zip(cube_positions_ordered, set_points):
points = v + np.array(k) * cube_size
point_cloud.append(points)
point_cloud = np.concatenate(point_cloud).astype("int")
write_ply_data(filename, point_cloud)
return
def points2voxels(set_points, cube_size):
"""Transform points to voxels (binary occupancy map).
Args: points list; cube size;
    Return: An array with shape [batch_size, cube_size, cube_size, cube_size, 1].
"""
voxels = []
for _, points in enumerate(set_points):
points = points.astype("int")
vol = np.zeros((cube_size,cube_size,cube_size))
vol[points[:,0],points[:,1],points[:,2]] = 1.0
vol = np.expand_dims(vol,-1)
voxels.append(vol)
voxels = np.array(voxels)
return voxels
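# Shape sketch: a cube containing the single point (0, 1, 2) becomes a volume
# of shape (cube_size, cube_size, cube_size, 1) with vol[0, 1, 2, 0] == 1.0
# and zeros elsewhere; stacking all cubes yields the [batch, D, D, D, 1] array.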
def voxels2points(voxels):
"""extract points from each voxel."""
voxels = np.squeeze(np.uint8(voxels)) # 0 or 1
set_points = []
for _, vol in enumerate(voxels):
points = np.array(np.where(vol>0)).transpose((1,0))
set_points.append(points)
return set_points
if __name__=='__main__':
name = '../testdata/8iVFB/loot_vox10_1200.ply'
name_rec = 'rec.ply'
set_points, cube_positions = load_points(name, cube_size=64, min_num=20)
voxels = points2voxels(set_points, cube_size=64)
print('voxels:',voxels.shape)
points_rec = voxels2points(voxels)
save_points(points_rec, cube_positions, name_rec, cube_size=64)
os.system("../myutils/pc_error_d" \
+ ' -a ' + name + ' -b ' + name_rec + " -r 1023")
| 30.176471
| 105
| 0.674031
|
4a17c1049aebb79ed58308c2f60356f382ebf943
| 802
|
py
|
Python
|
alembic/versions/55c008192aa_controversy.py
|
wenbs/mptracker
|
e011ab11954bbf785ae11fea7ed977440df2284a
|
[
"MIT"
] | 4
|
2015-01-20T15:03:15.000Z
|
2017-03-15T09:56:07.000Z
|
alembic/versions/55c008192aa_controversy.py
|
wenbs/mptracker
|
e011ab11954bbf785ae11fea7ed977440df2284a
|
[
"MIT"
] | 3
|
2021-03-31T18:53:12.000Z
|
2022-03-21T22:16:35.000Z
|
alembic/versions/55c008192aa_controversy.py
|
wenbs/mptracker
|
e011ab11954bbf785ae11fea7ed977440df2284a
|
[
"MIT"
] | 6
|
2015-12-13T08:56:49.000Z
|
2021-08-07T20:36:29.000Z
|
revision = '55c008192aa'
down_revision = 'c9230f65c6'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('controversy',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('slug', sa.Text(), nullable=False),
sa.Column('title', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug'),
)
op.add_column('voting_session',
sa.Column('controversy_id', postgresql.UUID(), nullable=True))
op.create_foreign_key(
'controversy_id_fkey',
'voting_session', 'controversy',
['controversy_id'], ['id'],
)
def downgrade():
op.drop_column('voting_session', 'controversy_id')
op.drop_table('controversy')
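# Hedged CLI sketch (standard Alembic commands, using the revisions above):
#     alembic upgrade 55c008192aa     # apply this migration
#     alembic downgrade c9230f65c6    # revert to the previous revision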
| 27.655172
| 70
| 0.659601
|
4a17c1cda7b7cecd31fa5985b2943986e487db02
| 40,791
|
py
|
Python
|
test/sql/test_defaults.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
test/sql/test_defaults.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
test/sql/test_defaults.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
from sqlalchemy.testing import eq_, assert_raises_message, assert_raises
import datetime
from sqlalchemy.schema import CreateSequence, DropSequence
from sqlalchemy.sql import select, text
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, Boolean, exc,\
Sequence, func, literal, Unicode
from sqlalchemy.types import TypeDecorator, TypeEngine
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.dialects import sqlite
from sqlalchemy.testing import fixtures
from sqlalchemy.util import u, b
from sqlalchemy import util
t = f = f2 = ts = currenttime = metadata = default_generator = None
class DefaultTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global t, f, f2, ts, currenttime, metadata, default_generator
db = testing.db
metadata = MetaData(db)
default_generator = {'x': 50}
def mydefault():
default_generator['x'] += 1
return default_generator['x']
def myupdate_with_ctx(ctx):
conn = ctx.connection
return conn.execute(sa.select([sa.text('13')])).scalar()
def mydefault_using_connection(ctx):
conn = ctx.connection
try:
return conn.execute(sa.select([sa.text('12')])).scalar()
finally:
# ensure a "close()" on this connection does nothing,
# since its a "branched" connection
conn.close()
use_function_defaults = testing.against('postgresql', 'mssql')
is_oracle = testing.against('oracle')
class MyClass(object):
@classmethod
def gen_default(cls, ctx):
return "hi"
# select "count(1)" returns different results on different DBs also
# correct for "current_date" compatible as column default, value
# differences
currenttime = func.current_date(type_=sa.Date, bind=db)
if is_oracle:
ts = db.scalar(sa.select([func.trunc(func.sysdate(), sa.literal_column("'DAY'"), type_=sa.Date).label('today')]))
assert isinstance(ts, datetime.date) and not isinstance(ts, datetime.datetime)
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
# TODO: engine propigation across nested functions not working
currenttime = func.trunc(currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date)
def1 = currenttime
def2 = func.trunc(sa.text("sysdate"), sa.literal_column("'DAY'"), type_=sa.Date)
deftype = sa.Date
elif use_function_defaults:
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
def1 = currenttime
deftype = sa.Date
if testing.against('mssql'):
def2 = sa.text("getdate()")
else:
def2 = sa.text("current_date")
ts = db.scalar(func.current_date())
else:
f = len('abcdef')
f2 = len('abcdefghijk')
def1 = def2 = "3"
ts = 3
deftype = Integer
t = Table('default_test1', metadata,
# python function
Column('col1', Integer, primary_key=True,
default=mydefault),
# python literal
Column('col2', String(20),
default="imthedefault",
onupdate="im the update"),
# preexecute expression
Column('col3', Integer,
default=func.length('abcdef'),
onupdate=func.length('abcdefghijk')),
# SQL-side default from sql expression
Column('col4', deftype,
server_default=def1),
# SQL-side default from literal expression
Column('col5', deftype,
server_default=def2),
# preexecute + update timestamp
Column('col6', sa.Date,
default=currenttime,
onupdate=currenttime),
Column('boolcol1', sa.Boolean, default=True),
Column('boolcol2', sa.Boolean, default=False),
# python function which uses ExecutionContext
Column('col7', Integer,
default=mydefault_using_connection,
onupdate=myupdate_with_ctx),
# python builtin
Column('col8', sa.Date,
default=datetime.date.today,
onupdate=datetime.date.today),
# combo
Column('col9', String(20),
default='py',
server_default='ddl'),
# python method w/ context
Column('col10', String(20), default=MyClass.gen_default)
)
t.create()
@classmethod
def teardown_class(cls):
t.drop()
def teardown(self):
default_generator['x'] = 50
t.delete().execute()
def test_bad_arg_signature(self):
ex_msg = \
"ColumnDefault Python function takes zero "\
"or one positional arguments"
def fn1(x, y):
pass
def fn2(x, y, z=3):
pass
class fn3(object):
def __init__(self, x, y):
pass
class FN4(object):
def __call__(self, x, y):
pass
fn4 = FN4()
for fn in fn1, fn2, fn3, fn4:
assert_raises_message(sa.exc.ArgumentError,
ex_msg,
sa.ColumnDefault, fn)
def test_arg_signature(self):
def fn1():
pass
def fn2():
pass
def fn3(x=1):
eq_(x, 1)
def fn4(x=1, y=2, z=3):
eq_(x, 1)
fn5 = list
class fn6a(object):
def __init__(self, x):
eq_(x, "context")
class fn6b(object):
def __init__(self, x, y=3):
eq_(x, "context")
class FN7(object):
def __call__(self, x):
eq_(x, "context")
fn7 = FN7()
class FN8(object):
def __call__(self, x, y=3):
eq_(x, "context")
fn8 = FN8()
for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
c = sa.ColumnDefault(fn)
c.arg("context")
@testing.fails_on('firebird', 'Data type unknown')
def test_standalone(self):
c = testing.db.engine.contextual_connect()
x = c.execute(t.c.col1.default)
y = t.c.col2.default.execute()
z = c.execute(t.c.col3.default)
assert 50 <= x <= 57
eq_(y, 'imthedefault')
eq_(z, f)
eq_(f2, 11)
def test_py_vs_server_default_detection(self):
def has_(name, *wanted):
slots = ['default', 'onupdate', 'server_default', 'server_onupdate']
col = tbl.c[name]
for slot in wanted:
slots.remove(slot)
assert getattr(col, slot) is not None, getattr(col, slot)
for slot in slots:
assert getattr(col, slot) is None, getattr(col, slot)
tbl = t
has_('col1', 'default')
has_('col2', 'default', 'onupdate')
has_('col3', 'default', 'onupdate')
has_('col4', 'server_default')
has_('col5', 'server_default')
has_('col6', 'default', 'onupdate')
has_('boolcol1', 'default')
has_('boolcol2', 'default')
has_('col7', 'default', 'onupdate')
has_('col8', 'default', 'onupdate')
has_('col9', 'default', 'server_default')
ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
t2 = Table('t2', MetaData(),
Column('col1', Integer, Sequence('foo')),
Column('col2', Integer,
default=Sequence('foo'),
server_default='y'),
Column('col3', Integer,
Sequence('foo'),
server_default='x'),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y')),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y'),
DefaultClause('y', for_update=True)),
Column('col5', Integer,
ColumnDefault('x'),
DefaultClause('y'),
onupdate='z'),
Column('col6', Integer,
ColumnDefault('x'),
server_default='y',
onupdate='z'),
Column('col7', Integer,
default='x',
server_default='y',
onupdate='z'),
Column('col8', Integer,
server_onupdate='u',
default='x',
server_default='y',
onupdate='z'))
tbl = t2
has_('col1', 'default')
has_('col2', 'default', 'server_default')
has_('col3', 'default', 'server_default')
has_('col4', 'default', 'server_default', 'server_onupdate')
has_('col5', 'default', 'server_default', 'onupdate')
has_('col6', 'default', 'server_default', 'onupdate')
has_('col7', 'default', 'server_default', 'onupdate')
has_('col8', 'default', 'server_default', 'onupdate', 'server_onupdate')
@testing.fails_on('firebird', 'Data type unknown')
def test_insert(self):
r = t.insert().execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
r = t.insert(inline=True).execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
t.insert().execute()
ctexec = sa.select([currenttime.label('now')], bind=testing.db).scalar()
l = t.select().order_by(t.c.col1).execute()
today = datetime.date.today()
eq_(l.fetchall(), [
(x, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py', 'hi')
for x in range(51, 54)])
t.insert().execute(col9=None)
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
eq_(t.select(t.c.col1 == 54).execute().fetchall(),
[(54, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, None, 'hi')])
@testing.fails_on('firebird', 'Data type unknown')
def test_insertmany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
t.insert().execute({}, {}, {})
ctexec = currenttime.scalar()
l = t.select().execute()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py', 'hi'),
(52, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py', 'hi'),
(53, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py', 'hi')])
def test_no_embed_in_sql(self):
"""Using a DefaultGenerator, Sequence, DefaultClause
in the columns, where clause of a select, or in the values
clause of insert, update, raises an informative error"""
for const in (
sa.Sequence('y'),
sa.ColumnDefault('y'),
sa.DefaultClause('y')
):
assert_raises_message(
sa.exc.ArgumentError,
"SQL expression object or string expected.",
t.select, [const]
)
assert_raises_message(
sa.exc.InvalidRequestError,
"cannot be used directly as a column expression.",
str, t.insert().values(col4=const)
)
assert_raises_message(
sa.exc.InvalidRequestError,
"cannot be used directly as a column expression.",
str, t.update().values(col4=const)
)
def test_missing_many_param(self):
assert_raises_message(exc.StatementError,
"A value is required for bind parameter 'col7', in parameter group 1",
t.insert().execute,
{'col4': 7, 'col7': 12, 'col8': 19},
{'col4': 7, 'col8': 19},
{'col4': 7, 'col7': 12, 'col8': 19},
)
def test_insert_values(self):
t.insert(values={'col3': 50}).execute()
l = t.select().execute()
eq_(50, l.first()['col3'])
@testing.fails_on('firebird', 'Data type unknown')
def test_updatemany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
t.insert().execute({}, {}, {})
t.update(t.c.col1 == sa.bindparam('pkval')).execute(
{'pkval': 51, 'col7': None, 'col8': None, 'boolcol1': False})
t.update(t.c.col1 == sa.bindparam('pkval')).execute(
{'pkval': 51},
{'pkval': 52},
{'pkval': 53})
l = t.select().execute()
ctexec = currenttime.scalar()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'im the update', f2, ts, ts, ctexec, False, False,
13, today, 'py', 'hi'),
(52, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py', 'hi'),
(53, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py', 'hi')])
@testing.fails_on('firebird', 'Data type unknown')
def test_update(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1 == pk).execute(col4=None, col5=None)
ctexec = currenttime.scalar()
l = t.select(t.c.col1 == pk).execute()
l = l.first()
eq_(l,
(pk, 'im the update', f2, None, None, ctexec, True, False,
13, datetime.date.today(), 'py', 'hi'))
eq_(11, f2)
@testing.fails_on('firebird', 'Data type unknown')
def test_update_values(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1 == pk, values={'col3': 55}).execute()
l = t.select(t.c.col1 == pk).execute()
l = l.first()
eq_(55, l['col3'])
class PKDefaultTest(fixtures.TablesTest):
__requires__ = ('subqueries',)
@classmethod
def define_tables(cls, metadata):
t2 = Table('t2', metadata,
Column('nextid', Integer))
Table('t1', metadata,
Column('id', Integer, primary_key=True,
default=sa.select([func.max(t2.c.nextid)]).as_scalar()),
Column('data', String(30)))
@testing.requires.returning
def test_with_implicit_returning(self):
self._test(True)
def test_regular(self):
self._test(False)
def _test(self, returning):
t2, t1 = self.tables.t2, self.tables.t1
if not returning and not testing.db.dialect.implicit_returning:
engine = testing.db
else:
engine = engines.testing_engine(
options={'implicit_returning': returning})
engine.execute(t2.insert(), nextid=1)
r = engine.execute(t1.insert(), data='hi')
eq_([1], r.inserted_primary_key)
engine.execute(t2.insert(), nextid=2)
r = engine.execute(t1.insert(), data='there')
eq_([2], r.inserted_primary_key)
class PKIncrementTest(fixtures.TablesTest):
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
Table("aitable", metadata,
Column('id', Integer, Sequence('ai_id_seq', optional=True),
primary_key=True),
Column('int1', Integer),
Column('str1', String(20)))
# TODO: add coverage for increment on a secondary column in a key
@testing.fails_on('firebird', 'Data type unknown')
def _test_autoincrement(self, bind):
aitable = self.tables.aitable
ids = set()
rs = bind.execute(aitable.insert(), int1=1)
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), str1='row 2')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), int1=3, str1='row 3')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(values={'int1': func.length('four')}))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
eq_(ids, set([1, 2, 3, 4]))
eq_(list(bind.execute(aitable.select().order_by(aitable.c.id))),
[(1, 1, None), (2, None, 'row 2'), (3, 3, 'row 3'), (4, 4, None)])
def test_autoincrement_autocommit(self):
self._test_autoincrement(testing.db)
def test_autoincrement_transaction(self):
con = testing.db.connect()
tx = con.begin()
try:
try:
self._test_autoincrement(con)
except:
try:
tx.rollback()
except:
pass
raise
else:
tx.commit()
finally:
con.close()
class EmptyInsertTest(fixtures.TestBase):
@testing.exclude('sqlite', '<', (3, 3, 8), 'no empty insert support')
@testing.fails_on('oracle', 'FIXME: unknown')
@testing.provide_metadata
def test_empty_insert(self):
t1 = Table('t1', self.metadata,
Column('is_true', Boolean, server_default=('1')))
self.metadata.create_all()
t1.insert().execute()
eq_(1, select([func.count(text('*'))], from_obj=t1).scalar())
eq_(True, t1.select().scalar())
class AutoIncrementTest(fixtures.TablesTest):
__requires__ = ('identity',)
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
"""Each test manipulates self.metadata individually."""
@testing.exclude('sqlite', '<', (3, 4), 'no database support')
def test_autoincrement_single_col(self):
single = Table('single', self.metadata,
Column('id', Integer, primary_key=True))
single.create()
r = single.insert().execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 1)
eq_(1, sa.select([func.count(sa.text('*'))], from_obj=single).scalar())
def test_autoincrement_fk(self):
nodes = Table('nodes', self.metadata,
Column('id', Integer, primary_key=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
Column('data', String(30)))
nodes.create()
r = nodes.insert().execute(data='foo')
id_ = r.inserted_primary_key[0]
nodes.insert().execute(data='bar', parent_id=id_)
def test_autoinc_detection_no_affinity(self):
class MyType(TypeDecorator):
impl = TypeEngine
assert MyType()._type_affinity is None
t = Table('x', MetaData(),
Column('id', MyType(), primary_key=True)
)
assert t._autoincrement_column is None
def test_autoincrement_ignore_fk(self):
m = MetaData()
Table('y', m,
Column('id', Integer(), primary_key=True)
)
x = Table('x', m,
Column('id', Integer(),
ForeignKey('y.id'),
autoincrement="ignore_fk", primary_key=True)
)
assert x._autoincrement_column is x.c.id
def test_autoincrement_fk_disqualifies(self):
m = MetaData()
Table('y', m,
Column('id', Integer(), primary_key=True)
)
x = Table('x', m,
Column('id', Integer(),
ForeignKey('y.id'),
primary_key=True)
)
assert x._autoincrement_column is None
@testing.fails_on('sqlite', 'FIXME: unknown')
def test_non_autoincrement(self):
# sqlite INT primary keys can be non-unique! (only for ints)
nonai = Table("nonaitest", self.metadata,
Column('id', Integer, autoincrement=False, primary_key=True),
Column('data', String(20)))
nonai.create()
def go():
# postgresql + mysql strict will fail on first row,
# mysql in legacy mode fails on second row
nonai.insert().execute(data='row 1')
nonai.insert().execute(data='row 2')
assert_raises(
sa.exc.DBAPIError,
go
)
nonai.insert().execute(id=1, data='row 1')
class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_create_drop_ddl(self):
self.assert_compile(
CreateSequence(Sequence('foo_seq')),
"CREATE SEQUENCE foo_seq",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', start=5)),
"CREATE SEQUENCE foo_seq START WITH 5",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2, start=5)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
)
self.assert_compile(
DropSequence(Sequence('foo_seq')),
"DROP SEQUENCE foo_seq",
)
class SequenceExecTest(fixtures.TestBase):
__requires__ = ('sequences',)
@classmethod
def setup_class(cls):
cls.seq = Sequence("my_sequence")
cls.seq.create(testing.db)
@classmethod
def teardown_class(cls):
cls.seq.drop(testing.db)
def _assert_seq_result(self, ret):
"""asserts return of next_value is an int"""
assert isinstance(ret, util.int_types)
assert ret > 0
def test_implicit_connectionless(self):
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.execute())
def test_explicit(self):
s = Sequence("my_sequence")
self._assert_seq_result(s.execute(testing.db))
def test_explicit_optional(self):
"""test dialect executes a Sequence, returns nextval, whether
or not "optional" is set """
s = Sequence("my_sequence", optional=True)
self._assert_seq_result(s.execute(testing.db))
def test_func_implicit_connectionless_execute(self):
"""test func.next_value().execute()/.scalar() works
with connectionless execution. """
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.next_value().execute().scalar())
def test_func_explicit(self):
s = Sequence("my_sequence")
self._assert_seq_result(testing.db.scalar(s.next_value()))
def test_func_implicit_connectionless_scalar(self):
"""test func.next_value().execute()/.scalar() works. """
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.next_value().scalar())
def test_func_embedded_select(self):
"""test can use next_value() in select column expr"""
s = Sequence("my_sequence")
self._assert_seq_result(
testing.db.scalar(select([s.next_value()]))
)
@testing.fails_on('oracle', "ORA-02287: sequence number not allowed here")
@testing.provide_metadata
def test_func_embedded_whereclause(self):
"""test can use next_value() in whereclause"""
metadata = self.metadata
t1 = Table('t', metadata,
Column('x', Integer)
)
t1.create(testing.db)
testing.db.execute(t1.insert(), [{'x': 1}, {'x': 300}, {'x': 301}])
s = Sequence("my_sequence")
eq_(
testing.db.execute(
t1.select().where(t1.c.x > s.next_value())
).fetchall(),
[(300, ), (301, )]
)
@testing.provide_metadata
def test_func_embedded_valuesbase(self):
"""test can use next_value() in values() of _ValuesBase"""
metadata = self.metadata
t1 = Table('t', metadata,
Column('x', Integer)
)
t1.create(testing.db)
s = Sequence("my_sequence")
testing.db.execute(
t1.insert().values(x=s.next_value())
)
self._assert_seq_result(
testing.db.scalar(t1.select())
)
@testing.provide_metadata
def test_inserted_pk_no_returning(self):
"""test inserted_primary_key contains [None] when
pk_col=next_value(), implicit returning is not used."""
metadata = self.metadata
e = engines.testing_engine(options={'implicit_returning': False})
s = Sequence("my_sequence")
metadata.bind = e
t1 = Table('t', metadata,
Column('x', Integer, primary_key=True)
)
t1.create()
r = e.execute(
t1.insert().values(x=s.next_value())
)
eq_(r.inserted_primary_key, [None])
@testing.requires.returning
@testing.provide_metadata
def test_inserted_pk_implicit_returning(self):
"""test inserted_primary_key contains the result when
pk_col=next_value(), when implicit returning is used."""
metadata = self.metadata
e = engines.testing_engine(options={'implicit_returning': True})
s = Sequence("my_sequence")
metadata.bind = e
t1 = Table('t', metadata,
Column('x', Integer, primary_key=True)
)
t1.create()
r = e.execute(
t1.insert().values(x=s.next_value())
)
self._assert_seq_result(r.inserted_primary_key[0])
class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__requires__ = ('sequences',)
@testing.fails_on('firebird', 'no FB support for start/increment')
def test_start_increment(self):
for seq in (
Sequence('foo_seq'),
Sequence('foo_seq', start=8),
Sequence('foo_seq', increment=5)):
seq.create(testing.db)
try:
values = [
testing.db.execute(seq) for i in range(3)
]
start = seq.start or 1
inc = seq.increment or 1
assert values == list(range(start, start + inc * 3, inc))
finally:
seq.drop(testing.db)
def _has_sequence(self, name):
return testing.db.dialect.has_sequence(testing.db, name)
def test_nextval_render(self):
"""test dialect renders the "nextval" construct,
whether or not "optional" is set """
for s in (
Sequence("my_seq"),
Sequence("my_seq", optional=True)):
assert str(s.next_value().
compile(dialect=testing.db.dialect)) in (
"nextval('my_seq')",
"gen_id(my_seq, 1)",
"my_seq.nextval",
)
def test_nextval_unsupported(self):
"""test next_value() used on non-sequence platform
raises NotImplementedError."""
s = Sequence("my_seq")
d = sqlite.dialect()
assert_raises_message(
NotImplementedError,
"Dialect 'sqlite' does not support sequence increments.",
s.next_value().compile,
dialect=d
)
def test_checkfirst_sequence(self):
s = Sequence("my_sequence")
s.create(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
s.create(testing.db, checkfirst=True)
s.drop(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
s.drop(testing.db, checkfirst=True)
def test_checkfirst_metadata(self):
m = MetaData()
Sequence("my_sequence", metadata=m)
m.create_all(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
m.create_all(testing.db, checkfirst=True)
m.drop_all(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
m.drop_all(testing.db, checkfirst=True)
def test_checkfirst_table(self):
m = MetaData()
s = Sequence("my_sequence")
t = Table('t', m, Column('c', Integer, s, primary_key=True))
t.create(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
t.create(testing.db, checkfirst=True)
t.drop(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
t.drop(testing.db, checkfirst=True)
@testing.provide_metadata
def test_table_overrides_metadata_create(self):
metadata = self.metadata
Sequence("s1", metadata=metadata)
s2 = Sequence("s2", metadata=metadata)
s3 = Sequence("s3")
t = Table('t', metadata,
Column('c', Integer, s3, primary_key=True))
assert s3.metadata is metadata
t.create(testing.db, checkfirst=True)
s3.drop(testing.db)
# 't' is created, and 's3' won't be
# re-created since it's linked to 't'.
# 's1' and 's2' are, however.
metadata.create_all(testing.db)
assert self._has_sequence('s1')
assert self._has_sequence('s2')
assert not self._has_sequence('s3')
s2.drop(testing.db)
assert self._has_sequence('s1')
assert not self._has_sequence('s2')
metadata.drop_all(testing.db)
assert not self._has_sequence('s1')
assert not self._has_sequence('s2')
cartitems = sometable = metadata = None
class TableBoundSequenceTest(fixtures.TestBase):
__requires__ = ('sequences',)
@classmethod
def setup_class(cls):
global cartitems, sometable, metadata
metadata = MetaData(testing.db)
cartitems = Table("cartitems", metadata,
Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
Column("description", String(40)),
Column("createdate", sa.DateTime())
)
sometable = Table('Manager', metadata,
Column('obj_id', Integer, Sequence('obj_id_seq')),
Column('name', String(128)),
Column('id', Integer, Sequence('Manager_id_seq', optional=True),
primary_key=True),
)
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_insert_via_seq(self):
cartitems.insert().execute(description='hi')
cartitems.insert().execute(description='there')
r = cartitems.insert().execute(description='lala')
assert r.inserted_primary_key and r.inserted_primary_key[0] is not None
id_ = r.inserted_primary_key[0]
eq_(1,
sa.select([func.count(cartitems.c.cart_id)],
sa.and_(cartitems.c.description == 'lala',
cartitems.c.cart_id == id_)).scalar())
cartitems.select().execute().fetchall()
def test_seq_nonpk(self):
"""test sequences fire off as defaults on non-pk columns"""
engine = engines.testing_engine(
options={'implicit_returning': False})
result = engine.execute(sometable.insert(), name="somename")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
result = engine.execute(sometable.insert(), name="someother")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
sometable.insert().execute(
{'name': 'name3'},
{'name': 'name4'})
eq_(sometable.select().order_by(sometable.c.id).execute().fetchall(),
[(1, "somename", 1),
(2, "someother", 2),
(3, "name3", 3),
(4, "name4", 4)])
class SpecialTypePKTest(fixtures.TestBase):
"""test process_result_value in conjunction with primary key columns.
Also tests that "autoincrement" checks are against column.type._type_affinity,
rather than the class of "type" itself.
"""
@classmethod
def setup_class(cls):
class MyInteger(TypeDecorator):
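            # Binds values such as "INT_1" as the integer 1 and renders fetched rows back as "INT_%d".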
impl = Integer
def process_bind_param(self, value, dialect):
if value is None:
return None
return int(value[4:])
def process_result_value(self, value, dialect):
if value is None:
return None
return "INT_%d" % value
cls.MyInteger = MyInteger
@testing.provide_metadata
def _run_test(self, *arg, **kw):
metadata = self.metadata
implicit_returning = kw.pop('implicit_returning', True)
kw['primary_key'] = True
if kw.get('autoincrement', True):
kw['test_needs_autoincrement'] = True
t = Table('x', metadata,
Column('y', self.MyInteger, *arg, **kw),
Column('data', Integer),
implicit_returning=implicit_returning
)
t.create()
r = t.insert().values(data=5).execute()
# we don't pre-fetch 'server_default'.
if 'server_default' in kw and (not
testing.db.dialect.implicit_returning or
not implicit_returning):
eq_(r.inserted_primary_key, [None])
else:
eq_(r.inserted_primary_key, ['INT_1'])
r.close()
eq_(
t.select().execute().first(),
('INT_1', 5)
)
def test_plain(self):
# among other things, tests that autoincrement
# is enabled.
self._run_test()
def test_literal_default_label(self):
self._run_test(default=literal("INT_1", type_=self.MyInteger).label('foo'))
def test_literal_default_no_label(self):
self._run_test(default=literal("INT_1", type_=self.MyInteger))
def test_sequence(self):
self._run_test(Sequence('foo_seq'))
def test_server_default(self):
self._run_test(server_default='1',)
def test_server_default_no_autoincrement(self):
self._run_test(server_default='1', autoincrement=False)
def test_clause(self):
stmt = select([literal("INT_1", type_=self.MyInteger)]).as_scalar()
self._run_test(default=stmt)
@testing.requires.returning
def test_no_implicit_returning(self):
self._run_test(implicit_returning=False)
@testing.requires.returning
def test_server_default_no_implicit_returning(self):
self._run_test(server_default='1', autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
@testing.provide_metadata
def test_string_default_none_on_insert(self):
"""Test that without implicit returning, we return None for
a string server default.
That is, we don't want to attempt to pre-execute "server_default"
generically - the user should use a Python side-default for a case
like this. Testing that all backends do the same thing here.
"""
metadata = self.metadata
t = Table('x', metadata,
Column('y', String(10), server_default='key_one', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
eq_(
t.select().execute().fetchall(),
[('key_one', 'data')]
)
@testing.requires.returning
@testing.provide_metadata
def test_string_default_on_insert_with_returning(self):
"""With implicit_returning, we get a string PK default back no problem."""
metadata = self.metadata
t = Table('x', metadata,
Column('y', String(10), server_default='key_one', primary_key=True),
Column('data', String(10))
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, ['key_one'])
eq_(
t.select().execute().fetchall(),
[('key_one', 'data')]
)
@testing.provide_metadata
def test_int_default_none_on_insert(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
assert t._autoincrement_column is None
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
if testing.against('sqlite'):
eq_(
t.select().execute().fetchall(),
[(1, 'data')]
)
else:
eq_(
t.select().execute().fetchall(),
[(5, 'data')]
)
@testing.provide_metadata
def test_autoincrement_reflected_from_server_default(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
assert t._autoincrement_column is None
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table('x', m2, autoload=True, implicit_returning=False)
assert t2._autoincrement_column is None
@testing.provide_metadata
def test_int_default_none_on_insert_reflected(self):
metadata = self.metadata
Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table('x', m2, autoload=True, implicit_returning=False)
r = t2.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
if testing.against('sqlite'):
eq_(
t2.select().execute().fetchall(),
[(1, 'data')]
)
else:
eq_(
t2.select().execute().fetchall(),
[(5, 'data')]
)
@testing.requires.returning
@testing.provide_metadata
def test_int_default_on_insert_with_returning(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10))
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [5])
eq_(
t.select().execute().fetchall(),
[(5, 'data')]
)
class UnicodeDefaultsTest(fixtures.TestBase):
def test_no_default(self):
Column(Unicode(32))
def test_unicode_default(self):
default = u('foo')
Column(Unicode(32), default=default)
def test_nonunicode_default(self):
default = b('foo')
assert_raises_message(
sa.exc.SAWarning,
"Unicode column received non-unicode default value.",
Column,
Unicode(32),
default=default
)
| 34.422785
| 125
| 0.557427
|
4a17c2007621e0b88cd951389a9c40ef1b9c6954
| 1,486
|
py
|
Python
|
tests/system/ec2_s3_pvm_tests.py
|
myhro/bootstrap-vz
|
bc1d5464f483434e5e291835bb9bc041f4879ecc
|
[
"Apache-2.0"
] | null | null | null |
tests/system/ec2_s3_pvm_tests.py
|
myhro/bootstrap-vz
|
bc1d5464f483434e5e291835bb9bc041f4879ecc
|
[
"Apache-2.0"
] | null | null | null |
tests/system/ec2_s3_pvm_tests.py
|
myhro/bootstrap-vz
|
bc1d5464f483434e5e291835bb9bc041f4879ecc
|
[
"Apache-2.0"
] | null | null | null |
from manifests import merge_manifest_data
from tools import boot_manifest
import random
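# Random hexadecimal bucket name (up to 16 hex digits) so each run deploys from its own S3 bucket.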
s3_bucket_name = '{id:x}'.format(id=random.randrange(16 ** 16))
partials = {'s3_pvm': '''
provider:
name: ec2
virtualization: pvm
description: Debian {system.release} {system.architecture}
bucket: ''' + s3_bucket_name + '''
system: {bootloader: pvgrub}
volume: {backing: s3}
'''
}
def test_unpartitioned_oldstable():
std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password']
custom_partials = [partials['s3_pvm']]
manifest_data = merge_manifest_data(std_partials, custom_partials)
boot_vars = {'instance_type': 'm1.small'}
with boot_manifest(manifest_data, boot_vars) as instance:
print(instance.get_console_output().output)
def test_unpartitioned_stable():
std_partials = ['base', 'stable64', 'unpartitioned', 'root_password']
custom_partials = [partials['s3_pvm']]
manifest_data = merge_manifest_data(std_partials, custom_partials)
boot_vars = {'instance_type': 'm1.small'}
with boot_manifest(manifest_data, boot_vars) as instance:
print(instance.get_console_output().output)
def test_unpartitioned_unstable():
std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password']
custom_partials = [partials['s3_pvm']]
manifest_data = merge_manifest_data(std_partials, custom_partials)
boot_vars = {'instance_type': 'm1.small'}
with boot_manifest(manifest_data, boot_vars) as instance:
print(instance.get_console_output().output)
| 34.55814
| 73
| 0.757739
|
4a17c32e730365baf559b743bc95b32e8b681bb2
| 168
|
py
|
Python
|
rmidi/constant/event_format.py
|
rushike/rmidipy
|
7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7
|
[
"MIT"
] | 5
|
2019-11-30T11:12:14.000Z
|
2021-08-15T00:47:23.000Z
|
rmidi/constant/event_format.py
|
rushike/rmidipy
|
7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7
|
[
"MIT"
] | 3
|
2020-03-14T04:45:38.000Z
|
2020-05-31T15:07:13.000Z
|
rmidi/constant/event_format.py
|
rushike/rmidipy
|
7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7
|
[
"MIT"
] | null | null | null |
from . import (ch_event_formatx, meta_event_formatx, sys_event_formatx)
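# Combined event-format lookup: merge the channel, meta, and system event tables into a single dict.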
X = {}
X.update(ch_event_formatx.X)
X.update(meta_event_formatx.X)
X.update(sys_event_formatx.X)
| 33.6
| 71
| 0.815476
|
4a17c34cc6e9d833dd9e8010c71dd18eb5fed08a
| 2,225
|
py
|
Python
|
jiant/tasks/lib/arc_challenge.py
|
angie-chen55/jiant
|
961bd577f736449956ddb2c15dcfce68bbb75e59
|
[
"MIT"
] | null | null | null |
jiant/tasks/lib/arc_challenge.py
|
angie-chen55/jiant
|
961bd577f736449956ddb2c15dcfce68bbb75e59
|
[
"MIT"
] | null | null | null |
jiant/tasks/lib/arc_challenge.py
|
angie-chen55/jiant
|
961bd577f736449956ddb2c15dcfce68bbb75e59
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from jiant.tasks.lib.templates.shared import labels_to_bimap
from jiant.tasks.lib.templates import multiple_choice as mc_template
from jiant.utils.python.io import read_json_lines
@dataclass
class Example(mc_template.Example):
@property
def task(self):
return ArcChallengeTask
@dataclass
class TokenizedExample(mc_template.TokenizedExample):
pass
@dataclass
class DataRow(mc_template.DataRow):
pass
@dataclass
class Batch(mc_template.Batch):
pass
class ArcChallengeTask(mc_template.AbstractMultipleChoiceTask):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
CHOICE_KEYS = ["A", "B", "C", "D", "E"]
CHOICE_TO_ID, ID_TO_CHOICE = labels_to_bimap(CHOICE_KEYS)
NUM_CHOICES = len(CHOICE_KEYS)
def get_train_examples(self):
return self._create_examples(lines=read_json_lines(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_json_lines(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_json_lines(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
potential_label_map = {
"1": "A",
"2": "B",
"3": "C",
"4": "D",
"5": "E",
}
NUM_CHOICES = len(potential_label_map)
examples = []
for i, line in enumerate(lines):
label = line["answerKey"]
if label in potential_label_map:
label = potential_label_map[label]
choice_list = [d["text"] for d in line["question"]["choices"]]
filler_choice_list = ["." for i in range(NUM_CHOICES - len(choice_list))]
choice_list = choice_list + filler_choice_list
assert len(choice_list) == NUM_CHOICES
examples.append(
Example(
guid="%s-%s" % (set_type, i),
prompt=line["question"]["stem"],
choice_list=choice_list,
label=label,
)
)
return examples
| 28.525641
| 94
| 0.62427
|
4a17c3a8a4c1d3c8643051c595695d306bbd8cf6
| 868
|
py
|
Python
|
sdks/python/test/test_StatusData.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/test/test_StatusData.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/test/test_StatusData.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from StatusData.clsStatusData import StatusData # noqa: E501
from appcenter_sdk.rest import ApiException
class TestStatusData(unittest.TestCase):
"""StatusData unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStatusData(self):
"""Test StatusData"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsStatusData.StatusData() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.7
| 79
| 0.700461
|
4a17c4bf78b95b079b5d529241dd8aa5cdd401f1
| 1,029
|
py
|
Python
|
lib/cookie.py
|
YunYinORG/social
|
5020e980cacd8eca39fccc36faabc584f3c3e15f
|
[
"Apache-2.0"
] | 4
|
2015-12-20T14:57:57.000Z
|
2021-01-23T12:54:20.000Z
|
lib/cookie.py
|
YunYinORG/social
|
5020e980cacd8eca39fccc36faabc584f3c3e15f
|
[
"Apache-2.0"
] | 1
|
2016-03-13T15:19:02.000Z
|
2016-03-18T03:11:18.000Z
|
lib/cookie.py
|
YunYinORG/social
|
5020e980cacd8eca39fccc36faabc584f3c3e15f
|
[
"Apache-2.0"
] | 4
|
2015-12-21T02:26:29.000Z
|
2016-09-03T02:57:07.000Z
|
#!/usr/bin/env python
# coding=utf-8
import web
import base64
from config import COOKIE_KEY
from json import dumps, loads
from Crypto.Cipher import AES
__doc__ = "加密cookie存取"
_cipher = AES.new(COOKIE_KEY)
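# AES cipher keyed with COOKIE_KEY (PyCrypto defaults to ECB mode when none is specified).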
def _encrypt(data):
text = dumps(data, ensure_ascii=False, encoding='utf8')
text = text.encode('utf-8')
text += b"\0" * (AES.block_size - len(text) % AES.block_size) # ('\0'*(16-len(text) % 16))
e = _cipher.encrypt(text)
return base64.urlsafe_b64encode(e).rstrip('=')
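# Reverse of _encrypt: re-pad the base64 text, decrypt, strip NUL padding, and parse the JSON; returns None on failure.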
def _decrypt(text):
try:
text += (b'='*(4-len(text) % 4))
m = base64.urlsafe_b64decode(text)
s = _cipher.decrypt(m).rstrip('\0')
return loads(s)
except Exception:
return None
def set(name, value):
"""保存加密的cookie"""
web.setcookie(name, _encrypt(value), path='/', httponly=1)
def get(name):
""" 获取cookie,空或者无效返回None"""
value = web.cookies().get(name)
return value and _decrypt(value)
def delete(name):
web.setcookie(name, None, -1, path='/', httponly=1)
| 23.386364
| 95
| 0.63654
|
4a17c52a69158cf7a76202cc96fa125d0c777303
| 516
|
py
|
Python
|
micro_orm/query/exceptions.py
|
RooYnnER/micro-orm
|
7ec329072a71565696fe27f44ff08412c708ff29
|
[
"MIT"
] | null | null | null |
micro_orm/query/exceptions.py
|
RooYnnER/micro-orm
|
7ec329072a71565696fe27f44ff08412c708ff29
|
[
"MIT"
] | null | null | null |
micro_orm/query/exceptions.py
|
RooYnnER/micro-orm
|
7ec329072a71565696fe27f44ff08412c708ff29
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class WrongQueryGiven(Exception):
pass
class NoContentFound(Exception):
pass
class NoFilterGiven(Exception):
pass
class WrongSpecialGiven(Exception):
pass
class NoOrderByAttributeGiven(Exception):
pass
class TooMuchOrderByAttributesGiven(Exception):
pass
class WrongOrderByAttribute(Exception):
pass
class FieldNotFoundException(Exception):
pass
class WrongInstanceToSave(Exception):
pass
class WrongInstanceToDrop(Exception):
pass
| 12.285714
| 47
| 0.74031
|
4a17c61ce17349b1eec47227e67401a9caf78e6d
| 10,156
|
py
|
Python
|
core/tests/unit/test_iam.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2021-01-04T11:40:17.000Z
|
2021-01-04T11:40:17.000Z
|
core/tests/unit/test_iam.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
core/tests/unit/test_iam.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestPolicy(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.iam import Policy
return Policy
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
empty = frozenset()
policy = self._make_one()
self.assertIsNone(policy.etag)
self.assertIsNone(policy.version)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test_ctor_explicit(self):
VERSION = 17
ETAG = 'ETAG'
empty = frozenset()
policy = self._make_one(ETAG, VERSION)
self.assertEqual(policy.etag, ETAG)
self.assertEqual(policy.version, VERSION)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test___getitem___miss(self):
policy = self._make_one()
self.assertEqual(policy['nonesuch'], set())
def test___setitem__(self):
USER = 'user:phred@example.com'
PRINCIPALS = set([USER])
policy = self._make_one()
policy['rolename'] = [USER]
self.assertEqual(policy['rolename'], PRINCIPALS)
self.assertEqual(len(policy), 1)
self.assertEqual(dict(policy), {'rolename': PRINCIPALS})
def test___delitem___hit(self):
policy = self._make_one()
policy._bindings['rolename'] = ['phred@example.com']
del policy['rolename']
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test___delitem___miss(self):
policy = self._make_one()
with self.assertRaises(KeyError):
del policy['nonesuch']
def test_owners_getter(self):
from google.cloud.iam import OWNER_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[OWNER_ROLE] = [MEMBER]
self.assertEqual(policy.owners, expected)
def test_owners_setter(self):
import warnings
from google.cloud.iam import OWNER_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.owners = [MEMBER]
self.assertEqual(policy[OWNER_ROLE], expected)
def test_editors_getter(self):
from google.cloud.iam import EDITOR_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[EDITOR_ROLE] = [MEMBER]
self.assertEqual(policy.editors, expected)
def test_editors_setter(self):
import warnings
from google.cloud.iam import EDITOR_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.editors = [MEMBER]
self.assertEqual(policy[EDITOR_ROLE], expected)
def test_viewers_getter(self):
from google.cloud.iam import VIEWER_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[VIEWER_ROLE] = [MEMBER]
self.assertEqual(policy.viewers, expected)
def test_viewers_setter(self):
import warnings
from google.cloud.iam import VIEWER_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.viewers = [MEMBER]
self.assertEqual(policy[VIEWER_ROLE], expected)
def test_user(self):
EMAIL = 'phred@example.com'
MEMBER = 'user:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.user(EMAIL), MEMBER)
def test_service_account(self):
EMAIL = 'phred@example.com'
MEMBER = 'serviceAccount:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.service_account(EMAIL), MEMBER)
def test_group(self):
EMAIL = 'phred@example.com'
MEMBER = 'group:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.group(EMAIL), MEMBER)
def test_domain(self):
DOMAIN = 'example.com'
MEMBER = 'domain:%s' % (DOMAIN,)
policy = self._make_one()
self.assertEqual(policy.domain(DOMAIN), MEMBER)
def test_all_users(self):
policy = self._make_one()
self.assertEqual(policy.all_users(), 'allUsers')
def test_authenticated_users(self):
policy = self._make_one()
self.assertEqual(policy.authenticated_users(), 'allAuthenticatedUsers')
def test_from_api_repr_only_etag(self):
empty = frozenset()
RESOURCE = {
'etag': 'ACAB',
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'ACAB')
self.assertIsNone(policy.version)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(dict(policy), {})
def test_from_api_repr_complete(self):
from google.cloud.iam import (
OWNER_ROLE,
EDITOR_ROLE,
VIEWER_ROLE,
)
OWNER1 = 'group:cloud-logs@google.com'
OWNER2 = 'user:phred@example.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
RESOURCE = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'DEADBEEF')
self.assertEqual(policy.version, 17)
self.assertEqual(policy.owners, frozenset([OWNER1, OWNER2]))
self.assertEqual(policy.editors, frozenset([EDITOR1, EDITOR2]))
self.assertEqual(policy.viewers, frozenset([VIEWER1, VIEWER2]))
self.assertEqual(
dict(policy), {
OWNER_ROLE: set([OWNER1, OWNER2]),
EDITOR_ROLE: set([EDITOR1, EDITOR2]),
VIEWER_ROLE: set([VIEWER1, VIEWER2]),
})
def test_from_api_repr_unknown_role(self):
USER = 'user:phred@example.com'
GROUP = 'group:cloud-logs@google.com'
RESOURCE = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': 'unknown', 'members': [USER, GROUP]},
],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'DEADBEEF')
self.assertEqual(policy.version, 17)
self.assertEqual(dict(policy), {'unknown': set([GROUP, USER])})
def test_to_api_repr_defaults(self):
policy = self._make_one()
self.assertEqual(policy.to_api_repr(), {})
def test_to_api_repr_only_etag(self):
policy = self._make_one('DEADBEEF')
self.assertEqual(policy.to_api_repr(), {'etag': 'DEADBEEF'})
def test_to_api_repr_binding_wo_members(self):
policy = self._make_one()
policy['empty'] = []
self.assertEqual(policy.to_api_repr(), {})
def test_to_api_repr_binding_w_duplicates(self):
from google.cloud.iam import OWNER_ROLE
OWNER = 'group:cloud-logs@google.com'
policy = self._make_one()
policy.owners = [OWNER, OWNER]
self.assertEqual(
policy.to_api_repr(), {
'bindings': [{'role': OWNER_ROLE, 'members': [OWNER]}],
})
def test_to_api_repr_full(self):
import operator
from google.cloud.iam import (
OWNER_ROLE,
EDITOR_ROLE,
VIEWER_ROLE,
)
OWNER1 = 'group:cloud-logs@google.com'
OWNER2 = 'user:phred@example.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
BINDINGS = [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
]
policy = self._make_one('DEADBEEF', 17)
policy.owners = [OWNER1, OWNER2]
policy.editors = [EDITOR1, EDITOR2]
policy.viewers = [VIEWER1, VIEWER2]
resource = policy.to_api_repr()
self.assertEqual(resource['etag'], 'DEADBEEF')
self.assertEqual(resource['version'], 17)
key = operator.itemgetter('role')
self.assertEqual(
sorted(resource['bindings'], key=key), sorted(BINDINGS, key=key))
| 35.51049
| 79
| 0.613923
|
4a17c6279b346a6bdd8540f13772cd777ce0ed31
| 2,350
|
py
|
Python
|
Project 3/Problem 5/AutocompleteTries.py
|
Blac-Panda/Udacity-DSA-Nanodegree
|
4629c77274182687f63c73016a941ab21f54f9cb
|
[
"MIT"
] | 1
|
2020-05-14T12:44:53.000Z
|
2020-05-14T12:44:53.000Z
|
Project 3/Problem 5/AutocompleteTries.py
|
Blac-Panda/Udacity-DSA-Nanodegree
|
4629c77274182687f63c73016a941ab21f54f9cb
|
[
"MIT"
] | null | null | null |
Project 3/Problem 5/AutocompleteTries.py
|
Blac-Panda/Udacity-DSA-Nanodegree
|
4629c77274182687f63c73016a941ab21f54f9cb
|
[
"MIT"
] | null | null | null |
class TrieNode(object):
""" Represents a single node in the Trie """
def __init__(self):
""" Initialize this node in the Trie """
self.is_word = False
self.children = {}
def insert(self, char):
""" Add a child node in this Trie """
if char not in self.children:
self.children[char] = TrieNode()
else:
pass
def suffixes(self, suffix=''):
        # Recursive function that collects the suffix for all complete words below this point
        results = []
        if self.is_word and suffix != '':
            results.append(suffix)
        if len(self.children) == 0:
            return results
for char in self.children:
results.extend(self.children[char].suffixes(suffix=suffix+char))
return results
class Trie(object):
""" The Trie itself containing the root node and insert/find functions """
def __init__(self):
""" Initialize this Trie (add a root node) """
self.root = TrieNode()
def insert(self, word):
""" Add a word to the Trie """
node = self.root
for char in word:
node.insert(char)
node = node.children[char]
node.is_word = True
def find(self, prefix):
""" Find the Trie node that represents this prefix """
node = self.root
for char in prefix:
if char not in node.children:
return False
node = node.children[char]
return node
MyTrie = Trie()
wordList = [
"ant", "anthology", "antagonist", "antonym",
"fun", "function", "factory",
"trie", "trigger", "trigonometry", "tripod"
]
for word in wordList:
MyTrie.insert(word)
##from ipywidgets import widgets
##from IPython.display import display
##from ipywidgets import interact
##def f(prefix):
##
## if prefix != '':
## prefixNode = MyTrie.find(prefix)
## if prefixNode:
## print('\n'.join(prefixNode.suffixes()))
## else:
## print(prefix + " not found")
## else:
## print('')
##interact(f,prefix='');
#testcases
print(MyTrie.find("ant").suffixes())
print(MyTrie.find("a").suffixes())
print(MyTrie.find("").suffixes())
| 24.736842
| 92
| 0.567234
|
4a17c636005e3ceba776e66e1203ef87f8c01e7b
| 336
|
py
|
Python
|
lc/1451_RearrangeWordsInASentence.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
lc/1451_RearrangeWordsInASentence.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
lc/1451_RearrangeWordsInASentence.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def arrangeWords(self, text: str) -> str:
N = len(text)
if N<=1:
return text
else:
tmp = sorted(text.lower().split(), key=len)
# fix the initial letter
tmp[0] = tmp[0][0].upper()+tmp[0][1:]
return ' '.join(tmp)
| 28
| 55
| 0.440476
|
4a17c6abe95d967c6a7bc313ad4cc294c3661e4e
| 17,249
|
py
|
Python
|
testing/citest/tests/appengine_smoke_test.py
|
ajordens/buildtool
|
7f43220ef29ca8902eaf016a9817916970f873d2
|
[
"Apache-2.0"
] | 6
|
2020-02-26T23:38:30.000Z
|
2021-01-07T01:35:13.000Z
|
testing/citest/tests/appengine_smoke_test.py
|
ajordens/buildtool
|
7f43220ef29ca8902eaf016a9817916970f873d2
|
[
"Apache-2.0"
] | 59
|
2020-02-13T15:35:45.000Z
|
2021-07-29T08:45:41.000Z
|
testing/citest/tests/appengine_smoke_test.py
|
ajordens/buildtool
|
7f43220ef29ca8902eaf016a9817916970f873d2
|
[
"Apache-2.0"
] | 22
|
2020-02-07T16:01:09.000Z
|
2022-03-10T00:35:44.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration test for App Engine.
Primarily tests the deploy operation and upsert load balancer pipeline stage,
which are relatively complex and not well covered by unit tests.
Sample Usage:
Before running this test, verify that the App Engine application
in your GCP project has a default service. If it does not, deploy
any App Engine version to your application that will use the default service.
Assuming you have created $PASSPHRASE_FILE (which you should chmod 400)
and $CITEST_ROOT points to the root directory of the citest library.
The passphrase file can be omited if you run ssh-agent and add
.ssh/compute_google_engine.
PYTHONPATH=$CITEST_ROOT \
python buildtool/testing/citest/tests/appengine_smoke_test.py \
--gce_ssh_passphrase_file=$PASSPHRASE_FILE \
--gce_project=$PROJECT \
--gce_zone=$ZONE \
--gce_instance=$INSTANCE
or
PYTHONPATH=$CITEST_ROOT \
python buildtool/testing/citest/tests/appengine_smoke_test.py \
--native_hostname=host-running-smoke-test
"""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
ov_factory = jc.ObservationPredicateFactory()
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
class AppengineSmokeTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the integration test.
We're going to:
Create a Spinnaker Application
Create a Spinnaker Server Group (implicitly creates a Load Balancer)
Create a Pipeline with the following stages
- Deploy
- Upsert Load Balancer
Delete Load Balancer (implicitly destroys the Server Groups
created within this test)
Delete Application
"""
@classmethod
def new_agent(cls, bindings):
return gate.new_agent(bindings)
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser."""
super(AppengineSmokeTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
parser.add_argument(
'--test_gcs_bucket', default=None,
help='URL to use for testing appengine deployment from a bucket.'
' The test will write into this bucket'
' then deploy what it writes.')
parser.add_argument(
'--test_storage_account_name', default=None,
help='Storage account when testing GCS buckets.'
' If not specified, use the application default credentials.')
parser.add_argument(
'--test_appengine_region', default='us-central',
help='Region to use for AppEngine tests.')
parser.add_argument('--git_repo_url', default=None,
help='URL of a GIT source code repository used by Spinnaker to deploy to App Engine.')
parser.add_argument('--branch', default='master',
help='Git branch to be used when deploying from source code repository.')
parser.add_argument('--app_directory_root', default=None,
help='Path from the root of source code repository to the application directory.')
def __init__(self, bindings, agent=None):
super(AppengineSmokeTestScenario, self).__init__(bindings, agent)
if not bindings['GIT_REPO_URL']:
raise ValueError('Must supply value for --git_repo_url')
if not bindings['APP_DIRECTORY_ROOT']:
raise ValueError('Must supply value for --app_directory_root')
self.TEST_APP = bindings['TEST_APP']
self.TEST_STACK = bindings['TEST_STACK']
self.__path = 'applications/%s/tasks' % self.TEST_APP
self.__gcp_project = bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']
self.__cluster_name = frigga.Naming.cluster(self.TEST_APP, self.TEST_STACK)
self.__server_group_name = frigga.Naming.server_group(self.TEST_APP, self.TEST_STACK)
self.__lb_name = self.__cluster_name
# Python is clearly hard-coded as the runtime here, but we're just asking App Engine to be a static file server.
self.__app_yaml = ('\n'.join(['runtime: python27',
'api_version: 1',
'threadsafe: true',
'service: {service}',
'handlers:',
' - url: /.*',
' static_dir: .']).format(service=self.__lb_name))
self.__app_directory_root = bindings['APP_DIRECTORY_ROOT']
self.__branch = bindings['BRANCH']
self.pipeline_id = None
try:
repo_path = self.__clone_app_repo()
appengine_dir = self.bindings['APP_DIRECTORY_ROOT']
repo_appengine_path = os.path.join(repo_path, appengine_dir)
self.__prepare_app_default_version(repo_appengine_path)
test_bucket = bindings['TEST_GCS_BUCKET']
if test_bucket:
self.__prepare_bucket(test_bucket, repo_appengine_path)
self.__test_repository_url = 'gs://' + test_bucket
else:
self.__test_repository_url = bindings['GIT_REPO_URL']
finally:
shutil.rmtree(repo_path)
def __clone_app_repo(self):
temp = tempfile.mkdtemp()
git_repo = self.bindings['GIT_REPO_URL']
branch = self.bindings['BRANCH']
command = 'git clone {repo} -b {branch} {dir}'.format(
repo=git_repo, branch=branch, dir=temp)
logging.info('Fetching %s', git_repo)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
return temp
def __prepare_bucket(self, bucket, repo_appengine_path):
root = self.bindings['APP_DIRECTORY_ROOT']
gcs_path = 'gs://{bucket}/{root}'.format(
bucket=self.bindings['TEST_GCS_BUCKET'], root=root)
command = 'gsutil -m rsync {local} {gcs}'.format(
local=repo_appengine_path, gcs=gcs_path)
logging.info('Preparing %s', gcs_path)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
def __prepare_app_default_version(self, repo_appengine_path):
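    # Deploy the repository's app.yaml once if the GCP project has no default App Engine service yet.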
if not self.__has_default_version():
deployable_path = os.path.join(repo_appengine_path, 'app.yaml')
command = 'gcloud app deploy {deployable} --project={project} --quiet'.format(
project=self.__gcp_project, deployable=deployable_path)
logging.info('Deploying AppEngine app with default version')
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
def __has_default_version(self):
command = 'gcloud app services list --project={project}'.format(
project=self.__gcp_project)
out, err = (subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
.communicate())
logging.debug(
'Checking if project has default app version: {command} returned: {out}'
.format(command=command, out=out))
# Expect an output similar to:
# SERVICE NUM_VERSIONS
# default 1
# other_version 1
return '\ndefault ' in out.decode(encoding='utf-8')
def create_app(self):
# Not testing create_app, since the operation is well tested elsewhere.
# Retryable to handle platform flakiness.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings,
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
cloud_providers="appengine"),
contract=contract)
def delete_app(self):
# Not testing delete_app, since the operation is well tested elsewhere.
# Retryable to handle platform flakiness.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT']),
contract=contract)
def create_server_group(self):
group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.bindings['TEST_STACK'],
version='v000')
job_spec = {
'application': self.TEST_APP,
'stack': self.TEST_STACK,
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'repositoryUrl': self.__test_repository_url,
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'type': 'createServerGroup',
'cloudProvider': 'appengine',
'region': self.bindings['TEST_APPENGINE_REGION']
}
storageAccountName = self.bindings.get('TEST_STORAGE_ACCOUNT_NAME')
if storageAccountName is not None:
job_spec['storageAccountName'] = storageAccountName
if not self.__test_repository_url.startswith('gs://'):
job_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
payload = self.agent.make_json_payload_from_kwargs(job=[job_spec],
description='Create Server Group in ' + group_name,
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Version Added', retryable_for_secs=60)
.inspect_resource('apps.services.versions',
group_name,
appsId=self.__gcp_project,
servicesId=self.__lb_name)
.EXPECT(ov_factory.value_list_path_contains(
'servingStatus', jp.STR_EQ('SERVING'))))
return st.OperationContract(
self.new_post_operation(
title='create_server_group', data=payload, path='tasks'),
contract=builder.build())
def make_deploy_stage(self):
cluster_spec = {
'account': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'application': self.TEST_APP,
'cloudProvider': 'appengine',
'provider': 'appengine',
'region': self.bindings['TEST_APPENGINE_REGION'],
'repositoryUrl': self.__test_repository_url,
'stack': self.TEST_STACK
}
if not self.__test_repository_url.startswith('gs://'):
cluster_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
result = {
'clusters': [cluster_spec],
'name': 'Deploy',
'refId': '1',
'requisiteStageRefIds': [],
'type': 'deploy'
}
return result
def make_upsert_load_balancer_stage(self):
result = {
'cloudProvider': 'appengine',
'loadBalancers': [
{
'cloudProvider': 'appengine',
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'loadBalancerName': self.__lb_name,
'migrateTraffic': False,
'name': self.__lb_name,
'region': self.bindings['TEST_APPENGINE_REGION'],
'splitDescription': {
'allocationDescriptions': [
{
'allocation': 0.1,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'current_asg_dynamic'
},
{
'allocation': 0.9,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'ancestor_asg_dynamic'
}
],
'shardBy': 'IP'
}
}
],
'name': 'Edit Load Balancer',
'refId': '2',
'requisiteStageRefIds': ['1'],
'type': 'upsertAppEngineLoadBalancers'
}
return result
def create_deploy_upsert_load_balancer_pipeline(self):
name = 'promoteServerGroupPipeline'
self.pipeline_id = name
deploy_stage = self.make_deploy_stage()
upsert_load_balancer_stage = self.make_upsert_load_balancer_stage()
pipeline_spec = dict(
name=name,
stages=[deploy_stage, upsert_load_balancer_stage],
triggers=[],
application=self.TEST_APP,
stageCounter=2,
parallel=True,
limitConcurrent=True,
appConfig={},
index=0
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline',
retryable_for_secs=5)
.get_url_path('applications/{0}/pipelineConfigs'.format(self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_deploy_upsert_load_balancer_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def run_deploy_upsert_load_balancer_pipeline(self):
url_path = 'pipelines/{0}/{1}'.format(self.TEST_APP, self.pipeline_id)
previous_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v000')
deployed_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v001')
payload = self.agent.make_json_payload_from_kwargs(
type='manual',
user='[anonymous]')
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Modified', retryable_for_secs=60)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.value_list_path_contains(
jp.build_path('split', 'allocations'),
jp.DICT_MATCHES({previous_group_name: jp.NUM_EQ(0.9),
deployed_group_name: jp.NUM_EQ(0.1)}))))
return st.OperationContract(
self.new_post_operation(
title='run_deploy_upsert_load_balancer_pipeline',
data=payload, path=url_path),
builder.build())
def delete_load_balancer(self):
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'deleteLoadBalancer',
'cloudProvider': 'appengine',
'loadBalancerName': self.__lb_name,
'account': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'credentials': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'user': '[anonymous]'
}],
description='Delete Load Balancer: {0} in {1}'.format(
self.__lb_name,
bindings['SPINNAKER_APPENGINE_ACCOUNT']),
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Deleted', retryable_for_secs=60)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404))))
return st.OperationContract(
self.new_post_operation(
title='delete_load_balancer', data=payload, path='tasks'),
contract=builder.build())
class AppengineSmokeTest(st.AgentTestCase):
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(AppengineSmokeTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app(),
retry_interval_secs=8, max_retries=8)
def test_b_create_server_group(self):
self.run_test_case(self.scenario.create_server_group())
def test_c_create_pipeline(self):
self.run_test_case(self.scenario.create_deploy_upsert_load_balancer_pipeline())
def test_d_run_pipeline(self):
self.run_test_case(self.scenario.run_deploy_upsert_load_balancer_pipeline())
def test_y_delete_load_balancer(self):
self.run_test_case(self.scenario.delete_load_balancer(),
retry_interval_secs=8, max_retries=8)
def test_z_delete_app(self):
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=8)
def main():
defaults = {
'TEST_STACK': AppengineSmokeTestScenario.DEFAULT_TEST_ID,
'TEST_APP': 'gae1' + AppengineSmokeTestScenario.DEFAULT_TEST_ID,
}
return citest.base.TestRunner.main(
parser_inits=[AppengineSmokeTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[AppengineSmokeTest])
if __name__ == '__main__':
sys.exit(main())
| 36.93576
| 116
| 0.665894
|
4a17c6e6c5c018bbaa3884ed924193cc0872495b
| 4,174
|
py
|
Python
|
cai/domains/mazeDomain/simulator/.waf3-1.7.16-9ca17eb492c97b689870b4ff9db75880/waflib/Tools/cs.py
|
alexander-hagg/phdexperiments
|
6506c8f2a438c67ca7c808df8715711fde73dfce
|
[
"MIT"
] | null | null | null |
cai/domains/mazeDomain/simulator/.waf3-1.7.16-9ca17eb492c97b689870b4ff9db75880/waflib/Tools/cs.py
|
alexander-hagg/phdexperiments
|
6506c8f2a438c67ca7c808df8715711fde73dfce
|
[
"MIT"
] | null | null | null |
cai/domains/mazeDomain/simulator/.waf3-1.7.16-9ca17eb492c97b689870b4ff9db75880/waflib/Tools/cs.py
|
alexander-hagg/phdexperiments
|
6506c8f2a438c67ca7c808df8715711fde73dfce
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def exec_command(self,cmd,**kw):
bld=self.generator.bld
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
try:
tmp=None
if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
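				# Note (added for clarity): when the command line is very long, the
				# arguments are written to a temporary "response file" and the compiler
				# is invoked as 'program @<file>', avoiding OS limits on argument length.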
program=cmd[0]
cmd=[self.quote_response_command(x)for x in cmd]
(fd,tmp)=tempfile.mkstemp()
os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]).encode())
os.close(fd)
cmd=[program,'@'+tmp]
ret=self.generator.bld.exec_command(cmd,**kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
def quote_response_command(self,flag):
if flag.lower()=='/noconfig':
return''
if flag.find(' ')>-1:
for x in('/r:','/reference:','/resource:','/lib:','/out:'):
if flag.startswith(x):
flag='%s"%s"'%(x,'","'.join(flag[len(x):].split(',')))
break
else:
flag='"%s"'%flag
return flag
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
for x in self.outputs:
x.sig=Utils.h_file(x.abspath())
return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
| 31.383459
| 102
| 0.692381
|
4a17c85867b211792699d52b54f2df61f1a1816b
| 1,455
|
py
|
Python
|
backend/search/utils.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 3
|
2020-06-13T02:47:29.000Z
|
2020-06-20T17:34:15.000Z
|
backend/search/utils.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 2
|
2020-06-14T20:29:26.000Z
|
2020-06-14T20:29:34.000Z
|
backend/search/utils.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 1
|
2020-09-04T01:45:39.000Z
|
2020-09-04T01:45:39.000Z
|
# Author: Sonam Ghosh
# This script provides the utility functions used by the search page.
import urllib.parse
import requests
from typing import List, Tuple
from math import sin, cos, sqrt, atan2, radians
def address_to_latlong(address: str) -> Tuple:
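    # Note (added for clarity): this queries the public OpenStreetMap Nominatim
    # geocoding API and returns the latitude/longitude of the first match for
    # the given address string.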
url = 'https://nominatim.openstreetmap.org/search/' + \
urllib.parse.quote(address) + '?format=json'
response = requests.get(url).json()
return (float(response[0]["lat"]), float(response[0]["lon"]))
def find_distance(start_loc: Tuple, end_loc: Tuple, unit: str) -> float:
if unit not in ('km', 'mi'):
        raise ValueError("Invalid unit: choose either 'km' or 'mi'")
lat1, lon1 = start_loc
lat2, lon2 = end_loc
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
# Approximate radius of the earth in km
radius = 6371
dlat = lat2 - lat1
dlon = lon2 - lon1
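    # Note (added for clarity): the lines below implement the haversine formula,
    #   a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
    #   c = 2 * atan2(sqrt(a), sqrt(1 - a))
    #   distance = radius * c
    # which gives the great-circle distance between the two points.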
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = radius * c
if unit == 'mi':
distance = distance / 1.609
return distance
if __name__ == "__main__":
# Testing 123
print('Hello World')
address = '754 Post St, San Francisco, CA 94109'
a = address_to_latlong(address)
address2 = 'Salesforce Tower, San Francisco, CA'
b = address_to_latlong(address2)
print(find_distance(a, b, unit='mi'))
| 26.454545
| 97
| 0.643986
|
4a17c881ca107bda0eff2ee87ff8f6f303468dbc
| 1,900
|
py
|
Python
|
taskbuster/apps/taskmanager/tests.py
|
mazulo/taskbuster_project
|
40833f4d969a413500c84b16702c1ad6932aff81
|
[
"MIT"
] | null | null | null |
taskbuster/apps/taskmanager/tests.py
|
mazulo/taskbuster_project
|
40833f4d969a413500c84b16702c1ad6932aff81
|
[
"MIT"
] | null | null | null |
taskbuster/apps/taskmanager/tests.py
|
mazulo/taskbuster_project
|
40833f4d969a413500c84b16702c1ad6932aff81
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from . import models
class TestProfileModel(TestCase):
def test_profile_creation(self):
User = get_user_model()
# New user created
user = User.objects.create(
username='taskbuster',
password='django-tutorial'
)
# Check that a Profile instance has been created
self.assertIsInstance(user.profile, models.Profile)
# Call the save method of the user to activate the signal again,
# and check that it doesn't try to create another profile instance
user.save()
self.assertIsInstance(user.profile, models.Profile)
class TestProjectModel(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create(
username='taskbuster',
password='django-tutorial'
)
self.profile = self.user.profile
def tearDown(self):
self.user.delete()
def test_validation_color(self):
# This first project uses the default value, #fff
project = models.Project(
user=self.profile,
name='TaskManager'
)
self.assertTrue(project.color == '#fff')
# Validation shouldn't raise an Error
project.full_clean()
# Good color inputs (without Errors):
for color in ['#1ca', '#1256aB']:
project.color = color
project.full_clean()
# Bad color inputs:
for color in ["1cA", "1256aB", "#1", "#12", "#1234",
"#12345", "#1234567"]:
with self.assertRaises(
ValidationError,
msg='%s didn\'t raise a ValidationError' % color
):
project.color = color
project.full_clean()
| 30.645161
| 74
| 0.592632
|
4a17ca64a3b077b1edf8207841b07c94a656d06d
| 308
|
py
|
Python
|
image_processor/__init__.py
|
sushrutm29/EE551-image-processing-website
|
e630ccf68a4566d0c060bf3de31823a2a6fd2473
|
[
"MIT"
] | null | null | null |
image_processor/__init__.py
|
sushrutm29/EE551-image-processing-website
|
e630ccf68a4566d0c060bf3de31823a2a6fd2473
|
[
"MIT"
] | null | null | null |
image_processor/__init__.py
|
sushrutm29/EE551-image-processing-website
|
e630ccf68a4566d0c060bf3de31823a2a6fd2473
|
[
"MIT"
] | null | null | null |
# Course: EE551 Python for Engineer
# Author: Sushrut Madhavi
# Date: 2021/04/25
# Version: 1.0
# Creates the flask app
from flask import Flask
app = Flask(__name__)
app.config['SECRET_KEY'] = '2ee9596db295364f62dd6aef17a2e3ca'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
from image_processor import routes
| 25.666667
| 61
| 0.779221
|
4a17cb321d3ba9c504dfce040275c061e39abacb
| 33,018
|
py
|
Python
|
pandas/tests/io/test_packers.py
|
MaxVanDeursen/pandas
|
9821b77de692716d7c2b62db1a68cac9ffc456c3
|
[
"BSD-3-Clause"
] | 1
|
2019-03-25T09:31:34.000Z
|
2019-03-25T09:31:34.000Z
|
pandas/tests/io/test_packers.py
|
MaxVanDeursen/pandas
|
9821b77de692716d7c2b62db1a68cac9ffc456c3
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/io/test_packers.py
|
MaxVanDeursen/pandas
|
9821b77de692716d7c2b62db1a68cac9ffc456c3
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from distutils.version import LooseVersion
import glob
import os
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.errors import PerformanceWarning
import pandas
from pandas import (
Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Period, Series,
Timestamp, bdate_range, compat, date_range, period_range)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_categorical_equal, assert_frame_equal, assert_index_equal,
assert_series_equal, ensure_clean)
from pandas.io.packers import read_msgpack, to_msgpack
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
@pytest.fixture(scope='module')
def current_packers_data():
# our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_msgpack_data)
return create_msgpack_data()
@pytest.fixture(scope='module')
def all_packers_data():
    # all of our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_data)
return create_data()
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
        # Temporary workaround:
        # Categorical.categories is changed from str to bytes in PY3,
        # possibly the same issue as GH 13591
if b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPackers(object):
def setup_method(self, method):
self.path = '__%s__.msg' % tm.rands(10)
def teardown_method(self, method):
pass
def encode_decode(self, x, compress=None, **kwargs):
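        # Note (added for clarity): round-trip helper -- the object is written to
        # a temporary msgpack file and read back, so tests can compare the
        # original against the decoded result.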
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
with open(p, 'wb') as fh:
fh.write(s)
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
msg = (r"Invalid file path or buffer object type: <(class|type)"
r" '{}'>")
with pytest.raises(ValueError, match=msg.format('NoneType')):
read_msgpack(path_or_buf=None)
with pytest.raises(ValueError, match=msg.format('dict')):
read_msgpack(path_or_buf={})
with pytest.raises(ValueError, match=msg.format(r'.*\.A')):
read_msgpack(path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_bool(self):
x = np.bool_(1)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
x = np.bool_(0)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
pytest.skip('numpy can not handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="complex value")
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128")
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert (all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), 'foo', np.bool_(1)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_nat(self):
nat_rec = self.encode_decode(NaT)
assert NaT is nat_rec
def test_datetimes(self):
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_periods(self):
# 13463
for i in [Period('2010-09', 'M'), Period('2014-Q1', 'Q')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_intervals(self):
# 19967
for i in [Interval(0, 1), Interval(0, 1, 'left'),
Interval(10, 25., 'right')]:
i_rec = self.encode_decode(i)
assert i == i_rec
class TestIndex(TestPackers):
def setup_method(self, method):
super(TestIndex, self).setup_method(method)
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
'cat': tm.makeCategoricalIndex(100),
'interval': tm.makeIntervalIndex(100),
'timedelta': tm.makeTimedeltaIndex(100, 'H')
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
    def test_categorical_index(self):
# GH15487
df = DataFrame(np.random.randn(10, 2))
df = df.astype({0: 'category'}).set_index(0)
result = self.encode_decode(df)
tm.assert_frame_equal(result, df)
class TestSeries(TestPackers):
def setup_method(self, method):
super(TestSeries, self).setup_method(method)
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
'H': Categorical([1, 2, 3, 4, 5]),
'I': Categorical([1, 2, 3, 4, 5], ordered=True),
'J': (np.bool_(1), 2, 3, 4, 5),
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
self.d['cat_ordered'] = Series(data['H'])
self.d['cat_unordered'] = Series(data['I'])
self.d['numpy_bool_mixed'] = Series(data['J'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setup_method(self, method):
super(TestCategorical, self).setup_method(method)
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestNDFrame(TestPackers):
def setup_method(self, method):
super(TestNDFrame, self).setup_method(method)
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
packed_items = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(packed_items)
check_arbitrary(packed_items, l_rec)
# this is an oddity in that packed lists will be returned as tuples
packed_items = [self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None]
l_rec = self.encode_decode(packed_items)
assert isinstance(l_rec, tuple)
check_arbitrary(packed_items, l_rec)
def test_iterator(self):
packed_items = [self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *packed_items)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, packed_items[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
        # currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
msg = r"msgpack sparse (series|frame) is not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.encode_decode(obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.loc[3:5, 1:3] = np.nan
s.loc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
"""
def setup_method(self, method):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setup_method(method)
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame({k: data[k] for k in ['A', 'A']}),
'int': DataFrame({k: data[k] for k in ['B', 'B']}),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(
self, monkeypatch, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with monkeypatch.context() as m, \
tm.assert_produces_warning(PerformanceWarning) as ws:
m.setattr(compress_module, 'decompress', decompress)
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
assert str(w.message) == ('copying data after decompressing; '
'this may mean that decompress is '
'caching its result')
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
assert buf == control_buf
def test_compression_warns_when_decompress_caches_zlib(self, monkeypatch):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression_warns_when_decompress_caches(
monkeypatch, 'zlib')
def test_compression_warns_when_decompress_caches_blosc(self, monkeypatch):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression_warns_when_decompress_caches(
monkeypatch, 'blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
assert empty_unpacked.flags.writeable
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
        # we compare the ord of bytes b'a' with unicode u'a' because they should
        # always be the same (unless we were able to mutate the shared
        # character singleton, in which case ord(b'a') == ord(b'b')).
assert ord(b'a') == ord(u'a')
tm.assert_numpy_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='blosc')
assert 1. in self.encode_decode(df2['A'], compress='blosc')
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='zlib')
assert 1. in self.encode_decode(df2['A'], compress='zlib')
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setup_method(self, method):
super(TestEncoding, self).setup_method(method)
data = {
'A': ['\u2019'] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame({k: data[k] for k in ['A', 'A']}),
'int': DataFrame({k: data[k] for k in ['B', 'B']}),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
assert result == expected
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
files = glob.glob(os.path.join(os.path.dirname(__file__), "data",
"legacy_msgpack", "*", "*.msgpack"))
@pytest.fixture(params=files)
def legacy_packer(request, datapath):
return datapath(request.param)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestMsgpack(object):
"""
How to add msgpack tests:
    1. Install the pandas version intended to output the msgpack.
    2. Execute "generate_legacy_storage_files.py" to create the msgpack.
    $ python generate_legacy_storage_files.py <output_dir> msgpack
    3. Move the created msgpack to the "data/legacy_msgpack/<version>" directory.
"""
minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data, version):
for typ, v in self.minimum_structure.items():
if typ == "panel":
# FIXME: kludge; get this key out of the legacy file
continue
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
assert kind in data[typ], msg
def compare(self, current_data, all_data, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < LooseVersion('0.18.0'):
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
if "panel" in data:
# FIXME: kludge; get the key out of the stored file
del data["panel"]
self.check_min_structure(data, version)
for typ, dv in data.items():
assert typ in all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in current_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = current_data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comp_method, None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_msgpacks_legacy(self, current_packers_data, all_packers_data,
legacy_packer, datapath):
version = os.path.basename(os.path.dirname(legacy_packer))
# GH12142 0.17 files packed in P2 can't be read in P3
if (version.startswith('0.17.') and
legacy_packer.split('.')[-4][-1] == '2'):
msg = "Files packed in Py2 can't be read in Py3 ({})"
pytest.skip(msg.format(version))
try:
with catch_warnings(record=True):
self.compare(current_packers_data, all_packers_data,
legacy_packer, version)
except ImportError:
# blosc not installed
pass
def test_msgpack_period_freq(self):
# https://github.com/pandas-dev/pandas/issues/24135
s = Series(np.random.rand(5), index=date_range('20130101', periods=5))
r = read_msgpack(s.to_msgpack())
repr(r)
| 34.792413
| 79
| 0.575383
|
4a17cbd18c04c909411b7cea9f9cc00e05569f17
| 48,395
|
py
|
Python
|
chi/_problems.py
|
Lethay/chi
|
2182d8f9d54878b3f27670b157593f5a3bf9df4a
|
[
"BSD-3-Clause"
] | null | null | null |
chi/_problems.py
|
Lethay/chi
|
2182d8f9d54878b3f27670b157593f5a3bf9df4a
|
[
"BSD-3-Clause"
] | null | null | null |
chi/_problems.py
|
Lethay/chi
|
2182d8f9d54878b3f27670b157593f5a3bf9df4a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
# The InverseProblem class is based on the SingleOutputProblem and
# MultiOutputProblem classes of PINTS (https://github.com/pints-team/pints/),
# which is distributed under the BSD 3-clause license.
#
import copy
from warnings import warn
import myokit
import numpy as np
import pandas as pd
import pints
import chi
class InverseProblem(object):
"""
Represents an inference problem where a model is fit to a
one-dimensional or multi-dimensional time series, such as measured in a
PKPD study.
Parameters
----------
model
An instance of a :class:`MechanisticModel`.
times
A sequence of points in time. Must be non-negative and increasing.
values
A sequence of single- or multi-valued measurements. Must have shape
``(n_times, n_outputs)``, where ``n_times`` is the number of points in
``times`` and ``n_outputs`` is the number of outputs in the model. For
``n_outputs = 1``, the data can also have shape ``(n_times, )``.
"""
def __init__(self, model, times, values):
# Check model
if not isinstance(model, chi.MechanisticModel):
raise ValueError(
                'Model has to be an instance of a chi.MechanisticModel.'
)
self._model = model
# Check times, copy so that they can no longer be changed and set them
# to read-only
self._times = pints.vector(times)
if np.any(self._times < 0):
raise ValueError('Times cannot be negative.')
if np.any(self._times[:-1] > self._times[1:]):
raise ValueError('Times must be increasing.')
# Check values, copy so that they can no longer be changed
values = np.asarray(values)
if values.ndim == 1:
            values = np.expand_dims(values, axis=1)
self._values = pints.matrix2d(values)
# Check dimensions
self._n_parameters = int(model.n_parameters())
self._n_outputs = int(model.n_outputs())
self._n_times = len(self._times)
# Check for correct shape
if self._values.shape != (self._n_times, self._n_outputs):
raise ValueError(
'Values array must have shape `(n_times, n_outputs)`.')
def evaluate(self, parameters):
"""
Runs a simulation using the given parameters, returning the simulated
values as a NumPy array of shape ``(n_times, n_outputs)``.
"""
output = self._model.simulate(parameters, self._times)
# The chi.Model.simulate method returns the model output as
        # (n_outputs, n_times). We therefore need to transpose the result.
return output.transpose()
def evaluateS1(self, parameters):
"""
Runs a simulation using the given parameters, returning the simulated
values.
The returned data is a tuple of NumPy arrays ``(y, y')``, where ``y``
has shape ``(n_times, n_outputs)``, while ``y'`` has shape
``(n_times, n_outputs, n_parameters)``.
*This method only works for problems whose model implements the
:class:`ForwardModelS1` interface.*
"""
raise NotImplementedError
def n_outputs(self):
"""
Returns the number of outputs for this problem.
"""
return self._n_outputs
def n_parameters(self):
"""
Returns the dimension (the number of parameters) of this problem.
"""
return self._n_parameters
def n_times(self):
"""
Returns the number of sampling points, i.e. the length of the vectors
returned by :meth:`times()` and :meth:`values()`.
"""
return self._n_times
def times(self):
"""
Returns this problem's times.
The returned value is a read-only NumPy array of shape
``(n_times, n_outputs)``, where ``n_times`` is the number of time
points and ``n_outputs`` is the number of outputs.
"""
return self._times
def values(self):
"""
Returns this problem's values.
The returned value is a read-only NumPy array of shape
``(n_times, n_outputs)``, where ``n_times`` is the number of time
points and ``n_outputs`` is the number of outputs.
"""
return self._values
class ProblemModellingController(object):
"""
A problem modelling controller which simplifies the model building process
of a pharmacokinetic and pharmacodynamic problem.
The class is instantiated with an instance of a :class:`MechanisticModel`
and one instance of an :class:`ErrorModel` for each mechanistic model
output.
:param mechanistic_model: A mechanistic model for the problem.
:type mechanistic_model: MechanisticModel
:param error_models: A list of error models. One error model has to be
provided for each mechanistic model output.
:type error_models: list[ErrorModel]
:param outputs: A list of mechanistic model output names, which can be used
to map the error models to mechanistic model outputs. If ``None``, the
        error models are assumed to be listed in the same order as
:meth:`MechanisticModel.outputs`.
:type outputs: list[str], optional
"""
def __init__(self, mechanistic_model, error_models, outputs=None):
super(ProblemModellingController, self).__init__()
# Check inputs
if not isinstance(mechanistic_model, chi.MechanisticModel) and chi.MechanisticModel not in type(mechanistic_model).__mro__:
raise TypeError(
'The mechanistic model has to be an instance of a '
'chi.MechanisticModel.')
if not isinstance(error_models, list):
error_models = [error_models]
for error_model in error_models:
if not isinstance(error_model, chi.ErrorModel):
raise TypeError(
'Error models have to be instances of a '
'chi.ErrorModel.')
# Copy mechanistic model
mechanistic_model = copy.deepcopy(mechanistic_model)
# Set outputs
if outputs is not None:
mechanistic_model.set_outputs(outputs)
# Get number of outputs
n_outputs = mechanistic_model.n_outputs()
if len(error_models) != n_outputs:
raise ValueError(
'Wrong number of error models. One error model has to be '
                'provided for each mechanistic model output.')
# Copy error models
error_models = [copy.copy(error_model) for error_model in error_models]
# Remember models
self._mechanistic_model = mechanistic_model
self._error_models = error_models
# Set defaults
self._population_models = None
self._log_prior = None
self._data = None
self._dataErr = None
self._dosing_regimens = None
self._individual_fixed_param_dict = None
# Set error models to un-normalised by default
self.set_normalised_error_models(False)
# Set parameter names and number of parameters
self._set_error_model_parameter_names()
self._n_parameters, self._parameter_names = \
self._get_number_and_parameter_names()
def _clean_data(self, data, dose_key, dose_duration_key):
"""
        Makes sure that the data is formatted properly.
        1. IDs are strings
        2. times are numeric or NaN
        3. biomarkers are strings
        4. measurements are numeric or NaN
        5. doses are numeric or NaN
        6. durations are numeric or NaN
"""
# Create container for data
columns = [
self._id_key, self._time_key, self._biom_key, self._meas_key]
if dose_key is not None:
columns += [dose_key]
if dose_duration_key is not None:
columns += [dose_duration_key]
cleanData = pd.DataFrame(columns=columns)
# Convert IDs to strings
cleanData[self._id_key] = data[self._id_key].astype(
"string")
# Convert times to numerics
cleanData[self._time_key] = pd.to_numeric(data[self._time_key])
# Convert biomarkers to strings
cleanData[self._biom_key] = data[self._biom_key].astype(
"string")
# Convert measurements to numerics
cleanData[self._meas_key] = pd.to_numeric(data[self._meas_key])
# Convert dose to numerics
if dose_key is not None:
cleanData[dose_key] = pd.to_numeric(
data[dose_key])
# Convert duration to numerics
if dose_duration_key is not None:
cleanData[dose_duration_key] = pd.to_numeric(
data[dose_duration_key])
return cleanData
def _create_log_likelihoods(self, individual):
"""
Returns a list of log-likelihoods, one for each individual in the
dataset.
"""
# Get IDs
ids = self._ids
if individual is not None:
ids = [individual]
# Create a likelihood for each individual
log_likelihoods = []
for individual in ids:
# Set dosing regimen
try:
self._mechanistic_model.simulator.set_protocol(
self._dosing_regimens[individual])
except TypeError:
                # TypeError is raised when the applied regimens are still None,
# i.e. no doses were defined by the datasets.
pass
            # Set individually fixed parameters
if self._individual_fixed_param_dict is not None and len(self._individual_fixed_param_dict)>0:
try:
mechanistic_model = self._mechanistic_model.copy()
mechanistic_model.fix_parameters(self._individual_fixed_param_dict[individual])
except TypeError:
pass
else:
mechanistic_model = self._mechanistic_model
log_likelihood = self._create_log_likelihood(individual, mechanistic_model)
if log_likelihood is not None:
# If data exists for this individual, append to log-likelihoods
log_likelihoods.append(log_likelihood)
return log_likelihoods
def _create_log_likelihood(self, individual, mechanistic_model=None):
"""
Gets the relevant data for the individual and returns the resulting
chi.LogLikelihood.
"""
        # Get mechanistic model
if mechanistic_model is None:
mechanistic_model = self._mechanistic_model
# Flag for considering errors, too
haveErrors = self._dataErr is not None
# Get individuals data
times = []
observations = []
observationErrors = []
mask = self._data[self._id_key] == individual
data = self._data[mask][
[self._time_key, self._biom_key, self._meas_key]]
# Get individual measurement errors on the data
if haveErrors:
maskE = self._dataErr[self._id_key] == individual
assert (np.asarray(mask)==np.asarray(maskE)).all()
dataErr = self._dataErr[mask][
[self._time_key, self._biom_key, self._meas_key]]
for output in mechanistic_model.outputs():
# Mask data for biomarker
biomarker = self._output_biomarker_dict[output]
mask = data[self._biom_key] == biomarker
temp_df = data[mask]
if haveErrors:
maskE = dataErr[self._biom_key] == biomarker
assert (np.asarray(mask)==np.asarray(maskE)).all()
temp_ef = dataErr[mask]
# Filter observations for non-NaN entries
mask = temp_df[self._meas_key].notnull()
temp_df = temp_df[[self._time_key, self._meas_key]][mask]
if haveErrors:
maskE = temp_ef[self._meas_key].notnull()
assert (np.asarray(mask)==np.asarray(maskE)).all()
temp_ef = temp_ef[[self._time_key, self._meas_key]][mask]
# Filter times for non-NaN entries
mask = temp_df[self._time_key].notnull()
temp_df = temp_df[mask]
if haveErrors:
maskE = temp_ef[self._time_key].notnull()
assert (np.asarray(mask)==np.asarray(maskE)).all()
temp_ef = temp_ef[mask]
# Collect data for output
times.append(temp_df[self._time_key].to_numpy())
observations.append(temp_df[self._meas_key].to_numpy())
if haveErrors:
observationErrors.append(temp_ef[self._meas_key].to_numpy())
# Count outputs that were measured
# TODO: copy mechanistic model and update model outputs.
# (Useful for e.g. control group and dose group training)
n_measured_outputs = 0
for output_measurements in observations:
if len(output_measurements) > 0:
n_measured_outputs += 1
# If no outputs were measured, do not construct a likelihood
if n_measured_outputs == 0:
return None
# Create log-likelihood and set ID to individual
if haveErrors:
for model_id, error_model in enumerate(self._error_models):
isMeas = isinstance(error_model, (
chi.ErrorModelWithMeasuringErrors, chi.ReducedErrorModelWithMeasuringErrors))
if not isMeas:
if isinstance(error_model, chi.ReducedErrorModel):
self._error_models[model_id] = \
chi.ReducedErrorModelWithMeasuringErrors.init_from_reduced_error_model(error_model)
else:
self._error_models[model_id] = chi.return_measuring_error_model_from_error_model(error_model)
log_likelihood = chi.LogLikelihoodWithMeasuringErrors(
mechanistic_model, self._error_models, observations, observationErrors, times)
else:
log_likelihood = chi.LogLikelihood(
mechanistic_model, self._error_models, observations, times)
log_likelihood.set_id(individual)
return log_likelihood
def _initialise_individual_fixed_params(self):
"""
        Initialises a dictionary that holds, per individual ID, the parameters fixed for that individual.
"""
fixedParams = dict()
for label in self._ids:
            fixedParams[label] = dict()
return fixedParams
def _extract_dosing_regimens(self, dose_key, duration_key):
"""
Converts the dosing regimens defined by the pandas.DataFrame into
myokit.Protocols, and returns them as a dictionary with individual
IDs as keys, and regimens as values.
For each dose entry in the dataframe a dose event is added
to the myokit.Protocol. If the duration of the dose is not provided
a bolus dose of duration 0.01 time units is assumed.
"""
# Create duration column if it doesn't exist and set it to default
# bolus duration of 0.01
if duration_key is None:
duration_key = 'Duration in base time unit'
self._data[duration_key] = 0.01
# Extract regimen from dataset
regimens = dict()
for label in self._ids:
# Filter times and dose events for non-NaN entries
mask = self._data[self._id_key] == label
data = self._data[
[self._time_key, dose_key, duration_key]][mask]
mask = data[dose_key].notnull()
data = data[mask]
mask = data[self._time_key].notnull()
data = data[mask]
# Add dose events to dosing regimen
regimen = myokit.Protocol()
for _, row in data.iterrows():
# Set duration
duration = row[duration_key]
if np.isnan(duration):
# If duration is not provided, we assume a bolus dose
# which we approximate by 0.01 time_units.
duration = 0.01
# Compute dose rate and set regimen
dose_rate = row[dose_key] / duration
time = row[self._time_key]
regimen.add(myokit.ProtocolEvent(dose_rate, time, duration))
regimens[label] = regimen
return regimens
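    # Illustrative example of the conversion above (numbers are hypothetical):
    # a row with dose 2 at time 1 and duration 0.5 becomes
    # myokit.ProtocolEvent(2 / 0.5, 1, 0.5), i.e. the dose amount is converted
    # into a dose rate that is applied over the given duration.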
def _get_number_and_parameter_names(
self, exclude_pop_model=False, exclude_bottom_level=False):
"""
Returns the number and names of the log-likelihood.
The parameters of the HierarchicalLogLikelihood depend on the
data, and the population model. So unless both are set, the
parameters will reflect the parameters of the individual
log-likelihoods.
"""
# Get mechanistic model parameters
parameter_names = self._mechanistic_model.parameters()
# Get error model parameters
for error_model in self._error_models:
parameter_names += error_model.get_parameter_names()
# Stop here if population model is excluded or isn't set
if (self._population_models is None) or (
exclude_pop_model):
# Get number of parameters
n_parameters = len(parameter_names)
return (n_parameters, parameter_names)
# Set default number of individuals
n_ids = 0
if self._data is not None:
n_ids = len(self._ids)
# Construct population parameter names
pop_parameter_names = []
for param_id, pop_model in enumerate(self._population_models):
# Get mechanistic/error model parameter name
name = parameter_names[param_id]
# Add names for individual parameters
n_indiv, _ = pop_model.n_hierarchical_parameters(n_ids)
if (n_indiv > 0):
# If individual parameters are relevant for the hierarchical
# model, append them
indiv_names = ['ID %s: %s' % (n, name) for n in self._ids]
pop_parameter_names += indiv_names
# Add population-level parameters
if pop_model.n_parameters() > 0:
# pop_names = ["%s %s" % (name, pnam) for pnam in pop_model.get_parameter_names()]
pop_parameter_names += pop_model.get_parameter_names()
# Return only top-level parameters, if bottom is excluded
if exclude_bottom_level:
# Filter bottom-level
start = 0
parameter_names = []
for param_id, pop_model in enumerate(self._population_models):
n_indiv, n_pop = pop_model.n_hierarchical_parameters(n_ids)
                # If heterogeneous or uniform population model,
# individuals count as top-level
if chi.is_heterogeneous_or_uniform_model(pop_model):
end = start + n_indiv + n_pop
else:
# Otherwise, we skip individuals
start += n_indiv
end = start + n_pop
# Add population parameters
parameter_names += pop_parameter_names[start:end]
# Shift start index
start = end
# Get number of parameters
n_parameters = len(parameter_names)
return (n_parameters, parameter_names)
# Get number of parameters
n_parameters = len(pop_parameter_names)
return (n_parameters, pop_parameter_names)
def _set_error_model_parameter_names(self):
"""
Resets the error model parameter names and prepends the output name
if more than one output exists.
"""
# Reset error model parameter names to defaults
for error_model in self._error_models:
error_model.set_parameter_names(None)
# Rename error model parameters, if more than one output
n_outputs = self._mechanistic_model.n_outputs()
if n_outputs > 1:
# Get output names
outputs = self._mechanistic_model.outputs()
for output_id, error_model in enumerate(self._error_models):
# Get original parameter names
names = error_model.get_parameter_names()
# Prepend output name
output = outputs[output_id]
names = [output + ' ' + name for name in names]
# Set new parameter names
error_model.set_parameter_names(names)
def _set_population_model_parameter_names(self):
"""
Resets the population model parameter names and appends the individual
parameter names.
"""
# Get individual parameter names
parameter_names = self.get_parameter_names(exclude_pop_model=True)
# Construct population parameter names
for param_id, pop_model in enumerate(self._population_models):
# Get mechanistic/error model parameter name
name = parameter_names[param_id]
# Create names for population-level parameters
if pop_model.n_parameters() > 0:
# Get original parameter names
pop_model.set_parameter_names()
pop_names = pop_model.get_parameter_names()
# Append individual names and rename population model
# parameters
names = [
'%s %s' % (pop_prefix, name) for pop_prefix in pop_names]
pop_model.set_parameter_names(names)
def fix_parameters(self, name_value_dict):
"""
Fixes the value of model parameters, and effectively removes them as a
        parameter from the model. Fixing the value of a parameter at ``None``
sets the parameter free again.
.. note::
1. Fixing model parameters resets the log-prior to ``None``.
2. Once a population model is set, only population model
parameters can be fixed.
:param name_value_dict: A dictionary with model parameters as keys, and
the value to be fixed at as values.
:type name_value_dict: dict
"""
        # Check type of dictionary
try:
name_value_dict = dict(name_value_dict)
except (TypeError, ValueError):
raise ValueError(
'The name-value dictionary has to be convertable to a python '
'dictionary.')
#Find parameters that are fixed for individuals, rather than fixed with one value for all
valuesWithLen = {k: v for k, v in name_value_dict.items() if hasattr(v, "__len__")}
if len(valuesWithLen)>0:
assert all([len(v)==len(self._ids) for v in valuesWithLen.values()])
if self._individual_fixed_param_dict is None:
self._individual_fixed_param_dict = self._initialise_individual_fixed_params()
for i, _id in enumerate(self._ids):
self._individual_fixed_param_dict[_id] = {k: v[i] for k, v in valuesWithLen.items()}
# If a population model is set, fix only population parameters
if self._population_models is not None:
pop_models = self._population_models
# Convert models to reduced models
for model_id, pop_model in enumerate(pop_models):
if not isinstance(pop_model, chi.ReducedPopulationModel):
pop_models[model_id] = chi.ReducedPopulationModel(
pop_model)
# Fix parameters
for pop_model in pop_models:
pop_model.fix_parameters(name_value_dict)
# If no parameters are fixed, get original model back
for model_id, pop_model in enumerate(pop_models):
if pop_model.n_fixed_parameters() == 0:
pop_model = pop_model.get_population_model()
pop_models[model_id] = pop_model
            # Save reduced models and reset priors
self._population_models = pop_models
self._log_prior = None
# Update names and number of parameters
self._n_parameters, self._parameter_names = \
self._get_number_and_parameter_names()
# Stop here
# (individual parameters cannot be fixed when pop model is set)
return None
# Get submodels
mechanistic_model = self._mechanistic_model
error_models = self._error_models
# Convert models to reduced models
if not isinstance(mechanistic_model, chi.ReducedMechanisticModel):
mechanistic_model = chi.ReducedMechanisticModel(mechanistic_model)
for model_id, error_model in enumerate(error_models):
if not isinstance(error_model, chi.ReducedErrorModel):
if isinstance(error_model, chi.ErrorModelWithMeasuringErrors):
error_models[model_id] = chi.ReducedErrorModelWithMeasuringErrors(error_model)
else:
error_models[model_id] = chi.ReducedErrorModel(error_model)
# Fix model parameters
mechanistic_model.fix_parameters(name_value_dict)
for error_model in error_models:
error_model.fix_parameters(name_value_dict)
# If no parameters are fixed, get original model back
if mechanistic_model.n_fixed_parameters() == 0:
mechanistic_model = mechanistic_model.mechanistic_model()
self._individual_fixed_param_dict = self._initialise_individual_fixed_params()
for model_id, error_model in enumerate(error_models):
if error_model.n_fixed_parameters() == 0:
error_model = error_model.get_error_model()
error_models[model_id] = error_model
# Save reduced models and reset priors
self._mechanistic_model = mechanistic_model
self._error_models = error_models
self._log_prior = None
# Update names and number of parameters
self._n_parameters, self._parameter_names = \
self._get_number_and_parameter_names()
def get_dosing_regimens(self):
"""
Returns a dictionary of dosing regimens in form of
:class:`myokit.Protocol` instances.
The dosing regimens are extracted from the dataset if a dose key is
provided. If no dose key is provided ``None`` is returned.
"""
return self._dosing_regimens
def get_log_prior(self):
"""
Returns the :class:`LogPrior` for the model parameters. If no
log-prior is set, ``None`` is returned.
"""
return self._log_prior
def get_log_posterior(self, individual=None, prior_is_id_specific=False):
r"""
Returns the :class:`LogPosterior` defined by the observed biomarkers,
the administered dosing regimen, the mechanistic model, the error
model, the log-prior, and optionally the population model and the
fixed model parameters.
        If measurements of multiple individuals exist in the dataset, the
        individual's ID can be passed to return the log-posterior associated
        with that individual. If no ID is selected and no population model
        has been set, a list of log-posteriors is returned, corresponding to
        each of the individuals.
This method raises an error if the data or the log-prior has not been
set. See :meth:`set_data` and :meth:`set_log_prior`.
.. note::
When a population model has been set, individual log-posteriors
can no longer be selected and ``individual`` is ignored.
        :param individual: The ID of an individual. If ``None`` the
            log-posteriors for all individuals are returned.
:type individual: str | None, optional
:param prior_is_id_specific: If True and this is a population model,
then the resulting log_prior will be a list of priors for each ID.
:type prior_is_id_specific: bool, optional
"""
        # Check prerequisites
if self._log_prior is None:
raise ValueError(
'The log-prior has not been set.')
# Make sure individual is None, when population model is set
_id = individual if self._population_models is None else None
# Check that individual is in ids
if (_id is not None) and (_id not in self._ids):
raise ValueError(
'The individual cannot be found in the ID column of the '
'dataset.')
#Ignore prior_is_id_specific if this is not a population model
if self._population_models is None:
prior_is_id_specific = False
# Create log-likelihoods
log_likelihoods = self._create_log_likelihoods(_id)
log_priors = self._log_prior
if self._population_models is not None:
# Compose HierarchicalLogLikelihoods
log_likelihoods = [chi.HierarchicalLogLikelihood(
log_likelihoods, self._population_models,
id_key=self._id_key, time_key=self._time_key, biom_key=self._biom_key, meas_key=self._meas_key
)]
if prior_is_id_specific:
log_priors = chi.IDSpecificLogPrior([
log_priors for i in self._ids], self._population_models, self._ids)
# Compose the log-posteriors
log_posteriors = []
for log_likelihood in log_likelihoods:
# Create individual posterior
if isinstance(log_likelihood, chi.LogLikelihood):
log_posterior = chi.LogPosterior(
log_likelihood, log_priors)
# Create hierarchical posterior
elif isinstance(log_likelihood, chi.HierarchicalLogLikelihood):
log_posterior = chi.HierarchicalLogPosterior(
log_likelihood, log_priors)
# Append to list
log_posteriors.append(log_posterior)
# If only one log-posterior in list, unwrap the list
if len(log_posteriors) == 1:
return log_posteriors.pop()
return log_posteriors
def get_n_parameters(
self, exclude_pop_model=False, exclude_bottom_level=False):
"""
Returns the number of model parameters, i.e. the combined number of
parameters from the mechanistic model, the error model and, if set,
the population model.
Any parameters that have been fixed to a constant value will not be
included in the number of model parameters.
:param exclude_pop_model: A boolean flag which can be used to obtain
the number of parameters as if the population model wasn't set.
:type exclude_pop_model: bool, optional
:param exclude_bottom_level: A boolean flag which can be used to
exclude the bottom-level parameters. This only has an effect when
a population model is set.
:type exclude_bottom_level: bool, optional
"""
if exclude_pop_model:
n_parameters, _ = self._get_number_and_parameter_names(
exclude_pop_model=True)
return n_parameters
if exclude_bottom_level:
n_parameters, _ = self._get_number_and_parameter_names(
exclude_bottom_level=True)
return n_parameters
return self._n_parameters
def get_parameter_names(
self, exclude_pop_model=False, exclude_bottom_level=False):
"""
Returns the names of the model parameters, i.e. the parameter names
of the mechanistic model, the error model and, if set, the
population model.
Any parameters that have been fixed to a constant value will not be
included in the list of model parameters.
:param exclude_pop_model: A boolean flag which can be used to obtain
the parameter names as if the population model wasn't set.
:type exclude_pop_model: bool, optional
:param exclude_bottom_level: A boolean flag which can be used to
exclude the bottom-level parameters. This only has an effect when
a population model is set.
:type exclude_bottom_level: bool, optional
"""
if exclude_pop_model:
_, parameter_names = self._get_number_and_parameter_names(
exclude_pop_model=True)
return copy.copy(parameter_names)
if exclude_bottom_level:
_, parameter_names = self._get_number_and_parameter_names(
exclude_bottom_level=True)
return parameter_names
return copy.copy(self._parameter_names)
def get_predictive_model(self, exclude_pop_model=False, individual=None):
"""
Returns the :class:`PredictiveModel` defined by the mechanistic model,
the error model, and optionally the population model and the
fixed model parameters.
:param exclude_pop_model: A boolean flag which can be used to obtain
the predictive model as if the population model wasn't set.
        :type exclude_pop_model: bool, optional
        :param individual: The ID of an individual whose individual-specific
            fixed parameters are applied to the mechanistic model. Only has an
            effect when the population model is excluded or not set.
        :type individual: str | None, optional
        """
#Check if no population model has been set, or is excluded
no_population_model = (self._population_models is None) or (exclude_pop_model)
#Check if we have individual-specific fixed parameters
sifpd = self._individual_fixed_param_dict
if sifpd is not None and len(sifpd)>0 and no_population_model:
if individual is None:
warn(UserWarning(
"No individual given for predictive model, but individual-specific fixed parameters exist."))
mechanistic_model = self._mechanistic_model
else:
mechanistic_model = self._mechanistic_model.copy()
mechanistic_model.fix_parameters(sifpd[individual])
else:
mechanistic_model = self._mechanistic_model
# Create predictive model
predictive_model = chi.PredictiveModel(
mechanistic_model, self._error_models)
# Return if no population model has been set, or is excluded
if no_population_model:
return predictive_model
# Create predictive population model
#TODO: Check that all of the _population_models have the right individual fixed parameters
predictive_model = chi.PredictivePopulationModel(
predictive_model, self._population_models, IDs=self._ids)
return predictive_model
def set_data(
self, data, dataErr=None, output_biomarker_dict=None, id_key='ID',
time_key='Time', biom_key='Biomarker', meas_key='Measurement',
dose_key='Dose', dose_duration_key='Duration'):
"""
Sets the data of the modelling problem.
The data contains information about the measurement time points, the
observed biomarker values, the type of biomarkers, IDs to
identify the corresponding individuals, and optionally information
on the administered dose amount and duration.
The data is expected to be in form of a :class:`pandas.DataFrame`
with the columns ID | Time | Biomarker | Measurement | Dose |
Duration.
If no dose or duration information exists, the corresponding column
keys can be set to ``None``.
:param data: A dataframe with an ID, time, biomarker,
measurement and optionally a dose and duration column.
:type data: pandas.DataFrame
:param dataErr: A dataframe with entries labelled as in data,
but whose measurement column gives the measuring error
of entries in data.
:type dataErr: pandas.DataFrame
:param output_biomarker_dict: A dictionary with mechanistic model
output names as keys and dataframe biomarker names as values. If
``None`` the model outputs and biomarkers are assumed to have the
same names.
:type output_biomarker_dict: dict, optional
:param id_key: The key of the ID column in the
:class:`pandas.DataFrame`. Default is `'ID'`.
:type id_key: str, optional
:param time_key: The key of the time column in the
            :class:`pandas.DataFrame`. Default is `'Time'`.
:type time_key: str, optional
:param biom_key: The key of the biomarker column in the
:class:`pandas.DataFrame`. Default is `'Biomarker'`.
:type biom_key: str, optional
:param meas_key: The key of the measurement column in the
:class:`pandas.DataFrame`. Default is `'Measurement'`.
:type meas_key: str, optional
:param dose_key: The key of the dose column in the
:class:`pandas.DataFrame`. Default is `'Dose'`.
:type dose_key: str, optional
:param dose_duration_key: The key of the duration column in the
:class:`pandas.DataFrame`. Default is `'Duration'`.
:type dose_duration_key: str, optional
"""
# Check if we need to store data errors, too
haveDataErrors = dataErr is not None
# Check input format
if not isinstance(data, pd.DataFrame):
raise TypeError(
'Data has to be a pandas.DataFrame.')
if haveDataErrors and not isinstance(dataErr, pd.DataFrame):
raise TypeError(
'Data errors have to be a pandas.DataFrame.')
# If model does not support dose administration, set dose keys to None
mechanistic_model = self._mechanistic_model
if isinstance(self._mechanistic_model, chi.ReducedMechanisticModel):
mechanistic_model = self._mechanistic_model.mechanistic_model()
if isinstance(mechanistic_model, chi.PharmacodynamicModel):
dose_key = None
dose_duration_key = None
keys = [id_key, time_key, biom_key, meas_key]
if dose_key is not None:
keys += [dose_key]
if dose_duration_key is not None:
keys += [dose_duration_key]
for key in keys:
if key not in data.keys():
raise ValueError(
'Data does not have the key <' + str(key) + '>.')
if haveDataErrors and key not in dataErr.keys():
raise ValueError(
'DataErr does not have the key <' + str(key) + '>.')
# Get default output-biomarker map
outputs = self._mechanistic_model.outputs()
biomarkers = data[biom_key].dropna().unique()
if haveDataErrors:
biomarkersE = dataErr[biom_key].dropna().unique()
assert (biomarkers==biomarkersE).all()
if output_biomarker_dict is None:
if (len(outputs) == 1) and (len(biomarkers) == 1):
# Create map of single output to single biomarker
output_biomarker_dict = {outputs[0]: biomarkers[0]}
else:
# Assume trivial map
output_biomarker_dict = {output: output for output in outputs}
# Check that output-biomarker map is valid
for output in outputs:
if output not in list(output_biomarker_dict.keys()):
raise ValueError(
'The output <' + str(output) + '> could not be identified '
'in the output-biomarker map.')
biomarker = output_biomarker_dict[output]
if biomarker not in biomarkers:
raise ValueError(
'The biomarker <' + str(biomarker) + '> could not be '
'identified in the dataframe.')
self._id_key, self._time_key, self._biom_key, self._meas_key = [
id_key, time_key, biom_key, meas_key]
self._data = data[keys]
self._output_biomarker_dict = output_biomarker_dict
# Make sure data is formatted correctly
self._data = self._clean_data(self._data, dose_key, dose_duration_key)
self._ids = self._data[self._id_key].unique()
#Do the same thing to data errors
if haveDataErrors:
self._dataErr = dataErr[keys]
self._dataErr = self._clean_data(self._dataErr, dose_key, dose_duration_key)
            err_ids = self._dataErr[self._id_key].unique()
assert (self._ids == err_ids).all()
# Extract dosing regimens
self._dosing_regimens = None
if dose_key is not None:
self._dosing_regimens = self._extract_dosing_regimens(
dose_key, dose_duration_key)
# Update number and names of parameters
self._n_parameters, self._parameter_names = \
self._get_number_and_parameter_names()
def set_log_prior(self, log_priors, parameter_names=None, prior_is_id_specific=False):
"""
Sets the log-prior probability distribution of the model parameters.
By default the log-priors are assumed to be ordered according to
:meth:`get_parameter_names`. Alternatively, the mapping of the
log-priors can be specified explicitly with the input argument
``param_names``.
If a population model has not been set, the provided log-prior is used
for all individuals.
.. note::
This method requires that the data has been set, since the number
            of parameters of a hierarchical log-posterior varies with the number
of individuals in the dataset.
:param log_priors: A list of :class:`pints.LogPrior` of the length
:meth:`get_n_parameters`.
:type log_priors: list[pints.LogPrior]
:param parameter_names: A list of model parameter names, which is used
to map the log-priors to the model parameters. If ``None`` the
log-priors are assumed to be ordered according to
:meth:`get_parameter_names`.
:type parameter_names: list[str], optional
:param prior_is_id_specific: If True and this is a population model,
then the resulting log_prior will be a list of priors for each ID.
:type prior_is_id_specific: bool, optional
"""
        # Check prerequisites
if self._data is None:
raise ValueError('The data has not been set.')
# Check inputs
for log_prior in log_priors:
if not isinstance(log_prior, pints.LogPrior):
raise ValueError(
'All marginal log-priors have to be instances of a '
'pints.LogPrior.')
expected_n_parameters = self.get_n_parameters(
exclude_pop_model=prior_is_id_specific, exclude_bottom_level=not prior_is_id_specific)
if len(log_priors) != expected_n_parameters:
raise ValueError(
'One marginal log-prior has to be provided for each '
                'parameter. There are <' + str(expected_n_parameters) + '> model '
'parameters.')
n_parameters = 0
for log_prior in log_priors:
n_parameters += log_prior.n_parameters()
if n_parameters != expected_n_parameters:
raise ValueError(
'The joint log-prior does not match the dimensionality of the '
'problem. At least one of the marginal log-priors appears to '
'be multivariate.')
if parameter_names is not None:
model_names = self.get_parameter_names(
exclude_pop_model=prior_is_id_specific, exclude_bottom_level=not prior_is_id_specific)
if sorted(list(parameter_names)) != sorted(model_names):
raise ValueError(
'The specified parameter names do not match the model '
'parameter names.')
# Sort log-priors according to parameter names
ordered = []
for name in model_names:
index = parameter_names.index(name)
ordered.append(log_priors[index])
log_priors = ordered
self._log_prior = pints.ComposedLogPrior(*log_priors)
def set_normalised_error_models(self, value):
"""
Makes all error functions divide likelihoods by the mean log observation before returning.
:param value: A boolean.
"""
self._normalised_error_models = value
for em in self._error_models:
em.set_normalised_log_likelihood(value)
def set_population_model(self, pop_models, parameter_names=None):
"""
Sets the population model of the modelling problem.
A population model specifies how model parameters vary across
individuals. The population model is defined by a list of
:class:`PopulationModel` instances, one for each individual model
parameter.
.. note::
Setting a population model resets the log-prior to ``None``.
:param pop_models: A list of :class:`PopulationModel` instances of
the same length as the number of individual model parameters, see
:meth:`get_n_parameters` with ``exclude_pop_model=True``.
:type pop_models: list[PopulationModel]
:param parameter_names: A list of model parameter names, which can be
used to map the population models to model parameters. If ``None``,
the population models are assumed to be ordered in the same way as
the model parameters, see
:meth:`get_parameter_names` with ``exclude_pop_model=True``.
:type parameter_names: list[str], optional
"""
# Check inputs
for pop_model in pop_models:
if not isinstance(pop_model, chi.PopulationModel):
raise TypeError(
'The population models have to be an instance of a '
'chi.PopulationModel.')
# Get individual parameter names
n_parameters, param_names = self._get_number_and_parameter_names(
exclude_pop_model=True)
# Make sure that each parameter is assigned to a population model
if len(pop_models) != n_parameters:
raise ValueError(
'The number of population models does not match the number of '
'model parameters. Exactly one population model has to be '
'provided for each parameter. There are '
'<' + str(n_parameters) + '> model parameters.')
if (parameter_names is not None) and (
sorted(parameter_names) != sorted(param_names)):
raise ValueError(
'The parameter names do not coincide with the model parameter '
'names.')
        # Sort population models according to the given parameter names
if parameter_names is not None:
# Create default population model container
ordered_pop_models = []
# Map population models according to parameter names
for name in param_names:
index = parameter_names.index(name)
ordered_pop_models.append(pop_models[index])
pop_models = ordered_pop_models
# Set data within each pop_model that needs it
for pop_model in pop_models:
if isinstance(pop_model, chi.KolmogorovSmirnovPopulationModel):
pop_model.create_observation_CDF(
self._data, time_key=self._time_key, biom_key=self._biom_key, meas_key=self._meas_key)
# Save individual parameter names and population models
self._population_models = copy.copy(pop_models)
# Update parameter names and number of parameters
self._set_population_model_parameter_names()
self._n_parameters, self._parameter_names = \
self._get_number_and_parameter_names()
# Set prior to default
self._log_prior = None
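# ---------------------------------------------------------------------------
# Minimal usage sketch of the modelling problem class above (illustrative
# only, kept as comments). The class is assumed to be exposed as
# chi.ProblemModellingController; the mechanistic model, error models and data
# below are placeholders and not part of this module.
#
#   import chi, pandas as pd, pints
#
#   data = pd.DataFrame({
#       'ID': [1, 1, 2, 2],
#       'Time': [0.0, 1.0, 0.0, 1.0],
#       'Biomarker': ['Tumour volume'] * 4,
#       'Measurement': [0.3, 0.5, 0.2, 0.6]})
#
#   problem = chi.ProblemModellingController(mechanistic_model, error_models)
#   problem.set_data(data)
#   # optionally: problem.set_population_model(
#   #     [chi.PooledModel()] * problem.get_n_parameters(exclude_pop_model=True))
#   problem.set_log_prior(
#       [pints.UniformLogPrior(0, 10)] * problem.get_n_parameters())
#   log_posterior = problem.get_log_posterior()
# ---------------------------------------------------------------------------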
| 41.082343
| 131
| 0.625767
|
4a17cbf21e85ff1225e543c9acfa22a00883cb53
| 1,194
|
py
|
Python
|
statefulGenerator.py
|
AshTiwari/Python-Fundamentals
|
392ba4fea3c3a1a5eccc9852c84951ed4f83763d
|
[
"Adobe-Glyph"
] | null | null | null |
statefulGenerator.py
|
AshTiwari/Python-Fundamentals
|
392ba4fea3c3a1a5eccc9852c84951ed4f83763d
|
[
"Adobe-Glyph"
] | null | null | null |
statefulGenerator.py
|
AshTiwari/Python-Fundamentals
|
392ba4fea3c3a1a5eccc9852c84951ed4f83763d
|
[
"Adobe-Glyph"
] | null | null | null |
''' Stateful Generator Function '''
def firstN(n, iterable):
    ''' This function returns the first `n` items of the `iterable`.
    It is evaluated lazily.
    The generator function maintains the value of the variable `counter`, in addition to `item`,
    and reuses it when the function resumes for the next evaluation. '''
counter = 0
for item in iterable:
if counter == n:
return
counter += 1
yield item
def unique(iterable):
    ''' This function returns the unique values of the iterable, keeping the first occurrence.
    It is evaluated lazily.
    The generator function maintains the state of the variable `seen`
    and reuses it when the function resumes for the next evaluation.
    `seen.add(item)` comes after the `yield` statement, so it is executed only when the
    generator function resumes.
    Till then, the state of `seen` and `item` is maintained. '''
seen = set()
for item in iterable:
if item in seen:
continue
yield item
seen.add(item)
if __name__ == "__main__":
for item in firstN(3, unique([1,2,2,3,3,4,5])):
print(item)
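    # Because evaluation is lazy, the same generators also work on unbounded
    # iterators; only the requested items are ever produced (illustrative
    # sketch using itertools.count, which never terminates on its own).
    import itertools
    for item in firstN(3, unique(itertools.count(100))):
        print(item)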
| 31.421053
| 93
| 0.621441
|
4a17cc6543b40a4c81baa6222f85c2294d8467a7
| 614
|
py
|
Python
|
src/util/hashing.py
|
markste-in/malware_analysis
|
285c284a9575ce3cda383d242dff14f4f06881af
|
[
"MIT"
] | null | null | null |
src/util/hashing.py
|
markste-in/malware_analysis
|
285c284a9575ce3cda383d242dff14f4f06881af
|
[
"MIT"
] | null | null | null |
src/util/hashing.py
|
markste-in/malware_analysis
|
285c284a9575ce3cda383d242dff14f4f06881af
|
[
"MIT"
] | null | null | null |
import hashlib
import glob
import os
def hash_file(fname, hash_fnc): #https://stackoverflow.com/a/3431838
if hash_fnc == 'md5': hash_fun = hashlib.md5()
elif hash_fnc == "sha1" : hash_fun = hashlib.sha1()
else: return -1
try:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_fun.update(chunk)
return hash_fun.hexdigest()
    except (IOError, OSError):
return -1
def get_hash_from_filenames(path):
files = glob.glob(os.path.join(path,"*"))
hashes = [os.path.splitext(os.path.basename(f))[0] for f in files]
return hashes
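# Minimal usage sketch (the file name and directory below are placeholders,
# not part of this module):
if __name__ == '__main__':
    print(hash_file('sample.bin', 'sha1'))       # hex digest, or -1 on error
    print(get_hash_from_filenames('./samples'))  # hashes parsed from file names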
| 29.238095
| 70
| 0.625407
|
4a17cd18d0bfc9c4028191bcdb7d39dcc5e42369
| 734
|
py
|
Python
|
tests/test_distance.py
|
bprinty/sequtils
|
a4ee5cb1ed2e84547050e4ebc4a9148df23768af
|
[
"Apache-2.0"
] | 1
|
2021-11-23T12:04:48.000Z
|
2021-11-23T12:04:48.000Z
|
tests/test_distance.py
|
bprinty/sequtils
|
a4ee5cb1ed2e84547050e4ebc4a9148df23768af
|
[
"Apache-2.0"
] | null | null | null |
tests/test_distance.py
|
bprinty/sequtils
|
a4ee5cb1ed2e84547050e4ebc4a9148df23768af
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from parameterized import parameterized
import sequtils
class TestDistance(unittest.TestCase):
@parameterized.expand((
('ACGT', 'ACGT', 0),
('AAAAAAAAAAA', 'AAAAAAATAAA', 1),
('ATGACTGAATATAAACTTGT', 'ATGACTCATTATGAACTTGT', 3),
))
def test_hamming(self, sequence, other, res):
self.assertEqual(sequtils.hamming(sequence, other), res)
return
@parameterized.expand((
('ACGT', 'ATGT', 1),
('AAAAAAAAAAA', 'AAAAAATAA', 3),
('ATGACTGAATATAAACTTGT', 'ATGACTGAATTAGTAAAAACTTGT', 4),
))
def test_edit(self, sequence, other, res):
self.assertEqual(sequtils.edit(sequence, other), res)
return
| 25.310345
| 64
| 0.628065
|
4a17cd1fcb776051e26bc32be916e20c5f7c9237
| 328
|
py
|
Python
|
XXOORR.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | 2
|
2021-07-26T03:32:24.000Z
|
2021-07-31T02:32:14.000Z
|
XXOORR.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | null | null | null |
XXOORR.py
|
abphilip-codes/Codechef_Practice
|
21fd52e03df8a0f72a08b0e2a0b48dbd508aac95
|
[
"MIT"
] | 1
|
2021-07-14T17:45:33.000Z
|
2021-07-14T17:45:33.000Z
|
# https://www.codechef.com/problems/XXOORR
import math
for T in range(int(input())):
n,k = map(int, input().split())
a,b = list(map(int, input().split())), [0]*32
for z in a:
p=0
while(z>0):
if(z&1): b[p]+=1
z=z>>1
p+=1
print(sum([math.ceil(z/k) for z in b]))
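# Worked example of what the loop above computes (illustrative, assuming the
# same input format): for n=4, k=2 and a=[3, 6, 7, 10], the per-bit counts are
# b[0]=2 (3, 7), b[1]=4 (3, 6, 7, 10), b[2]=2 (6, 7) and b[3]=1 (10), so the
# printed answer is ceil(2/2) + ceil(4/2) + ceil(2/2) + ceil(1/2) = 1+2+1+1 = 5.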
| 25.230769
| 49
| 0.481707
|
4a17cdb2e1cd1fe31c657f773490c789e6c32911
| 2,062
|
py
|
Python
|
Python3/1239.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/1239.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/1239.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 20 ms submission
class Solution:
def maxLength(self, arr: List[str]) -> int:
arr = [item for item in arr if len(item) == len(set(item))]
def check(a, b):
c = a + b
return len(set(c)) == len(c)
search = {}
n = len(arr)
for i in range(n-1):
for j in range(i+1, n):
search[(arr[i], arr[j])] = check(arr[i], arr[j])
def maxSubLength(arr, search, dp):
n = len(arr)
if dp.get(tuple(arr)) is not None:
return dp.get(tuple(arr))
if n == 0:
return 0
if n == 1:
return len(arr[0])
narr = []
for j in range(1, n):
if search[(arr[0], arr[j])]:
narr.append(arr[j])
contains = len(arr[0]) + maxSubLength(narr, search, dp)
notContains = maxSubLength(arr[1:], search, dp)
maxValue = max(contains, notContains)
dp[tuple(arr)] = maxValue
return maxValue
dp = {}
return maxSubLength(arr, search, dp)
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def maxLength(self, arr: List[str]) -> int:
arr = [s for s in arr if len(s) == len(set(s))]
if not arr:
return 0
arr.sort(reverse=True, key=len)
max_len = 0
for i in range(len(arr)):
tmp = arr[i]
for j in range(len(arr)):
if not set(tmp) & set(arr[j]):
tmp += arr[j]
max_len = max(max_len, len(tmp))
return max_len
__________________________________________________________________________________________________
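# Worked example (illustrative): for arr = ["un", "iq", "ue"] both solutions
# above return 4, because "un" + "iq" (or "iq" + "ue") concatenates to a
# 4-character string with unique characters, while adding the remaining word
# would repeat the letter 'u'.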
| 32.21875
| 98
| 0.507274
|
4a17cdea6d647f59cfdaa53eaa2da7602ca7c382
| 2,699
|
py
|
Python
|
bin/generate_markdown.py
|
mpgarate/spotify-infinite
|
0c864177825a060d95d2d127a119b33d06a634ec
|
[
"MIT"
] | 1
|
2019-03-05T15:05:05.000Z
|
2019-03-05T15:05:05.000Z
|
bin/generate_markdown.py
|
mpgarate/spotify-infinite
|
0c864177825a060d95d2d127a119b33d06a634ec
|
[
"MIT"
] | null | null | null |
bin/generate_markdown.py
|
mpgarate/spotify-infinite
|
0c864177825a060d95d2d127a119b33d06a634ec
|
[
"MIT"
] | null | null | null |
import dateutil.parser as dp
import json
class AlbumFormatter(object):
def __init__(self, album):
self.album = album
def _raw_album_url(self):
return self.album['external_urls']['spotify']
def image(self):
image = None
for i in self.album['images']:
if not image:
image = i
if i['height'] == 64 or (image['height'] < 300
and i['height'] > image['height']):
image = i
if not image:
return "None"
image = '<a href="%s" target="_blank"><img width="64" src="%s"></a>' % (
self._raw_album_url(), image['url'])
return image
def name(self):
return '<a href="%s" target="_blank">%s</a>' % (self._raw_album_url(),
self.album['name'])
def artists(self):
return "<br>".join(
map(
lambda a: '<a href="%s" target="_blank">%s</a>' % (a[
'external_urls']['spotify'], a['name']),
self.album['artists']))
def release_date(self):
return self.album['release_date']
def total_tracks(self):
return self.album['total_tracks']
def library_add_date(self):
return dp.parse(self.album['added_at']).strftime('%F')
class MarkdownGenerator(object):
albumstore_filename = 'albums.json'
recent_albums_filename = 'recent_albums.md'
def __init__(self):
self.albums = []
with open(self.albumstore_filename, 'r') as f:
self.albums = [v for k, v in json.loads(f.read() or "{}").items()]
def _get_recent_albums(self):
return sorted(
self.albums,
key=lambda a: dp.parse(a['added_at']).strftime('%s'),
reverse=True)
def write_recent_albums(self):
with open(self.recent_albums_filename, 'w') as f:
f.write("# Recent Albums\n\n")
f.write("[Jump to bottom](#bottom)\n\n")
f.write(
"|Cover|Album|Artist|Release Date|Tracks|Library Add Date|\n")
f.write(
"|-----|-----|------|------------|------|----------------|\n")
for album in self._get_recent_albums():
afmt = AlbumFormatter(album)
doc = "%s | %s | %s | %s | %s | %s\n" % (
afmt.image(), afmt.name(), afmt.artists(),
afmt.release_date(), afmt.total_tracks(),
afmt.library_add_date())
f.write(doc)
f.write("\n")
f.write("### Bottom")
MarkdownGenerator().write_recent_albums()
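# The albums.json store read above is assumed to look roughly like this
# (illustrative sketch; only the fields accessed by AlbumFormatter are shown,
# and all identifiers and values are placeholders):
#
#   {
#     "<album_id>": {
#       "added_at": "2019-01-01T00:00:00Z",
#       "name": "Some Album",
#       "release_date": "2018-11-30",
#       "total_tracks": 10,
#       "external_urls": {"spotify": "https://open.spotify.com/album/..."},
#       "images": [{"height": 64, "url": "https://.../cover64"}],
#       "artists": [
#         {"name": "Some Artist",
#          "external_urls": {"spotify": "https://open.spotify.com/artist/..."}}
#       ]
#     }
#   }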
| 29.988889
| 80
| 0.497221
|
4a17cdfa0451662a73f7ac70cf93ef7c5f913a7f
| 4,017
|
py
|
Python
|
tools/scripts/json-Elastic-bhr.py
|
wisslab/judaicalink-search
|
25f937dfc1cad385a355feacedf3ea722f953a14
|
[
"MIT"
] | null | null | null |
tools/scripts/json-Elastic-bhr.py
|
wisslab/judaicalink-search
|
25f937dfc1cad385a355feacedf3ea722f953a14
|
[
"MIT"
] | null | null | null |
tools/scripts/json-Elastic-bhr.py
|
wisslab/judaicalink-search
|
25f937dfc1cad385a355feacedf3ea722f953a14
|
[
"MIT"
] | null | null | null |
# Maral Dadvar
#This code extracts further information from GND for the authors of the Freimann collection who have a GND-ID assigned to them.
#15/01/2018
#Ver. 01
import rdflib
from rdflib import Namespace, URIRef, Graph , Literal
from SPARQLWrapper import SPARQLWrapper2, XML , RDF , JSON
from rdflib.namespace import RDF, FOAF , SKOS ,RDFS
import os
import json
import io
import re
os.chdir(r'C:\Users\Maral\Desktop')
sparql = SPARQLWrapper2("http://localhost:3030/Datasets/sparql")
file = io.open('textfile-bhr.json','w' , encoding ='utf-8')
foaf = Namespace("http://xmlns.com/foaf/0.1/")
skos = Namespace("http://www.w3.org/2004/02/skos/core#")
gndo = Namespace("http://d-nb.info/standards/elementset/gnd#")
jl = Namespace("http://data.judaicalink.org/ontology/")
owl = Namespace ("http://www.w3.org/2002/07/owl#")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX bibtex: <http://data.bibbase.org/ontology/#>
PREFIX jl: <http://data.judaicalink.org/ontology/>
select distinct ?o ?name ?bd ?dd ?bl ?dl ?abs ?pub
FROM <http://maral.wisslab.org/graphs/bhr>
where
{
?o skos:prefLabel ?name
optional {?o jl:birthDate ?bd}
optional {?o jl:deathDate ?dd}
optional{ ?o jl:birthLocation ?bl }
optional {?o jl:deathLocation ?dl}
optional {?o jl:hasAbstract ?abs}
optional {?o jl:hasPublication ?pub}
}
""")
sparql.setReturnFormat(XML)
results = sparql.query().convert()
for i in range(0,len(results.bindings)):
uri = results.bindings[i]['o'].value
name = results.bindings[i]['name'].value
if 'bd' in results.bindings[i].keys():
birth = results.bindings[i]['bd'].value
else: birth="NA"
if 'dd' in results.bindings[i].keys():
death = results.bindings[i]['dd'].value
else: death = "NA"
if 'bl' in results.bindings[i].keys():
blocation = results.bindings[i]['bl'].value
blocation = blocation.replace('\n','')
else: blocation = "NA"
if 'dl' in results.bindings[i].keys():
dlocation=results.bindings[i]['dl'].value
dlocation = dlocation.replace('\n','')
else: dlocation = "NA"
if 'abs' in results.bindings[i].keys():
abstract = results.bindings[i]['abs'].value
abstract = abstract.replace('"','')
abstract = abstract.replace('{','')
abstract = abstract.replace('}','')
abstract = abstract.replace('/','')
abstract = abstract.replace('\n','')
abstract = abstract.replace('\r','')
abstract = re.sub( '\s+', ' ', abstract).strip()
else: abstract = "NA"
if 'pub' in results.bindings[i].keys():
publication = results.bindings[i]['pub'].value
publication=publication.replace('"','')
publication=publication.replace('{','')
publication=publication.replace('}','')
publication=publication.replace('/','')
publication=publication.replace('\n','')
publication=publication.replace('\r','')
publication = re.sub( '\s+', ' ', publication).strip()
else: publication = "NA"
index = '{' + '"index":' + '{"_id":"' + uri + '"}}'
file.writelines(index + '\n')
rest = '{' + '"Name":"' + name + '",' + '"birthDate":"' + birth + '",' + '"deathDate":"' + death + '",' + '"birthLocation":"' + blocation +'",' + '"deathLocation":"' + dlocation + '"' + ',' + '"Abstract":"' + abstract + '"' + ',' + '"Publication":"' + publication + '"' + '}'
file.writelines(rest + '\n')
file.close()
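# Each record is written as an Elasticsearch bulk-index pair of lines, e.g.
# (illustrative sketch; the URI and field values are placeholders):
#
#   {"index": {"_id": "http://data.judaicalink.org/data/bhr/example"}}
#   {"Name":"Example Name","birthDate":"1900","deathDate":"1980","birthLocation":"NA","deathLocation":"NA","Abstract":"NA","Publication":"NA"}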
| 32.92623
| 280
| 0.619368
|
4a17cfdb2f104a85fd29f62cd8aefb33366b0d8a
| 4,226
|
py
|
Python
|
code/feature_extraction/extract_features.py
|
KristinaSig/MLinPractice
|
09c7729c0e1c92326cc85fe7d76fa43ebbfd4ca5
|
[
"MIT"
] | null | null | null |
code/feature_extraction/extract_features.py
|
KristinaSig/MLinPractice
|
09c7729c0e1c92326cc85fe7d76fa43ebbfd4ca5
|
[
"MIT"
] | null | null | null |
code/feature_extraction/extract_features.py
|
KristinaSig/MLinPractice
|
09c7729c0e1c92326cc85fe7d76fa43ebbfd4ca5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Runs the specified collection of feature extractors.
Created on Wed Sep 29 11:00:24 2021
@author: lbechberger
"""
import argparse, csv, pickle
import pandas as pd
import numpy as np
from code.feature_extraction.character_length import CharacterLength
from code.feature_extraction.avg_len_flag import AvgLenFeature
from code.feature_extraction.hashtags_count import HashtagCountFeature
from code.feature_extraction.mentions_count import MentionsCountFeature
from code.feature_extraction.media import ContainsMediaFeature
from code.feature_extraction.sentiment_score import SentimentScoreFeature
from code.feature_extraction.feature_collector import FeatureCollector
from code.util import COLUMN_TWEET, COLUMN_LABEL, COLUMN_TWEET_CLEAN
# setting up CLI
parser = argparse.ArgumentParser(description = "Feature Extraction")
parser.add_argument("input_file", help = "path to the input csv file")
parser.add_argument("output_file", help = "path to the output pickle file")
parser.add_argument("-e", "--export_file", help = "create a pipeline and export to the given location", default = None)
parser.add_argument("-i", "--import_file", help = "import an existing pipeline from the given location", default = None)
parser.add_argument("-c", "--char_length", action = "store_true", help = "compute the number of characters in the tweet")
parser.add_argument("-alf", "--avg_len_flag", action = "store_true", help = "compute the binary flag that indicates if length of the tweet is above average")
parser.add_argument("-hc", "--hashtag_count", action = "store_true", help = "count the number of hashtags extracted from the tweet")
parser.add_argument("-mc", "--mentions_count", action = "store_true", help = "count the number of mentions extracted from the tweet")
parser.add_argument("-m", "--media", action = "store_true", help = "state whether there was any media found in the tweet")
parser.add_argument("-s", "--sentiment_score", action = "store_true", help = "state the given score of sentiment polarity of the tweet")
args = parser.parse_args()
# load data
df = pd.read_csv(args.input_file, quoting = csv.QUOTE_NONNUMERIC, lineterminator = "\n")
if args.import_file is not None:
# simply import an exisiting FeatureCollector
with open(args.import_file, "rb") as f_in:
feature_collector = pickle.load(f_in)
else: # need to create FeatureCollector manually
# collect all feature extractors
features = []
if args.char_length:
# character length of original tweet (without any changes)
features.append(CharacterLength(COLUMN_TWEET))
if args.avg_len_flag:
# average character length flag based on the given dataset (plain text)
features.append(AvgLenFeature(COLUMN_TWEET_CLEAN))
if args.hashtag_count:
# count of hashtags extracted in the hashtags column
features.append(HashtagCountFeature())
if args.mentions_count:
# count of mentions extracted in the mentions column
features.append(MentionsCountFeature())
if args.media:
# state the presence of any media in the tweet
features.append(ContainsMediaFeature())
if args.sentiment_score:
# sentiment score indicating the polarity of the tweet (based on the plain text)
features.append(SentimentScoreFeature())
# create overall FeatureCollector
feature_collector = FeatureCollector(features)
# fit it on the given data set (assumed to be training data)
feature_collector.fit(df)
# apply the given FeatureCollector on the current data set
# maps the pandas DataFrame to an numpy array
feature_array = feature_collector.transform(df)
# get label array
label_array = np.array(df[COLUMN_LABEL])
label_array = label_array.reshape(-1, 1)
# store the results
results = {"features": feature_array, "labels": label_array,
"feature_names": feature_collector.get_feature_names()}
with open(args.output_file, 'wb') as f_out:
pickle.dump(results, f_out)
# export the FeatureCollector as pickle file if desired by user
if args.export_file is not None:
with open(args.export_file, 'wb') as f_out:
pickle.dump(feature_collector, f_out)
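# Example invocation (illustrative; the file paths are placeholders):
#
#   python -m code.feature_extraction.extract_features \
#       data/preprocessing/preprocessed.csv data/feature_extraction/features.pickle \
#       -e data/feature_extraction/pipeline.pickle -c -alf -hc -mc -m -s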
| 45.44086
| 157
| 0.750592
|
4a17d103ddaeeb70c59a76339d40fef8a7fe0b1b
| 6,243
|
py
|
Python
|
old-version/cyclum/illustration.py
|
lshh125/cyclum
|
4bd7f136680108d28e4d07e627cda7cd4a242e64
|
[
"MIT"
] | 12
|
2020-03-01T09:15:45.000Z
|
2021-10-03T07:58:48.000Z
|
old-version/cyclum/illustration.py
|
lshh125/cyclum
|
4bd7f136680108d28e4d07e627cda7cd4a242e64
|
[
"MIT"
] | 5
|
2020-11-13T18:38:18.000Z
|
2021-12-17T18:47:32.000Z
|
old-version/cyclum/illustration.py
|
lshh125/cyclum
|
4bd7f136680108d28e4d07e627cda7cd4a242e64
|
[
"MIT"
] | 5
|
2020-03-21T01:51:44.000Z
|
2022-03-15T11:08:59.000Z
|
import matplotlib as mpl
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
import numpy as np
from cyclum import evaluation
class FigureWriter:
"""
keep and write figures into a pdf file.
"""
def __init__(self, pdf_name: str):
self.figures = []
if not pdf_name.endswith('.pdf'):
pdf_name += '.pdf'
self.pdf_name = pdf_name
def add_figure(self, figure, title=None):
"""
add a figure, but not write to file
:param figure:
:param title:
:return:
"""
        if figure is not None:
self.figures.append(figure)
if title is not None:
self.figures[-1].suptitle(title)
def write(self):
with matplotlib.backends.backend_pdf.PdfPages(self.pdf_name) as pdf:
for figure in self.figures:
pdf.savefig(figure, bbox_inches='tight')
def add_figure_and_write(self, figure, title=None):
self.add_figure(figure, title)
self.write()
def __call__(self, figure, title=None, wait=False):
"""
add a figure. write to file if not "wait"
:param figure:
:param title:
:param wait: if set to False, write to file.
:return:
"""
if wait:
self.add_figure(figure, title)
else:
self.add_figure_and_write(figure, title)
def plot_gene_sparsity(linear_data, use_ratio=True):
"""
    Return a figure of the number (or ratio) of nonzero cells per gene, sorted by gene.
    :param linear_data: data matrix (cells x genes)
    :param use_ratio: if True, plot the percentage of nonzero cells; otherwise plot absolute counts
:return:
"""
figure = plt.figure()
axes = figure.subplots()
nonzero_cells_per_gene = np.sum(linear_data > 1e-3, axis=0)
if use_ratio:
nonzero_genes_ratio = nonzero_cells_per_gene / linear_data.shape[0]
axes.plot(np.sort(nonzero_genes_ratio) * 100)
axes.set_ylabel('nonzero cells %')
else:
axes.plot(np.sort(nonzero_cells_per_gene))
axes.set_ylabel('nonzero cells #')
axes.set_xlabel("gene # sorted by nonzero genes")
return figure
def plot_cell_sparsity(linear_data, use_ratio=True):
"""
Return a figure of #{cell, none_zero_genes(cell) > x}
:param linear_data: data
:param use_ratio: plot as ratio or
:return:
"""
figure = plt.figure()
axes = figure.subplots()
nonzero_genes_per_cell = np.sum(linear_data > 1e-3, axis=1)
if use_ratio:
nonzero_genes_ratio = nonzero_genes_per_cell / linear_data.shape[1]
axes.plot(np.sort(nonzero_genes_ratio) * 100)
axes.set_ylabel('nonzero genes %')
else:
axes.plot(np.sort(nonzero_genes_per_cell))
axes.set_ylabel('nonzero genes #')
axes.set_xlabel("cell # sorted by nonzero genes")
return figure
def plot_pair_color(a, b, color):
"""
either plot an embedding, two dimensions at a time
or compare two embeddings
:param a:
:param b:
:param color:
:return:
"""
n_col = a.shape[1]
n_row = b.shape[1]
figure = plt.figure(figsize=(n_row * 2 + 0.1, n_col * 2 + 0.1))
ax_list = [figure.add_subplot(n_col, n_row, i + 1) for i in range(n_col * n_row)]
n = 0
for i in range(n_col):
for j in range(n_row):
ax_list[n].scatter(b[:, j], a[:, i], s=6, c=color)
n = n + 1
return figure
def plot_round_color(flat, color):
figure = plt.figure(figsize=(8, 8))
axes = figure.subplots()
xx = np.array([[0.5], [1]]) @ np.cos(flat).T
yy = np.array([[0.5], [1]]) @ np.sin(flat).T
for i in range(len(color)):
axes.plot(xx[:, i], yy[:, i], c=color[i], lw=1.)
axes.set_xlim([-1.1, 1.1])
axes.set_ylim([-1.1, 1.1])
return figure
def plot_round_distr_color(flat, label, color_dict):
figure = plt.figure()
ax = figure.subplots(subplot_kw={'projection': 'polar'})
color = [color_dict[l] for l in label]
for x, color in zip(flat, color):
ax.plot([x, x], [1.5, 2], color=color, linewidth=0.5)
xx = []
pp = []
max_p = 0
for l in color_dict:
_ = evaluation.periodic_parzen_estimate(flat[label == l], 2 * np.pi)
xx.append(_[0])
pp.append(_[1])
max_p = np.max([np.max(pp[-1]), max_p])
for x, p, l in zip(xx, pp, color_dict):
ax.fill_between(x, p / max_p + 2, 2, color=color_dict[l], alpha=0.5, linewidth=0.0)
ax.set_yticks([])
return figure
def plot_round_distr_color2(flat, label1, label2, color_dict1, color_dict2):
figure = plt.figure()
ax = figure.subplots(subplot_kw={'projection': 'polar'})
color = [color_dict1[l] for l in label1]
for x, color in zip(flat, color):
ax.plot([x, x], [2, 2.5], color=color, linewidth=0.5)
color = [color_dict2[l] for l in label2]
for x, color in zip(flat, color):
ax.plot([x, x], [1.5, 2.0], color=color, linewidth=0.5)
xx = []
pp = []
max_p = 0
for l in color_dict1:
_ = evaluation.periodic_parzen_estimate(flat[label1 == l], 2 * np.pi)
xx.append(_[0])
pp.append(_[1])
max_p = np.max([np.max(pp[-1]), max_p])
for x, p, l in zip(xx, pp, color_dict1):
ax.fill_between(x, p / max_p + 2.5, 2.5, color=color_dict1[l], alpha=0.6, linewidth=0.0)
xx = []
pp = []
max_p = 0
for l in color_dict2:
_ = evaluation.periodic_parzen_estimate(flat[label2 == l], 2 * np.pi)
xx.append(_[0])
pp.append(_[1])
max_p = np.max([np.max(pp[-1]), max_p])
for x, p, l in zip(xx, pp, color_dict2):
ax.fill_between(x, 1.5 - p / max_p, 1.5, color=color_dict2[l], alpha=0.6, linewidth=0.0)
ax.set_yticks([])
return figure
def plot_multi_distr(xs, ys, colors, labels):
figure = plt.figure()
axes = figure.subplots()
if type(xs) is list:
for x, y, color, label in zip(xs, ys, colors, labels):
axes.fill_between(x, y, alpha=0.4, linewidth=0.0, color=color, label=label)
else:
x = xs
for y, color, label in zip(ys, colors, labels):
axes.fill_between(x, y, alpha=0.4, linewidth=0.0, color=color, label=color)
axes.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
return figure
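if __name__ == '__main__':
    # Minimal usage sketch of FigureWriter (illustrative; the output file name
    # and the random data below are placeholders, not part of the library).
    writer = FigureWriter('illustration_demo')
    flat = np.random.rand(50, 1) * 2 * np.pi
    colors = ['r' if i % 2 else 'b' for i in range(50)]
    writer(plot_pair_color(flat, flat, colors), title='pairwise view', wait=True)
    writer(plot_round_color(flat, colors), title='circular view')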
| 29.448113
| 96
| 0.595547
|
4a17d16d5c57f6707afe4e735ec8004c2673a528
| 5,310
|
py
|
Python
|
GAN/Classifier.py
|
chaitrasj/Deep-Learning-for-Computer-Vision-DLCV-Assignment
|
80cb72b3a5c013290b91e0f1504a75fb627a465d
|
[
"MIT"
] | null | null | null |
GAN/Classifier.py
|
chaitrasj/Deep-Learning-for-Computer-Vision-DLCV-Assignment
|
80cb72b3a5c013290b91e0f1504a75fb627a465d
|
[
"MIT"
] | null | null | null |
GAN/Classifier.py
|
chaitrasj/Deep-Learning-for-Computer-Vision-DLCV-Assignment
|
80cb72b3a5c013290b91e0f1504a75fb627a465d
|
[
"MIT"
] | null | null | null |
# import torch, torchvision
import os
import torch
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.utils import save_image
from torch.utils.data import DataLoader
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from torch.nn import init
import argparse
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import torch.optim as optim
################ HYPER PARAMETERS ###################
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
# parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args(args=[])
print(opt)
cuda = True if torch.cuda.is_available() else False
############### LOADING DATA ###############
transform = transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
train_loader = DataLoader(datasets.MNIST('./mnist/', train=True, download=True,transform=transform),batch_size=opt.batch_size, shuffle=True)
test_loader = DataLoader(datasets.MNIST('./mnist/', train=False, download=True,transform=transform),batch_size=256, shuffle=True)
#################### UTILITY METHODS ######################
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
self.conv_1 = nn.Sequential(nn.Conv2d(1,32,5,1,0,True), nn.ReLU(), nn.Conv2d(32,32,5,1,0,True), nn.ReLU()) #28->24->20->10
self.maxpool_1 = nn.MaxPool2d(2,2)
self.conv_2 = nn.Sequential(nn.Conv2d(32,64,5,1,0,True), nn.ReLU(), nn.Conv2d(64,64,5,1,0,True), nn.ReLU()) #10->6->2->1
self.maxpool_2 = nn.MaxPool2d(2,2)
self.out = nn.Sequential(nn.Linear(64*(1)**2,512), nn.ReLU(), nn.Linear(512,10))
def forward(self,x):
x = self.conv_1(x)
x = self.maxpool_1(x)
x = self.conv_2(x)
x = self.maxpool_2(x)
x = x.view(x.size(0),-1)
x = self.out(x)
return x
classifier = Classifier()
classifier.apply(weights_init_normal)
classifier.cuda()
classifier.train()
criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(classifier.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer = optim.SGD(classifier.parameters(), lr=0.001, momentum=0.9)
running_loss = 0.0
for epoch in range (opt.epochs):
print('Epoch',epoch,'---------------------------------------------------')
total = 0
correct = 0
for iteration, sampled_batch in enumerate(train_loader):
optimizer.zero_grad()
img, label = sampled_batch
        out = classifier(img.cuda())
loss = criterion(out, label.cuda())
loss.backward()
optimizer.step()
_, predicted = torch.max(out.data,1)
total += len(label)
correct += (predicted == label.cuda()).sum().item()
running_loss += loss.item()
        if iteration % 100 == 99:    # print every 100 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, iteration + 1, running_loss / 100))
running_loss = 0.0
train_acc = 100.*correct/total
print('Train accuracy after epoch',epoch,train_acc)
######### TEST DATA ACCURACY ##########
with torch.no_grad():
total = 0
correct = 0
test_loss = 0
for iter_, sampled_batch in enumerate(test_loader):
img, label = sampled_batch
            out = classifier(img.cuda())
loss = criterion(out, label.cuda())
_, predicted = torch.max(out.data,1)
total += len(label)
correct += (predicted == label.cuda()).sum().item()
test_loss += loss.item()
test_acc = 100.*correct/total
test_loss = test_loss/total
print('After epoch',epoch,'test acc %f test loss %f' % (test_acc,test_loss))
if epoch % 5==0:
checkpoint = {
'classifier': classifier.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(checkpoint,'classifier_50_dim.pth.tar')
print('Finished Training')
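# Reload sketch (illustrative): restore the weights saved above and switch the
# network to evaluation mode before reusing it for inference.
checkpoint = torch.load('classifier_50_dim.pth.tar')
classifier.load_state_dict(checkpoint['classifier'])
classifier.eval()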
| 36.62069
| 140
| 0.618832
|
4a17d1df978fbf1606aa612c9ea3d0eaf72dd164
| 694
|
py
|
Python
|
mainland/__init__.py
|
moshez/mainland
|
4aadf63d6e971518940828b1cc0b648ff5629bdd
|
[
"MIT"
] | null | null | null |
mainland/__init__.py
|
moshez/mainland
|
4aadf63d6e971518940828b1cc0b648ff5629bdd
|
[
"MIT"
] | 1
|
2015-06-28T04:29:16.000Z
|
2015-06-28T04:29:16.000Z
|
mainland/__init__.py
|
moshez/mainland
|
4aadf63d6e971518940828b1cc0b648ff5629bdd
|
[
"MIT"
] | null | null | null |
# Copyright (c) Moshe Zadka
# See LICENSE for details.
"""mainland -- a main for Python"""
from mainland._version import get_versions as _get_versions
__version__ = _get_versions()['version']
_long_description = '''\
mainland_: A way to run Python scripts without console-scripts
.. _mainland: https://mainland.rtfd.org
'''
metadata = dict(
name='mainland',
description='Run your modules',
long_description=_long_description,
author='Moshe Zadka',
author_email='zadka.moshe@gmail.com',
license='MIT',
copyright='2015',
)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from mainland._main import main
main = main
| 21.030303
| 62
| 0.727666
|
4a17d415bf85136c5c163c69415c6f22cdb09dce
| 4,184
|
py
|
Python
|
python/scripts/demo/m3_demo_hand_viz_h2r3.py
|
ahoarau/m3meka
|
237739f0266ce60aaa3013b0d2b22fc07b6374c4
|
[
"MIT"
] | 1
|
2015-06-19T12:14:18.000Z
|
2015-06-19T12:14:18.000Z
|
python/scripts/demo/m3_demo_hand_viz_h2r3.py
|
semeyerz/m3meka
|
6e5d6b73ad3ebdd8429497923e601eae65d8b2fe
|
[
"MIT"
] | null | null | null |
python/scripts/demo/m3_demo_hand_viz_h2r3.py
|
semeyerz/m3meka
|
6e5d6b73ad3ebdd8429497923e601eae65d8b2fe
|
[
"MIT"
] | 2
|
2015-11-27T09:25:54.000Z
|
2021-08-16T16:29:22.000Z
|
#! /usr/bin/python
#Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import m3.gui as m3g
import m3.hand as m3h
import m3.toolbox as m3t
import m3.unit_conversion as m3u
import m3.component_factory as m3f
import Numeric as nu
import m3.rt_proxy as m3p
import yaml
import os
import roslib; roslib.load_manifest('m3_defs_ros')
import rospy
from sensor_msgs.msg import JointState
from roslib.msg import Header
import subprocess
class M3Proc:
def __init__(self):
self.proxy = m3p.M3RtProxy()
self.gui = m3g.M3Gui()
def stop(self):
self.proxy.stop()
def start(self):
self.proxy.start()
self.ndof_finger = 3
self.flex_factor_index = [0.3] * self.ndof_finger
self.flex_factor_ring = [0.3] * self.ndof_finger
self.flex_factor_pinky = [0.3] * self.ndof_finger
self.flex_factor_thumb = [0.3] * 2
self.p = subprocess.Popen(['roslaunch', 'm3_defs_ros', 'm3_launch.launch'])
rospy.init_node("joint_state_publisher")
self.pub = rospy.Publisher("/joint_states", JointState)
time.sleep(4.0)
hand_names=self.proxy.get_available_components('m3hand')
self.hands = []
self.hand_nums = []
for i in range(len(hand_names)):
self.hands.append(m3f.create_component(hand_names[i]))
self.proxy.subscribe_status(self.hands[i])
#self.proxy.publish_command(self.hands[i])
if hand_names[i][-2].isdigit():
self.hand_nums.append(hand_names[i][-2:])
else:
self.hand_nums.append(hand_names[i][-1])
#r_hand_ua_num = 14
self.ndof_hand_ua = 12
self.positions = []
self.joints = []
for j in range(len(self.hands)):
for i in range(self.ndof_hand_ua):
self.positions.append(0.0)
self.joints.append('m3joint_ua_mh'+str(self.hand_nums[j])+'_j'+str(i))
# Thumb: J0,J1,J2
# Index: J3, J4, J5
# Ring: J6,J7,J8
# Pinkie: J9, J10, J11
print 'Starting hand viz.'
while(True):
self.positions = []
self.proxy.step()
for i in range(len(self.hands)):
th =self.hands[i].get_theta_rad()
#Thumb
self.positions.append(-th[0]+1.57) #0
self.positions.append(th[1] * self.flex_factor_thumb[0])
self.positions.append(th[1] * self.flex_factor_thumb[1])
#Index
self.positions.append(th[2] * self.flex_factor_index[0])
self.positions.append(th[2] * self.flex_factor_index[1])
self.positions.append(th[2] * self.flex_factor_index[2])
#Ring
self.positions.append(th[3] * self.flex_factor_ring[0])
self.positions.append(th[3] * self.flex_factor_ring[1])
self.positions.append(th[3] * self.flex_factor_ring[2])
#Pinkie
self.positions.append(th[4] * self.flex_factor_pinky[0])
self.positions.append(th[4] * self.flex_factor_pinky[1])
self.positions.append(th[4] * self.flex_factor_pinky[2])
if self.pub is not None and not rospy.is_shutdown():
header = Header(0,rospy.Time.now(),'0')
self.pub.publish(JointState(header, self.joints, self.positions, [0]*len(self.positions), [0]*len(self.positions)))
else:
print 'Error...exiting.'
break
time.sleep(0.1)
if __name__ == '__main__':
t=M3Proc()
try:
t.start()
except (KeyboardInterrupt,EOFError):
pass
t.stop(force_safeop=False)
print 'Exiting hand viz.'
| 30.100719
| 119
| 0.699092
|
4a17d420e63355061892e706a2a00f38c76350d3
| 11,696
|
py
|
Python
|
tests/integration/ip_messaging/v2/service/test_channel.py
|
scotta/twilio-python
|
93cf463f914f55c4c4bd1c259b834953dd81609d
|
[
"MIT"
] | null | null | null |
tests/integration/ip_messaging/v2/service/test_channel.py
|
scotta/twilio-python
|
93cf463f914f55c4c4bd1c259b834953dd81609d
|
[
"MIT"
] | null | null | null |
tests/integration/ip_messaging/v2/service/test_channel.py
|
scotta/twilio-python
|
93cf463f914f55c4c4bd1c259b834953dd81609d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ChannelTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://ip-messaging.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels.create()
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels',
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:38Z",
"created_by": "username",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels.create()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels.list()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:38Z",
"created_by": "username",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v2.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.assertIsNotNone(actual)
| 45.6875
| 166
| 0.570793
|
4a17d46b33bab679d8e1de78336328ebdfe63c73
| 28,926
|
py
|
Python
|
tensorflow_probability/python/stats/quantiles_test.py
|
maksymsur/probability
|
331c943103e51df49a985895293c7e064a363ed5
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/stats/quantiles_test.py
|
maksymsur/probability
|
331c943103e51df49a985895293c7e064a363ed5
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/stats/quantiles_test.py
|
maksymsur/probability
|
331c943103e51df49a985895293c7e064a363ed5
|
[
"Apache-2.0"
] | 1
|
2020-06-04T07:27:39.000Z
|
2020-06-04T07:27:39.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for quantiles.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
rng = np.random.RandomState(0)
@test_util.run_all_in_graph_and_eager_modes
class BincountTest(tfp_test_util.TestCase):
def test_like_tf_math_bincount_if_axis_is_none(self):
arr = rng.randint(0, 10, size=(2, 3, 4)).astype(np.int32)
tf_counts, tfp_counts = self.evaluate(
[tf.math.bincount(arr),
tfp.stats.count_integers(arr)])
self.assertAllEqual(tf_counts, tfp_counts)
def test_like_tf_math_bincount_if_axis_is_all_the_dims(self):
arr = rng.randint(0, 10, size=(2, 3, 4)).astype(np.int32)
tf_counts, tfp_counts = self.evaluate(
[tf.math.bincount(arr),
tfp.stats.count_integers(arr, axis=[0, 1, 2])])
self.assertAllEqual(tf_counts, tfp_counts)
def test_3d_arr_axis_1_neg1_no_weights(self):
arr = rng.randint(0, 10, size=(2, 3, 4)).astype(np.int32)
counts = tfp.stats.count_integers(
arr, weights=None, axis=[1, -1], minlength=10)
# The first index will be length 10, but it isn't known statically because
# bincount needs to figure out how many bins are filled (even if minlength
# and maxlength are both specified).
self.assertAllEqual([2], counts.shape[1:])
counts_, counts_0_, counts_1_ = self.evaluate([
counts,
tf.math.bincount(arr[0], minlength=10),
tf.math.bincount(arr[1], minlength=10),
])
self.assertAllEqual([10, 2], counts_.shape)
self.assertAllClose(counts_0_, counts_[:, 0])
self.assertAllClose(counts_1_, counts_[:, 1])
def test_2d_arr_axis_0_yes_weights(self):
arr = rng.randint(0, 4, size=(3, 2)).astype(np.int32)
weights = rng.rand(3, 2)
counts = tfp.stats.count_integers(arr, weights=weights, axis=0, minlength=4)
# The first index will be length 4, but it isn't known statically because
# bincount needs to figure out how many bins are filled (even if minlength
# and maxlength are both specified).
self.assertAllEqual([2], counts.shape[1:])
counts_, counts_0_, counts_1_ = self.evaluate([
counts,
tf.math.bincount(arr[:, 0], weights=weights[:, 0], minlength=4),
tf.math.bincount(arr[:, 1], weights=weights[:, 1], minlength=4),
])
self.assertAllEqual([4, 2], counts_.shape)
self.assertAllClose(counts_0_, counts_[:, 0])
self.assertAllClose(counts_1_, counts_[:, 1])
@test_util.run_all_in_graph_and_eager_modes
class FindBinsTest(tfp_test_util.TestCase):
def test_1d_array_no_extend_lower_and_upper_dtype_int64(self):
x = [-1., 0., 4., 5., 10., 20.]
edges = [0., 5., 10.]
bins = tfp.stats.find_bins(x, edges, dtype=tf.int64)
self.assertDTypeEqual(bins, np.int64)
self.assertAllEqual((6,), bins.shape)
bins_ = self.evaluate(bins)
self.assertAllEqual([-1, 0, 0, 1, 1, 2], bins_)
def test_1d_array_extend_lower_and_upper(self):
x = [-1., 0., 4., 5., 10., 20.]
edges = [0., 5., 10.]
bins = tfp.stats.find_bins(
x, edges, extend_lower_interval=True, extend_upper_interval=True)
self.assertDTypeEqual(bins, np.float32)
self.assertAllEqual((6,), bins.shape)
bins_ = self.evaluate(bins)
self.assertAllEqual([0, 0, 0, 1, 1, 1], bins_)
def test_1d_array_no_extend_lower_and_upper(self):
x = [-1., 0., 4., 5., 10., 20.]
edges = [0., 5., 10.]
bins = tfp.stats.find_bins(
x, edges, extend_lower_interval=False, extend_upper_interval=False)
self.assertDTypeEqual(bins, np.float32)
self.assertAllEqual((6,), bins.shape)
bins_ = self.evaluate(bins)
self.assertAllEqual([np.nan, 0, 0, 1, 1, np.nan], bins_)
def test_x_is_2d_array_dtype_int32(self):
x = [[0., 8., 60.],
[10., 20., 3.]]
edges = [[0., 5., 10.],
[5., 7., 11.],
[10., 50., 100.]]
# The intervals for the first column are
# [0, 5), [5, 10]
# and for the second column
# [5, 7), [7, 50]
# and for the third column
# [10, 11), [11, 100]
expected_bins = [[0, 1, 1],
[1, 1, -1]]
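# Editorial note: x[1, 2] = 3. lies below the third column's lowest edge (10.)
# and the intervals are not extended in this test, so the out-of-range entry is
# reported as -1 under the integer dtype, mirroring the 1-D int64 case above.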
bins = tfp.stats.find_bins(x, edges, dtype=tf.int32)
self.assertDTypeEqual(bins, np.int32)
self.assertAllEqual((2, 3), bins.shape)
bins_ = self.evaluate(bins)
self.assertAllEqual(expected_bins, bins_)
def test_3d_array_has_expected_bins(self):
x = np.linspace(0., 1000, 1000, dtype=np.float32).reshape(10, 10, 10)
edges = [0., 500., 1000.]
bins = tfp.stats.find_bins(x, edges)
self.assertAllEqual(x.shape, bins.shape)
self.assertDTypeEqual(bins, np.float32)
flat_bins_ = np.ravel(self.evaluate(bins))
# Demonstrate that x crosses the 500 threshold at index 500
self.assertLess(x.ravel()[499], 500)
self.assertGreater(x.ravel()[500], 500)
self.assertAllEqual(np.zeros((500,)), flat_bins_[:500])
self.assertAllEqual(np.ones((500,)), flat_bins_[500:])
def test_large_random_array_has_expected_bin_fractions(self):
x = rng.rand(100, 99, 98)
edges = np.linspace(0., 1., 11) # Deciles
edges = edges.reshape(11, 1, 1) + np.zeros((99, 98))
bins = tfp.stats.find_bins(x, edges)
self.assertAllEqual(x.shape, bins.shape)
self.assertDTypeEqual(bins, np.float64)
bins_ = self.evaluate(bins)
self.assertAllClose((bins_ == 0).mean(), 0.1, rtol=0.05)
self.assertAllClose((bins_ == 1).mean(), 0.1, rtol=0.05)
self.assertAllClose((bins_ == 2).mean(), 0.1, rtol=0.05)
mask = (0.3 <= x) & (x < 0.4)
self.assertAllEqual(3. * np.ones((mask.sum(),)), bins_[mask])
def test_large_random_array_has_expected_bin_fractions_with_broadcast(self):
x = rng.rand(100, 99, 98)
# rank(edges) < rank(x), so it will broadcast.
edges = np.linspace(0., 1., 11) # Deciles
bins = tfp.stats.find_bins(x, edges)
self.assertAllEqual(x.shape, bins.shape)
self.assertDTypeEqual(bins, np.float64)
bins_ = self.evaluate(bins)
self.assertAllClose((bins_ == 0).mean(), 0.1, rtol=0.05)
self.assertAllClose((bins_ == 1).mean(), 0.1, rtol=0.05)
self.assertAllClose((bins_ == 2).mean(), 0.1, rtol=0.05)
mask = (0.3 <= x) & (x < 0.4)
self.assertAllEqual(3. * np.ones((mask.sum(),)), bins_[mask])
def test_too_few_edges_raises(self):
x = [1., 2., 3., 4.]
edges = [2.]
with self.assertRaisesRegexp(ValueError, '1 or more bin'):
tfp.stats.find_bins(x, edges)
@test_util.run_all_in_graph_and_eager_modes
class HistogramTest(tfp_test_util.TestCase):
def test_uniform_dist_in_1d_specify_extend_interval_and_dtype(self):
n_samples = 1000
x = rng.rand(n_samples)
counts = tfp.stats.histogram(
x,
edges=[-1., 0., 0.25, 0.5, 0.75, 0.9],
# Lowest intervals are [-1, 0), [0, 0.25), [0.25, 0.5)
extend_lower_interval=False,
# Highest intervals are [0.5, 0.75), [0.75, inf). Note the 0.9 is
# effectively ignored because we extend the upper interval.
extend_upper_interval=True,
dtype=tf.int32)
self.assertAllEqual((5,), counts.shape)
self.assertDTypeEqual(counts, np.int32)
self.assertAllClose(
[
# [-1.0, 0.0) [0.0, 0.25) [0.25, 0.5)
0. * n_samples, 0.25 * n_samples, 0.25 * n_samples,
# [0.5, 0.75) [0.75, inf) (the inf due to extension).
0.25 * n_samples, 0.25 * n_samples],
self.evaluate(counts),
# Standard error for each count is Sqrt[p * (1 - p) / (p * N)],
# which is approximately Sqrt[1 / (p * N)]. Bound by 4 times this,
# which means an unseeded test fails with probability 3e-5.
rtol=4 / np.sqrt(0.25 * n_samples))
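# Editorial worked example of the bound above: with p = 0.25 and N = 1000,
# Sqrt[1 / (p * N)] = Sqrt[1 / 250] ~= 0.063, so rtol = 4 * 0.063 ~= 0.25, and a
# 4-standard-error excursion of an approximately normal count occurs with
# probability on the order of a few times 1e-5, as stated in the comment above.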
def test_2d_uniform_reduce_axis_0(self):
n_samples = 1000
# Shape [n_samples, 2]
x = np.stack([rng.rand(n_samples), 1 + rng.rand(n_samples)], axis=-1)
# Intervals are:
edges = np.float64([-1., 0., 0.5, 1.0, 1.5, 2.0, 2.5])
counts = tfp.stats.histogram(x, edges=edges, axis=0)
self.assertAllEqual((6, 2), counts.shape)
self.assertDTypeEqual(counts, np.float64)
# x[:, 0] ~ Uniform(0, 1)
event_0_expected_counts = [
# [-1, 0) [0, 0.5) [0.5, 1)
0.0 * n_samples, 0.5 * n_samples, 0.5 * n_samples,
# [1, 1.5) [1.5, 2) [2, 2.5]
0.0 * n_samples, 0.0 * n_samples, 0.0 * n_samples,
]
# x[:, 1] ~ Uniform(1, 2)
event_1_expected_counts = [
# [-1, 0) [0, 0.5) [0.5, 1)
0.0 * n_samples, 0.0 * n_samples, 0.0 * n_samples,
# [1, 1.5) [1.5, 2) [2, 2.5]
0.5 * n_samples, 0.5 * n_samples, 0.0 * n_samples,
]
expected_counts = np.stack([event_0_expected_counts,
event_1_expected_counts], axis=-1)
self.assertAllClose(
expected_counts,
self.evaluate(counts),
# Standard error for each count is Sqrt[p * (1 - p) / (p * N)],
# which is approximately Sqrt[1 / (p * N)]. Bound by 4 times this,
# which means an unseeded test fails with probability 3e-5.
rtol=4 / np.sqrt(0.25 * n_samples))
def test_2d_uniform_reduce_axis_1_and_change_dtype(self):
n_samples = 1000
# Shape [2, n_samples]
x = np.stack([rng.rand(n_samples), 1 + rng.rand(n_samples)], axis=0)
# Intervals are:
edges = np.float64([-1., 0., 0.5, 1.0, 1.5, 2.0, 2.5])
counts = tfp.stats.histogram(x, edges=edges, axis=1, dtype=np.float32)
self.assertAllEqual((6, 2), counts.shape)
self.assertDTypeEqual(counts, np.float32)
# x[:, 0] ~ Uniform(0, 1)
event_0_expected_counts = [
# [-1, 0) [0, 0.5) [0.5, 1)
0.0 * n_samples, 0.5 * n_samples, 0.5 * n_samples,
# [1, 1.5) [1.5, 2) [2, 2.5]
0.0 * n_samples, 0.0 * n_samples, 0.0 * n_samples,
]
# x[:, 1] ~ Uniform(1, 2)
event_1_expected_counts = [
# [-1, 0) [0, 0.5) [0.5, 1)
0.0 * n_samples, 0.0 * n_samples, 0.0 * n_samples,
# [1, 1.5) [1.5, 2) [2, 2.5]
0.5 * n_samples, 0.5 * n_samples, 0.0 * n_samples,
]
expected_counts = np.stack([event_0_expected_counts,
event_1_expected_counts], axis=-1)
self.assertAllClose(
expected_counts,
self.evaluate(counts),
# Standard error for each count is Sqrt[p * (1 - p) / (p * N)],
# which is approximately Sqrt[1 / (p * N)]. Bound by 4 times this,
# which means an unseeded test fails with probability 3e-5.
rtol=4 / np.sqrt(0.25 * n_samples))
def test_2d_uniform_reduce_axis_0_edges_is_2d(self):
n_samples = 1000
# Shape [n_samples, 2]
x = np.stack([rng.rand(n_samples), 1 + rng.rand(n_samples)], axis=-1)
# Intervals are:
edges = np.float64([
# Edges for x[:, 0]
[0.0, 0.2, 0.7, 1.0],
# Edges for x[:, 1]
[1.1, 1.3, 2.0, 3.0],
])
# Now, edges.shape = [4, 2], so edges[:, i] is for x[:, i], i = 0, 1.
edges = edges.T
counts = tfp.stats.histogram(
x, edges=edges, axis=0, extend_lower_interval=True)
self.assertAllEqual((3, 2), counts.shape)
self.assertDTypeEqual(counts, np.float64)
# x[:, 0] ~ Uniform(0, 1)
event_0_expected_counts = [
# (-inf, 0.2) [0.2, 0.7) [0.7, 1)
0.2 * n_samples, 0.5 * n_samples, 0.3 * n_samples,
]
# x[:, 1] ~ Uniform(1, 2)
event_1_expected_counts = [
# (-inf, 1.3) [1.3, 2.0) [2.0, 3.0)
0.3 * n_samples, 0.7 * n_samples, 0.0 * n_samples,
]
expected_counts = np.stack([event_0_expected_counts,
event_1_expected_counts], axis=-1)
self.assertAllClose(
expected_counts,
self.evaluate(counts),
# Standard error for each count is Sqrt[p * (1 - p) / (p * N)],
# which is approximately Sqrt[1 / (p * N)]. Bound by 4 times this,
# which means an unseeded test fails with probability 3e-5.
rtol=4 / np.sqrt(0.25 * n_samples))
@test_util.run_all_in_graph_and_eager_modes
class PercentileTestWithLowerInterpolation(tfp_test_util.TestCase):
_interpolation = 'lower'
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_one_dim_odd_input_vector_q(self):
x = [1., 5., 3., 2., 4.]
q = np.array([0, 10, 25, 49.9, 50, 50.01, 90, 95, 100])
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual(q.shape, pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
pct = tfp.stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
# Get dim 1 with negative and positive indices.
pct_neg_index = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
pct_pos_index = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.shape)
self.assertAllEqual((2,), pct_pos_index.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct_neg_index))
self.assertAllClose(expected_percentile, self.evaluate(pct_pos_index))
def test_simple(self):
# Simple test that exposed something the other 1-D tests didn't.
x = np.array([1., 2., 4., 50.])
q = 10
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, keep_dims=True, axis=[0])
self.assertAllEqual((1, 2), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
pct = tfp.stats.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input_q_vector(self):
x = rng.rand(3, 4, 5, 6)
q = [0.25, 0.75]
for axis in [None, 0, (-1, 1)]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=axis)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input_q_vector_and_keepdims(self):
x = rng.rand(3, 4, 5, 6)
q = [0.25, 0.75]
for axis in [None, 0, (-1, 1)]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=axis, keepdims=True)
pct = tfp.stats.percentile(
x, q=q, interpolation=self._interpolation, axis=axis, keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
pct = tfp.stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = tf1.placeholder_with_default(x, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
pct = tfp.stats.percentile(
x_ph, q=0.77, interpolation=self._interpolation, axis=axis)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = tf1.placeholder_with_default(x, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
pct = tfp.stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_with_integer_dtype(self):
if self._interpolation in {'linear', 'midpoint'}:
self.skipTest('Skipping integer dtype test for interpolation {}'.format(
self._interpolation))
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
pct = tfp.stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(tf.int32, pct.dtype)
self.assertAllEqual((), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_nan_propagation(self):
qs = [0, 10, 20, 49, 50, 51, 60, 80, 100]
for xs in [[float('nan'), 1.0],
[1.0, float('nan')],
[1.0, 2.0, 3.0, 4.0, float('nan')],
[1.0, float('nan'), 2.0, 3.0, 4.0]]:
# Test each percentile individually
for q in qs:
expected_percentile = np.percentile(
xs, q=q, interpolation=self._interpolation)
self.assertTrue(np.isnan(expected_percentile))
pct = tfp.stats.percentile(xs, q=q, interpolation=self._interpolation)
self.assertTrue(self.evaluate(tf.math.is_nan(pct)))
# Test vector percentile as well
expected_percentiles = np.percentile(
xs, q=qs, interpolation=self._interpolation)
pcts = tfp.stats.percentile(xs, q=qs, interpolation=self._interpolation)
self.assertAllEqual(expected_percentiles, pcts)
class PercentileTestWithLinearInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = 'linear'
def test_integer_dtype_raises(self):
with self.assertRaisesRegexp(TypeError, 'not allowed with dtype'):
tfp.stats.percentile(x=[1, 2], q=30, interpolation='linear')
def test_grads_at_sample_pts_with_no_preserve_gradients(self):
dist = tfp.distributions.Normal(np.float64(0), np.float64(1))
x = dist.sample(10001, seed=0)
# 50th quantile will lie exactly on a data point.
# 49.123... will not
q = tf.constant(np.array([50, 49.123456789])) # Percentiles, in [0, 100]
analytic_pct, grad_analytic_pct = tfp.math.value_and_gradient(
lambda q_: dist.quantile(q_ / 100.), q)
sample_pct, grad_sample_pct = tfp.math.value_and_gradient(
lambda q_: tfp.stats.percentile( # pylint: disable=g-long-lambda
x, q_, interpolation='linear', preserve_gradients=False),
q)
[
analytic_pct,
d_analytic_pct_dq,
sample_pct,
d_sample_pct_dq,
] = self.evaluate([
analytic_pct,
grad_analytic_pct,
sample_pct,
grad_sample_pct,
])
self.assertAllClose(analytic_pct, sample_pct, atol=0.05)
# Near the median, the normal PDF is approximately a constant c, with
# c = 1 / sqrt(2 * pi). So the cdf is approximately F(x) = 1/2 + c * x,
# and the quantile function is approximately F^{-1}(y) = (y - 1/2) / c,
# whose slope sqrt(2 * pi) appears below divided by 100 because q is in percent.
self.assertAllClose(np.sqrt(2 * np.pi) / 100 * np.ones([2]),
d_analytic_pct_dq, atol=1e-4)
# At the 50th percentile exactly, the sample gradient is exactly zero!
# This is due to preserve_gradients == False.
self.assertAllEqual(0., d_sample_pct_dq[0])
# Tolerance at the other point is terrible (2x), but this is a sample
# quantile based gradient.
self.assertAllClose(
d_analytic_pct_dq[1], d_sample_pct_dq[1], atol=0, rtol=2)
# The absolute values are close though (but tiny).
self.assertAllClose(
d_analytic_pct_dq[1], d_sample_pct_dq[1], atol=0.05, rtol=0)
def test_grads_at_sample_pts_with_yes_preserve_gradients(self):
dist = tfp.distributions.Normal(np.float64(0), np.float64(1))
x = dist.sample(10001, seed=0)
# 50th quantile will lie exactly on a data point.
# 49.123... will not
q = tf.constant(np.array([50, 49.123456789])) # Percentiles, in [0, 100]
analytic_pct, grad_analytic_pct = tfp.math.value_and_gradient(
lambda q_: dist.quantile(q_ / 100.), q)
sample_pct, grad_sample_pct = tfp.math.value_and_gradient(
lambda q_: tfp.stats.percentile( # pylint: disable=g-long-lambda
x, q_, interpolation='linear', preserve_gradients=True),
q)
[
analytic_pct,
d_analytic_pct_dq,
sample_pct,
d_sample_pct_dq,
] = self.evaluate([
analytic_pct,
grad_analytic_pct,
sample_pct,
grad_sample_pct,
])
self.assertAllClose(analytic_pct, sample_pct, atol=0.05)
# Near the median, the normal PDF is approximately a constant c, with
# c = 1 / sqrt(2 * pi). So the cdf is approximately F(x) = 1/2 + c * x,
# and the quantile function is approximately F^{-1}(y) = (y - 1/2) / c,
# whose slope sqrt(2 * pi) appears below divided by 100 because q is in percent.
self.assertAllClose(np.sqrt(2 * np.pi) / 100 * np.ones([2]),
d_analytic_pct_dq, atol=1e-4)
# At the 50th percentile exactly, the sample gradient is not exactly zero!
# This is due to preserve_gradients == True.
self.assertNotEqual(0., d_sample_pct_dq[0])
# Tolerance is terrible (2x), but this is a sample quantile based gradient.
self.assertAllClose(d_analytic_pct_dq, d_sample_pct_dq, atol=0, rtol=2)
# The absolute values are close though (but tiny).
self.assertAllClose(d_analytic_pct_dq, d_sample_pct_dq, atol=0.1, rtol=0)
class PercentileTestWithMidpointInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = 'midpoint'
def test_integer_dtype_raises(self):
with self.assertRaisesRegexp(TypeError, 'not allowed with dtype'):
tfp.stats.percentile(x=[1, 2], q=30, interpolation='midpoint')
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = 'higher'
class PercentileTestWithNearestInterpolation(tfp_test_util.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = 'nearest'
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
pct = tfp.stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
pct = tfp.stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.shape)
self.assertAllClose(expected_percentile, self.evaluate(pct))
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, 'interpolation'):
tfp.stats.percentile(x, q=0.5, interpolation='bad')
def test_2d_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, 'Expected.*ndims'):
tfp.stats.percentile(x, q=[[0.5]])
def test_2d_q_raises_dynamic(self):
if tf.executing_eagerly(): return
x = [1., 5., 3., 2., 4.]
q_ph = tf1.placeholder_with_default([[0.5]], shape=None)
pct = tfp.stats.percentile(x, q=q_ph, validate_args=True,
interpolation=self._interpolation)
with self.assertRaisesOpError('rank'):
self.evaluate(pct)
def test_finds_max_of_long_array(self):
# d - 1 == d in float32 and d = 3e7.
# So this test only passes if we use double for the percentile indices.
# If float is used, it fails with InvalidArgumentError about an index out of
# bounds.
x = tf.linspace(0., 3e7, num=int(3e7))
minval = tfp.stats.percentile(x, q=0, validate_args=True,
interpolation=self._interpolation)
self.assertAllEqual(0, self.evaluate(minval))
@test_util.run_all_in_graph_and_eager_modes
class QuantilesTest(tfp_test_util.TestCase):
"""Test for quantiles. Most functionality tested implicitly via percentile."""
def test_quartiles_of_vector(self):
x = tf.linspace(0., 1000., 10000)
cut_points = tfp.stats.quantiles(x, num_quantiles=4)
self.assertAllEqual((5,), cut_points.shape)
cut_points_ = self.evaluate(cut_points)
self.assertAllClose([0., 250., 500., 750., 1000.], cut_points_, rtol=0.002)
def test_deciles_of_rank_3_tensor(self):
x = rng.rand(3, 100000, 2)
cut_points = tfp.stats.quantiles(x, num_quantiles=10, axis=1)
self.assertAllEqual((11, 3, 2), cut_points.shape)
cut_points_ = self.evaluate(cut_points)
# cut_points_[:, i, j] should all be about the same.
self.assertAllClose(np.linspace(0, 1, 11), cut_points_[:, 0, 0], atol=0.03)
self.assertAllClose(np.linspace(0, 1, 11), cut_points_[:, 1, 1], atol=0.03)
if __name__ == '__main__':
tf.test.main()
| 39.089189
| 95
| 0.635795
|
4a17d48d8171de59d4caa73fd13e0ed15a3f160d
| 18,819
|
py
|
Python
|
dev/merge_spark_pr.py
|
irgb/spark
|
ec8973d1245d4a99edeb7365d7f4b0063ac31ddf
|
[
"Apache-2.0",
"MIT"
] | 1
|
2015-07-17T14:30:08.000Z
|
2015-07-17T14:30:08.000Z
|
dev/merge_spark_pr.py
|
newscred/spark
|
257236c3e17906098f801cbc2059e7a9054e8cab
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-06-23T21:23:30.000Z
|
2021-06-23T21:23:30.000Z
|
dev/merge_spark_pr.py
|
newscred/spark
|
257236c3e17906098f801cbc2059e7a9054e8cab
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-07-23T14:03:42.000Z
|
2020-07-23T14:03:42.000Z
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have a local Spark git folder and that you
# have added remotes corresponding to both (i) the github apache Spark
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_HOME", os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
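# (Editorial example) the token is read from the environment, so a typical
# invocation would be, e.g. (hypothetical token value):
#   export GITHUB_OAUTH_KEY=<token>
#   ./dev/merge_spark_pr.py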
GITHUB_BASE = "https://github.com/apache/spark/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/spark"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print "Exceeded the GitHub API rate limit; see the instructions in " + \
"dev/merge_spark_pr.py to configure an OAuth token for making authenticated " + \
"GitHub requests."
else:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print cmd
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
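# Editorial usage note: the string form above is split naively on single spaces,
# so commands whose arguments contain spaces should use the list form, e.g.
#   run_cmd("git checkout master")
#   run_cmd(['git', 'commit', '-m', 'a message with spaces'])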
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Spark.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" is required for GitHub to correctly close the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
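# Editorial example (with hypothetical version names): given un-released versions
# sorted newest->oldest as [2.0.0, 1.3.1, 1.3.0] and branch "branch-1.3", the
# filter keeps [1.3.1, 1.3.0] and [-1] returns 1.3.0, the oldest un-released 1.3.x.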
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("SPARK")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
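# Editorial example: if master maps to 1.1.0 and branch-1.0 maps to 1.0.0, then
# for v = "1.1.0" the computed previous version "1.0.0" is already present, so
# "1.1.0" is dropped and only the release-branch version "1.0.0" is suggested.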
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX] [MLLIB] Issue"
>>> standardize_jira_ref("[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref("[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123] [PROJECT INFRA] [WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954] [MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref("SPARK-1094 Support MiMa for reporting binary compatibility accross versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility accross versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146] [WIP] Vagrant support for Spark'
>>> standardize_jira_ref("SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref("[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250] [SPARK-6146] [SPARK-5911] [SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ' '.join(jira_refs).strip() + " " + ' '.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = run_cmd("git rev-parse HEAD")[:8]
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % pr["title"]
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print "Using modified title:"
else:
title = pr["title"]
print "Using original title:"
print title
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
if JIRA_USERNAME and JIRA_PASSWORD:
continue_maybe("Would you like to update an associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(title, merged_refs, jira_comment)
else:
print "JIRA_USERNAME and JIRA_PASSWORD not set"
print "Exiting without trying to close the associated JIRA."
else:
print "Could not find jira-python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
main()
| 41.543046
| 128
| 0.657368
|
4a17d4d5eed5e9470ea73849fcfd33635560e147
| 693
|
py
|
Python
|
data_statistics/cambridge.py
|
splitstrument/dataset-creation
|
c3fadf58ce3cb74d2edb1832df3f01de3d709061
|
[
"MIT"
] | null | null | null |
data_statistics/cambridge.py
|
splitstrument/dataset-creation
|
c3fadf58ce3cb74d2edb1832df3f01de3d709061
|
[
"MIT"
] | null | null | null |
data_statistics/cambridge.py
|
splitstrument/dataset-creation
|
c3fadf58ce3cb74d2edb1832df3f01de3d709061
|
[
"MIT"
] | null | null | null |
import os
import group_suggester
def parse_tracks(cambridge_path, target_instruments, track_database):
track_counter = 0
for track_name in os.listdir(cambridge_path):
if not track_database.track_known(track_name):
track_folder = os.path.join(cambridge_path, track_name)
available_instruments, stems = group_suggester.get_stems(target_instruments, track_database, track_folder)
track_database.save_track({
'name': track_name,
'instruments': available_instruments,
'stems': stems
})
track_counter += 1
print('Parsed {0} Cambridge tracks'.format(track_counter))
| 31.5
| 118
| 0.666667
|
4a17d5a3aaf74aaed420b27ba3cc2ffc318396d4
| 67,065
|
py
|
Python
|
agdc/stacker.py
|
alex-ip/agdc
|
9e9eb556c33792440a3736f64cd5f628cf3a1385
|
[
"BSD-3-Clause"
] | null | null | null |
agdc/stacker.py
|
alex-ip/agdc
|
9e9eb556c33792440a3736f64cd5f628cf3a1385
|
[
"BSD-3-Clause"
] | null | null | null |
agdc/stacker.py
|
alex-ip/agdc
|
9e9eb556c33792440a3736f64cd5f628cf3a1385
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Stacker class implementation to create temporal stacks. Virtual stack_derived() function
creates un-masked temporal stacks for all available bands.
Should be subclassed for custom algorithms.
Created on 05/10/2012
@author: Alex Ip
'''
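# A minimal subclassing sketch (hypothetical class name; stack_derived()
# drives the process and calls derive_datasets() once per timeslice - see
# the abstract derive_datasets() docstring below for the expected dict
# shapes):
#
#   class MyStacker(Stacker):
#       def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
#           # Return {stack_vrt_path: timeslice_metadata_dict}, or None to
#           # skip this timeslice.
#           ...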
from __future__ import absolute_import
import os
import sys
import argparse
import logging
import re
from osgeo import gdal
from copy import copy
from datetime import datetime
from datetime import time
from scipy import ndimage
import numpy
from time import sleep
from eotools.execute import execute
from eotools.utils import log_multiline
from agdc import DataCube
from agdc.band_lookup import BandLookup
PQA_CONTIGUITY = 256 # contiguity = bit 8
DEFAULT_BAND_LOOKUP_SCHEME = 'LANDSAT-UNADJUSTED'
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:
logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
class Stacker(DataCube):
def parse_args(self):
"""Parse the command line arguments.
Returns:
argparse namespace object
"""
logger.debug(' Calling parse_args()')
_arg_parser = argparse.ArgumentParser('stacker')
        # N.B: modtran_root is a direct override of a config entry
# and its variable name must be prefixed with "_" to allow lookup in conf file
_arg_parser.add_argument('-C', '--config', dest='config_file',
default=os.path.join(self.agdc_root, 'agdc_default.conf'),
help='Stacker configuration file')
_arg_parser.add_argument('-d', '--debug', dest='debug',
default=False, action='store_const', const=True,
help='Debug mode flag')
_arg_parser.add_argument('-x', '--x_index', dest='x_index',
required=False, default=None,
help='x-index of tile to be stacked')
_arg_parser.add_argument('-y', '--y_index', dest='y_index',
required=False, default=None,
help='y-index of tile to be stacked')
_arg_parser.add_argument('-o', '--output', dest='output_dir',
required=False, default=1,
help='Output directory path')
_arg_parser.add_argument('-s', '--start_date', dest='start_date',
required=False, default=None,
help='Start Date in dd/mm/yyyy format')
_arg_parser.add_argument('-e', '--end_date', dest='end_date',
required=False, default=None,
help='End Date in dd/mm/yyyy format')
_arg_parser.add_argument('-a', '--satellite', dest='satellite',
required=False, default=None,
help='Short Satellite name (e.g. LS5, LS7)')
_arg_parser.add_argument('-n', '--sensor', dest='sensor',
required=False, default=None,
help='Sensor Name (e.g. TM, ETM+)')
_arg_parser.add_argument('-t', '--tile_type', dest='default_tile_type_id',
required=False, default=None,
help='Tile type ID of tiles to be stacked')
_arg_parser.add_argument('-p', '--path', dest='path',
required=False, default=None,
help='WRS path of tiles to be stacked')
_arg_parser.add_argument('-r', '--row', dest='row',
required=False, default=None,
help='WRS row of tiles to be stacked')
_arg_parser.add_argument('--refresh', dest='refresh',
default=False, action='store_const', const=True,
help='Refresh mode flag to force updating of existing files')
_arg_parser.add_argument('-of', '--out_format', dest='out_format',
required=False, default=None,
                                 help='Specify a GDAL-compliant output format for the file to be physically generated. If unset, then only the VRT will be generated. Example use -of ENVI')
_arg_parser.add_argument('-sfx', '--suffix', dest='suffix',
required=False, default=None,
help='Specify an output suffix for the physically generated file. Is only applied when -of <FORMAT> is set.')
_arg_parser.add_argument('-b', '--band_lookup_scheme', dest='band_lookup_scheme',
required=False, default=DEFAULT_BAND_LOOKUP_SCHEME,
help='Specify a valid band lookup scheme name (default="%s")' % DEFAULT_BAND_LOOKUP_SCHEME)
_arg_parser.add_argument('-c', '--complete-only', dest='complete_only',
default=False, action='store_const', const=True,
                                 help='Only return complete sets (i.e. NBAR & PQA)')
_arg_parser.add_argument('-l', '--levels', dest='levels',
required=False, default=None,
help='Comma-separated list of level names which must be present for a timeslice to be included, e.g: NBAR,PQA')
args, unknown_args = _arg_parser.parse_known_args()
return args
def __init__(self, source_datacube=None, default_tile_type_id=None, config=None):
"""Constructor
Arguments:
source_datacube: Optional DataCube object whose connection and data will be shared
tile_type_id: Optional tile_type_id value (defaults to config file value)
config: Optional configuration file path. Will use command line supplied value or default if not given
"""
if source_datacube:
# Copy values from source_datacube and then override command line args
self.__dict__ = copy(source_datacube.__dict__)
args = self.parse_args()
# Set instance attributes for every value in command line arguments file
for attribute_name in args.__dict__.keys():
attribute_value = args.__dict__[attribute_name]
self.__setattr__(attribute_name, attribute_value)
else:
DataCube.__init__(self, config) # Call inherited constructor
if self.debug:
console_handler.setLevel(logging.DEBUG)
# Attempt to parse dates from command line arguments or config file
self.default_tile_type_id = default_tile_type_id or int(self.default_tile_type_id)
try:
self.start_date = datetime.strptime(self.start_date, '%Y%m%d').date()
except:
try:
self.start_date = datetime.strptime(self.start_date, '%d/%m/%Y').date()
except:
self.start_date = None
try:
self.end_date = datetime.strptime(self.end_date, '%Y%m%d').date()
except:
try:
self.end_date = datetime.strptime(self.end_date, '%d/%m/%Y').date()
except:
                self.end_date = None
try:
self.x_index = int(self.x_index)
except:
self.x_index = None
try:
self.y_index = int(self.y_index)
except:
self.y_index = None
# Path/Row values to permit single-scene stacking
try:
self.path = int(self.path)
except:
self.path = None
try:
self.row = int(self.row)
except:
self.row = None
# Other variables set from config file only - not used
try:
self.min_path = int(self.min_path)
except:
self.min_path = None
try:
self.max_path = int(self.max_path)
except:
self.max_path = None
try:
self.min_row = int(self.min_row)
except:
self.min_row = None
try:
self.max_row = int(self.max_row)
except:
self.max_row = None
# Convert comma-separated strings into list
if self.levels:
self.levels = self.levels.split(',')
# Create nested dict for given lookup_scheme_name with levels keyed by:
# tile_type_id, satellite_tag, sensor_name, level_name, band_tag
band_lookup = BandLookup(self) # Don't bother initialising it - we only want the lookup dict
self.band_lookup_dict = band_lookup.band_lookup_dict[self.band_lookup_scheme]
def stack_files(self, timeslice_info_list, stack_dataset_path, band1_vrt_path=None, overwrite=False):
if os.path.exists(stack_dataset_path) and not overwrite:
logger.debug('Stack VRT file %s already exists', stack_dataset_path)
return
band_no = timeslice_info_list[0]['tile_layer'] # Should all be the same
build_vrt = True
if band_no == 1: # First band
intermediate_path = stack_dataset_path # No post-processing required
elif band1_vrt_path: # band1_vrt_path provided - use this as source for new VRT
intermediate_path = band1_vrt_path
build_vrt = False
else: # No band1_vrt_path provided
intermediate_path = re.sub('\.vrt$', '.tmp', stack_dataset_path)
file_list_path = re.sub('\.vrt$', '.txt', stack_dataset_path)
if build_vrt:
logger.info('Creating %d layer stack VRT file %s', len(timeslice_info_list), stack_dataset_path)
list_file = open(file_list_path, 'w')
list_file.write('\n'.join([timeslice_info['tile_pathname'] for timeslice_info in timeslice_info_list]))
list_file.close()
del list_file
command_string = 'gdalbuildvrt'
if not self.debug:
command_string += ' -q'
# command_string += ' -separate -overwrite %s \\\n%s' % (
# intermediate_path,
# ' \\\n'.join([timeslice_info['tile_pathname'] for timeslice_info in timeslice_info_list])
# )
command_string += ' -separate -input_file_list %s -overwrite %s' % (
file_list_path,
intermediate_path
)
if not self.debug:
command_string += '\nrm %s' % file_list_path
else:
command_string = ''
if band_no > 1: # Need to post process intermediate VRT file
if command_string:
command_string += '\n'
command_string += 'cat %s | sed s/\<SourceBand\>1\<\\\\/SourceBand\>/\<SourceBand\>%d\<\\\\/SourceBand\>/g > %s' % (intermediate_path, band_no, stack_dataset_path)
# command_string += '\nchmod 777 %s' % stack_dataset_path
if build_vrt: # Intermediate file created for band > 1
if not self.debug: # Remove temporary intermediate file just created
command_string += '\nrm %s' % intermediate_path
else:
logger.info('Creating %d layer stack VRT file %s from %s', len(timeslice_info_list), stack_dataset_path, intermediate_path)
logger.debug('command_string = %s', command_string)
result = execute(command_string=command_string)
if result['stdout']:
log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\t')
if result['stderr']:
log_multiline(logger.debug, result['stderr'], 'stderr from ' + command_string, '\t')
if result['returncode']:
            raise Exception('%s failed' % command_string)
temporal_stack_dataset = gdal.Open(stack_dataset_path)
assert temporal_stack_dataset, 'Unable to open VRT %s' % stack_dataset_path
for band_index in range(len(timeslice_info_list)):
band = temporal_stack_dataset.GetRasterBand(band_index + 1)
# Copy dict and convert to strings for metadata
metadata_dict = dict(timeslice_info_list[band_index])
for key in metadata_dict.keys():
metadata_dict[key] = str(metadata_dict[key])
band.SetMetadata(metadata_dict)
log_multiline(logger.debug, band.GetMetadata(), 'band.GetMetadata()', '\t')
# Need to set nodata values for each band - can't seem to do it in gdalbuildvrt
nodata_value = timeslice_info_list[band_index]['nodata_value']
if nodata_value is not None:
logger.debug('nodata_value = %s', nodata_value)
band.SetNoDataValue(nodata_value)
temporal_stack_dataset.FlushCache()
def stack_tile(self, x_index, y_index, stack_output_dir=None,
start_datetime=None, end_datetime=None,
satellite=None, sensor=None,
tile_type_id=None,
path=None,
row=None,
create_band_stacks=True,
disregard_incomplete_data=False,
levels=[]
):
"""
Function which returns a data structure and optionally creates band-wise VRT dataset stacks
Arguments:
x_index, y_index: Integer indices of tile to stack
stack_output_dir: String defining output directory for band stacks
(not used if create_band_stacks == False)
start_datetime, end_datetime: Optional datetime objects delineating temporal range
satellite, sensor: Optional satellite and sensor string parameters to filter result set
tile_type_id: Integer value of tile_type_id to search
path: WRS path of source scenes
row: WRS row of source scenes
create_band_stacks: Boolean flag indicating whether band stack VRT files should be produced
disregard_incomplete_data: Boolean flag indicating whether to constrain results to tiles with
complete L1T, NBAR and PQA data. This ensures identical numbers of stack layers but
introduces a hard-coded constraint around processing levels.
"""
assert stack_output_dir or not create_band_stacks, 'Output directory must be supplied for temporal stack generation'
tile_type_id = tile_type_id or self.default_tile_type_id
#
# stack_tile local functions
#
#===============================================================================
# def cache_mosaic_files(mosaic_file_list, mosaic_dataset_path, overwrite=False, pqa_data=False):
# logger.debug('cache_mosaic_files(mosaic_file_list=%s, mosaic_dataset_path=%s, overwrite=%s, pqa_data=%s) called', mosaic_file_list, mosaic_dataset_path, overwrite, pqa_data)
#
# if pqa_data: # Need to handle PQA datasets manually and produce a real output file
# tile_type_info = self.tile_type_dict[tile_type_id]
#
# # Change the output file extension to match the source (This is a bit ugly)
# mosaic_dataset_path = re.sub('\.\w+$',
# tile_type_info['file_extension'],
# mosaic_dataset_path)
#
# if os.path.exists(mosaic_dataset_path) and not overwrite:
# logger.debug('Mosaic file %s already exists', mosaic_dataset_path)
# return mosaic_dataset_path
#
# logger.info('Creating PQA mosaic file %s', mosaic_dataset_path)
#
# #MPH commented this out since we are working with a file path rather than a database connection
# #assert self.lock_object(mosaic_dataset_path), 'Unable to acquire lock for %s' % mosaic_dataset_path
#
# template_dataset = gdal.Open(mosaic_file_list[0])
#
# gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
#
# #Set datatype formats appropriate to Create() and numpy
# gdal_dtype = template_dataset.GetRasterBand(1).DataType
# numpy_dtype = gdal.GetDataTypeName(gdal_dtype)
#
# mosaic_dataset = gdal_driver.Create(mosaic_dataset_path,
# template_dataset.RasterXSize, template_dataset.RasterYSize,
# 1, gdal_dtype,
# tile_type_info['format_options'].split(','),
# )
# assert mosaic_dataset, 'Unable to open output dataset %s'% output_dataset
#
# mosaic_dataset.SetGeoTransform(template_dataset.GetGeoTransform())
# mosaic_dataset.SetProjection(template_dataset.GetProjection())
#
# # if tile_type_info['file_format'] == 'netCDF':
# # pass #TODO: make vrt here - not really needed for single-layer file
#
# output_band = mosaic_dataset.GetRasterBand(1)
# # Set all background values of data_array to FFFF (i.e. all ones)
# data_array=numpy.ones(shape=(template_dataset.RasterYSize, template_dataset.RasterXSize),dtype=numpy_dtype) * -1
# # Set all background values of no_data_array to 0 (i.e. all zeroes)
# no_data_array=numpy.zeros(shape=(template_dataset.RasterYSize, template_dataset.RasterXSize),dtype=numpy_dtype)
#
# overall_data_mask = numpy.zeros((mosaic_dataset.RasterYSize,
# mosaic_dataset.RasterXSize),
# dtype=numpy.bool)
# del template_dataset
#
# # Populate data_array with -masked PQA data
# for pqa_dataset_index in range(len(mosaic_file_list)):
# pqa_dataset_path = mosaic_file_list[pqa_dataset_index]
# pqa_dataset = gdal.Open(pqa_dataset_path)
# assert pqa_dataset, 'Unable to open %s' % pqa_dataset_path
# pqa_array = pqa_dataset.ReadAsArray()
# del pqa_dataset
# logger.debug('Opened %s', pqa_dataset_path)
#
# # Treat contiguous and non-contiguous pixels separately
# # Set all contiguous pixels to true in data_mask
# pqa_data_mask = (pqa_array & PQA_CONTIGUITY).astype(numpy.bool)
# # Expand overall_data_mask to true for any contiguous pixels
# overall_data_mask = overall_data_mask | pqa_data_mask
# # Perform bitwise-and on contiguous pixels in data_array
# data_array[pqa_data_mask] &= pqa_array[pqa_data_mask]
# # Perform bitwise-or on non-contiguous pixels in no_data_array
# no_data_array[~pqa_data_mask] |= pqa_array[~pqa_data_mask]
#
# log_multiline(logger.debug, pqa_array, 'pqa_array', '\t')
# log_multiline(logger.debug, pqa_data_mask, 'pqa_data_mask', '\t')
# log_multiline(logger.debug, overall_data_mask, 'overall_data_mask', '\t')
# log_multiline(logger.debug, data_array, 'data_array', '\t')
# log_multiline(logger.debug, no_data_array, 'no_data_array', '\t')
#
# # Set all pixels which don't contain data to combined no-data values (should be same as original no-data values)
# data_array[~overall_data_mask] = no_data_array[~overall_data_mask]
#
# log_multiline(logger.debug, data_array, 'FINAL data_array', '\t')
#
# output_band.WriteArray(data_array)
# mosaic_dataset.FlushCache()
#
# else: # Anything other than PQA
# if os.path.exists(mosaic_dataset_path) and not overwrite:
# logger.debug('Mosaic VRT file %s already exists', mosaic_dataset_path)
# return mosaic_dataset_path
#
# logger.info('Creating mosaic VRT file %s', mosaic_dataset_path)
# assert self.lock_object(mosaic_dataset_path), 'Unable to acquire lock for %s' % mosaic_dataset_path
#
# command_string = 'gdalbuildvrt'
# if not self.debug:
# command_string += ' -q'
# command_string += ' -overwrite %s \\\n%s' % (
# mosaic_dataset_path,
# ' \\\n'.join(mosaic_file_list)
# )
# command_string += '\nchmod 777 %s' % mosaic_dataset_path
#
# logger.debug('command_string = %s', command_string)
#
# result = execute(command_string=command_string)
#
# if result['stdout']:
# log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\t')
#
# if result['stderr']:
# log_multiline(logger.debug, result['stderr'], 'stderr from ' + command_string, '\t')
#
# if result['returncode']:
# raise Exception('%s failed', command_string)
#
# # Check for corrupted file and remove it
# try:
# assert gdal.Open(mosaic_dataset_path), 'Unable to open mosaic dataset %s. Attempting to remove it.' % mosaic_dataset_path
# except:
# self.remove(mosaic_dataset_path)
# raise
#
# self.unlock_object(mosaic_dataset_path)
#
# return mosaic_dataset_path # Return potentially modified filename
#
# def create_mosaic_dir(mosaic_dir):
# command_string = 'mkdir -p %s' % mosaic_dir
# command_string += '\nchmod 777 %s' % mosaic_dir
#
# logger.debug('command_string = %s', command_string)
#
# result = execute(command_string=command_string)
#
# if result['stdout']:
# log_multiline(logger.debug, result['stdout'], 'stdout from ' + command_string, '\t')
#
# if result['returncode']:
# log_multiline(logger.error, result['stderr'], 'stderr from ' + command_string, '\t')
# raise Exception('%s failed', command_string)
#
#
# def record_timeslice_information(timeslice_info, mosaic_file_list, stack_dict):
#
# if len(mosaic_file_list) > 1: # Mosaic required - cache under tile directory
# mosaic_dir = os.path.join(os.path.dirname(timeslice_info['tile_pathname']),
# 'mosaic_cache')
# if not os.path.isdir(mosaic_dir):
# create_mosaic_dir(mosaic_dir)
#
# timeslice_info['tile_pathname'] = os.path.join(
# mosaic_dir,
# re.sub(r'\.\w+$', '.vrt', os.path.basename(timeslice_info['tile_pathname']))
# )
#
# # N.B: cache_mosaic_files function may modify filename
# timeslice_info['tile_pathname'] = \
# cache_mosaic_files(mosaic_file_list, timeslice_info['tile_pathname'],
# overwrite=self.refresh, pqa_data=(timeslice_info['level_name'] == 'PQA'))
#
# stack_dict[timeslice_info['start_datetime']] = timeslice_info
#===============================================================================
#
# stack_tile method body
#
db_cursor2 = self.db_connection.cursor()
# Compose tuples from single values (TEMPORARY ONLY)
#TODO: Change stack_tile parameters to allow multi-value tuples
tile_type_ids_tuple = (tile_type_id,) if tile_type_id is not None else None
tile_indices_tuple = ((x_index, y_index),) if x_index is not None and y_index is not None else None
satellites_tuple = (satellite,) if satellite is not None else None
sensors_tuple = (sensor,) if sensor is not None else None
paths_tuple = (path,) if path is not None else None
rows_tuple = (row,) if row is not None else None
params = {'tile_type_ids': tile_type_ids_tuple,
'tile_indices': tile_indices_tuple,
'satellites': satellites_tuple,
'sensors': sensors_tuple,
'x_refs': paths_tuple,
'y_refs': rows_tuple,
'start_datetime': start_datetime,
'end_datetime': end_datetime
}
log_multiline(logger.debug, params, 'params', '\t')
sql = """-- Retrieve all tile details for specified tile range
select
tile_type_id,
x_index,
y_index,
start_datetime,
end_datetime,
satellite_tag,
sensor_name,
tile_pathname,
x_ref as path,
y_ref as start_row,
case when tile_class_id = 4 then y_ref + 1 else y_ref end as end_row, -- This will not work for mosaics with >2 source tiles
level_name,
nodata_value,
gcp_count,
cloud_cover
from acquisition
join dataset using(acquisition_id)
join tile using(dataset_id)
join satellite using(satellite_id)
join sensor using(satellite_id, sensor_id)
join processing_level using(level_id)
where (tile_class_id = 1 or tile_class_id = 4) -- Only good non-overlapped and mosaic tiles"""
if params['tile_type_ids']:
sql += """
and tile_type_id in %(tile_type_ids)s"""
if params['tile_indices']:
sql += """
and (x_index, y_index) in %(tile_indices)s"""
if params['satellites']:
sql += """
and satellite_tag in %(satellites)s"""
if params['sensors']:
sql += """
and sensor_name in %(sensors)s"""
if params['x_refs']:
sql += """
and x_ref in %(x_refs)s"""
if params['y_refs']:
sql += """
and y_ref in %(y_refs)s"""
sql += """
and (%(start_datetime)s is null or start_datetime >= %(start_datetime)s)
and (%(end_datetime)s is null or end_datetime < %(end_datetime)s)
order by
tile_type_id,
x_index,
y_index,
start_datetime,
end_datetime,
level_name,
satellite_tag,
sensor_name;
"""
log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\t')
db_cursor2.execute(sql, params)
stack_info_dict = {}
        for record in db_cursor2:
tile_info = {'tile_type_id': record[0],
'x_index': record[1],
'y_index': record[2],
'start_datetime': record[3],
'end_datetime': record[4],
'satellite_tag': record[5],
'sensor_name': record[6],
'tile_pathname': record[7],
'path': record[8],
'start_row': record[9],
'end_row': record[10], # Copy of row field
'level_name': record[11],
'nodata_value': record[12],
'gcp_count': record[13],
'cloud_cover': record[14]
}
# log_multiline(logger.debug, band_tile_info, 'band_tile_info', '\t')
assert os.path.exists(tile_info['tile_pathname']), 'File for tile %s does not exist' % tile_info['tile_pathname']
# Create nested dict keyed by start_datetime and level_name
timeslice_dict = stack_info_dict.get(tile_info['start_datetime']) or {}
if not timeslice_dict:
stack_info_dict[tile_info['start_datetime']] = timeslice_dict
level_dict = timeslice_dict.get(tile_info['level_name']) or {}
if not level_dict:
level_dict = tile_info
timeslice_dict[tile_info['level_name']] = level_dict
#===================================================================
# # If this tile is NOT a continuation of the last one
# if (not last_band_tile_info # First tile
# or (band_tile_info['band_tag'] != last_band_tile_info['band_tag'])
# or (band_tile_info['satellite_tag'] != last_band_tile_info['satellite_tag'])
# or (band_tile_info['sensor_name'] != last_band_tile_info['sensor_name'])
# or (band_tile_info['path'] != last_band_tile_info['path'])
# or ((band_tile_info['start_datetime'] - last_band_tile_info['end_datetime']) > timedelta(0, 3600)) # time difference > 1hr
# ):
# # Record timeslice information for previous timeslice if it exists
# if timeslice_info:
# record_timeslice_information(timeslice_info, mosaic_file_list, stack_dict)
#
# # Start recording a new band if necessary
# if (not last_band_tile_info or (band_tile_info['band_tag'] != last_band_tile_info['band_tag'])):
# stack_dict = {}
# level_dict = band_stack_dict.get(band_tile_info['level_name']) or {}
# if not level_dict:
# band_stack_dict[band_tile_info['level_name']] = level_dict
#
# level_dict[band_tile_info['band_tag']] = stack_dict
#
# # Start a new timeslice
# mosaic_file_list = [band_tile_info['tile_pathname']]
# timeslice_info = band_tile_info
# else: # Tile IS a continuation of the last one - same timeslice
# mosaic_file_list.append(band_tile_info['tile_pathname'])
# timeslice_info['end_datetime'] = band_tile_info['end_datetime']
# timeslice_info['end_row'] = band_tile_info['end_row']
#
# last_band_tile_info = band_tile_info
#===================================================================
#=======================================================================
# # Check for no results, otherwise record the last timeslice
# if not timeslice_info:
# return {}
# else:
# record_timeslice_information(timeslice_info, mosaic_file_list, stack_dict)
#
# log_multiline(logger.debug, band_stack_dict, 'band_stack_dict', '\t')
#=======================================================================
        assert stack_info_dict, 'No data found for this tile and temporal range'
        log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
logger.debug('stack_info_dict has %s timeslices', len(stack_info_dict))
if disregard_incomplete_data:
stack_info_dict = {start_datetime: stack_info_dict[start_datetime]
for start_datetime in stack_info_dict.keys()
# if {'L1T', 'ORTHO'} & set(stack_info_dict[start_datetime].keys()) # Either L1T or ORTHO
# and {'NBAR','PQA'} <= set(stack_info_dict[start_datetime].keys()) # Both NBAR & PQA
if {'NBAR','PQA'} <= set(stack_info_dict[start_datetime].keys()) # Both NBAR & PQA
}
logger.debug('stack_info_dict has %s timeslices after removal of incomplete datasets', len(stack_info_dict))
if levels:
stack_info_dict = {start_datetime: stack_info_dict[start_datetime]
for start_datetime in stack_info_dict.keys()
if set(levels) <= set(stack_info_dict[start_datetime].keys()) # All specified levels exist
}
if (stack_output_dir):
self.create_directory(stack_output_dir)
if create_band_stacks:
band_stack_dict = {}
for start_datetime in sorted(stack_info_dict.keys()):
logger.debug('start_datetime = %s', start_datetime)
timeslice_dict = stack_info_dict[start_datetime]
log_multiline(logger.debug, timeslice_dict, 'timeslice_dict', '\t')
                # Use any processing level to obtain lookup values - all levels should have the same values
tile_info = timeslice_dict.values()[0]
# self.band_lookup_dict is keyed by tile_type_id, satellite_tag, sensor_name, level_name, band_tag
log_multiline(logger.debug, self.band_lookup_dict, 'self.band_lookup_dict', '\t')
band_lookup_dict = (self.band_lookup_dict[tile_info['tile_type_id']]
[tile_info['satellite_tag']]
[tile_info['sensor_name']]
)
log_multiline(logger.debug, band_lookup_dict, 'band_lookup_dict', '\t')
# Combine derived bands with lookup-sourced band info - this is a bit ugly
derived_band_dict = {key[1]: self.bands[tile_info['tile_type_id']][key] for key in self.bands[tile_info['tile_type_id']].keys() if key[0] == 'DERIVED'}
log_multiline(logger.debug, derived_band_dict, 'derived_band_dict', '\t')
derived_band_dict = {level_name: {value['band_tag']: value for value in derived_band_dict[level_name].values()}
for level_name in derived_band_dict.keys()}
log_multiline(logger.debug, derived_band_dict, 'modified derived_band_dict', '\t')
band_lookup_dict.update(derived_band_dict)
log_multiline(logger.debug, band_lookup_dict, 'modified band_lookup_dict', '\t')
# Iterate through the available processing levels
for level_name in sorted(timeslice_dict.keys()): # Sorting is not really necessary
logger.debug('level_name = %s', level_name)
level_band_dict = band_lookup_dict.get(level_name)
log_multiline(logger.debug, level_band_dict, 'level_band_dict', '\t')
if not level_band_dict: # Don't process this level if there are no bands to be processed
continue
tile_info_dict = timeslice_dict[level_name]
# Iterate through all bands for this processing level
for band_tag in level_band_dict:
# Combine tile and band info into one dict
band_tile_info = {start_datetime: dict(tile_info_dict)}
band_tile_info[start_datetime].update(level_band_dict[band_tag])
# log_multiline(logger.debug, band_tile_info, 'band_tile_info for %s' % band_info['band_tag'], '\t')
band_tile_dict = band_stack_dict.get((tile_info_dict['tile_type_id'],
tile_info_dict['x_index'],
tile_info_dict['y_index'],
level_name,
band_tag))
if not band_tile_dict: # No entry found for this level_name & band_tag
# Create the first entry
band_stack_dict[(tile_info_dict['tile_type_id'],
tile_info_dict['x_index'],
tile_info_dict['y_index'],
level_name,
band_tag)
] = band_tile_info
else:
band_tile_dict.update(band_tile_info)
log_multiline(logger.debug, band_stack_dict, 'band_stack_dict', '\t')
# Create VRT files
#TODO: Make this cater for multiple tile types
for tile_type_id, x_index, y_index, level_name, band_tag in band_stack_dict.keys(): # Every stack file
file_stack_dict = band_stack_dict[(tile_type_id, x_index, y_index, level_name, band_tag)]
stack_filename = os.path.join(stack_output_dir,
'_'.join((level_name,
re.sub('\+', '', '%+04d_%+04d' % (x_index, y_index)),
band_tag)) + '.vrt')
logger.debug('stack_filename = %s', stack_filename)
# Open the first tile as the template dataset
template_dataset = gdal.Open(file_stack_dict.values()[0]['tile_pathname'])
raster_size = {'x': template_dataset.RasterXSize, 'y': template_dataset.RasterYSize}
block_size = dict(zip(['x','y'], template_dataset.GetRasterBand(1).GetBlockSize()))
gdal_driver = gdal.GetDriverByName("VRT")
#Set datatype formats appropriate to Create() and numpy
gdal_dtype = template_dataset.GetRasterBand(1).DataType
dtype_name = gdal.GetDataTypeName(gdal_dtype)
vrt_dataset = gdal_driver.Create(stack_filename,
raster_size['x'],
raster_size['y'],
0)
vrt_dataset.SetGeoTransform(template_dataset.GetGeoTransform())
vrt_dataset.SetProjection(template_dataset.GetProjection())
del template_dataset # All values read - not needed any more
for start_datetime in sorted(file_stack_dict.keys()):
tile_info = file_stack_dict[start_datetime]
vrt_dataset.AddBand(gdal_dtype)
output_band = vrt_dataset.GetRasterBand(vrt_dataset.RasterCount)
complex_source = '<ComplexSource>' + \
'<SourceFilename relativeToVRT="0">%s</SourceFilename>' % tile_info['tile_pathname'] + \
'<SourceBand>%i</SourceBand>' % tile_info['tile_layer'] + \
'<SourceProperties RasterXSize="%i" RasterYSize="%i" DataType="%s" BlockXSize="%i" BlockYSize="%i"/>' % (raster_size['x'], raster_size['y'],
dtype_name, block_size['x'],
block_size['y']) + \
'<SrcRect xOff="%i" yOff="%i" xSize="%i" ySize="%i"/>' % (0, 0, raster_size['x'], raster_size['y']) + \
'<DstRect xOff="%i" yOff="%i" xSize="%i" ySize="%i"/>' % (0, 0, raster_size['x'], raster_size['y']) + \
('<NODATA>%d</NODATA>' % tile_info['nodata_value'] if tile_info['nodata_value'] is not None else "") + \
'</ComplexSource>'
log_multiline(logger.debug, complex_source, 'complex_source', '\t')
output_band.SetMetadataItem("source_0", complex_source, "new_vrt_sources")
output_band.SetMetadata({key: str(tile_info[key]) for key in tile_info.keys()})
# No data value needs to be set separately
if tile_info['nodata_value'] is not None:
output_band.SetNoDataValue(tile_info['nodata_value'])
return stack_info_dict
def get_pqa_mask(self, pqa_dataset_path, good_pixel_masks=[32767,16383,2457], dilation=3):
pqa_gdal_dataset = gdal.Open(pqa_dataset_path)
assert pqa_gdal_dataset, 'Unable to open PQA GeoTIFF file %s' % pqa_dataset_path
pqa_array = pqa_gdal_dataset.GetRasterBand(1).ReadAsArray()
del pqa_gdal_dataset
log_multiline(logger.debug, pqa_array, 'pqa_array', '\t')
# Ignore bit 6 (saturation for band 62) - always 0 for Landsat 5
pqa_array = pqa_array | 64
# logger.debug('pqa_array = %s', pqa_array)
# Dilating both the cloud and cloud shadow masks
s = [[1,1,1],[1,1,1],[1,1,1]]
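        # Each of the four cloud/shadow bits below is treated with the same
        # pattern: erode the set bits with the 3x3 structuring element,
        # flag the pixels removed by the erosion (dif), and fold that
        # difference back into the corresponding PQA bit-plane, growing the
        # masked area by 'dilation' pixels.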
acca = (pqa_array & 1024) >> 10
erode = ndimage.binary_erosion(acca, s, iterations=dilation, border_value=1)
dif = erode - acca
dif[dif < 0] = 1
pqa_array += (dif << 10)
del acca
fmask = (pqa_array & 2048) >> 11
erode = ndimage.binary_erosion(fmask, s, iterations=dilation, border_value=1)
dif = erode - fmask
dif[dif < 0] = 1
pqa_array += (dif << 11)
del fmask
acca_shad = (pqa_array & 4096) >> 12
erode = ndimage.binary_erosion(acca_shad, s, iterations=dilation, border_value=1)
dif = erode - acca_shad
dif[dif < 0] = 1
pqa_array += (dif << 12)
del acca_shad
fmask_shad = (pqa_array & 8192) >> 13
erode = ndimage.binary_erosion(fmask_shad, s, iterations=dilation, border_value=1)
dif = erode - fmask_shad
dif[dif < 0] = 1
pqa_array += (dif << 13)
#=======================================================================
# pqa_mask = ma.getmask(ma.masked_equal(pqa_array, int(good_pixel_masks[0])))
# for good_pixel_mask in good_pixel_masks[1:]:
# pqa_mask = ma.mask_or(pqa_mask, ma.getmask(ma.masked_equal(pqa_array, int(good_pixel_mask))))
#=======================================================================
pqa_mask = numpy.zeros(pqa_array.shape, dtype=numpy.bool)
for good_pixel_mask in good_pixel_masks:
pqa_mask[pqa_array == good_pixel_mask] = True
return pqa_mask
def apply_pqa_mask(self, data_array, pqa_mask, no_data_value):
assert len(data_array.shape) == 2, 'apply_pqa_mask can only be applied to 2D arrays'
assert data_array.shape == pqa_mask.shape, 'Mis-matched data_array and pqa_mask'
data_array[~pqa_mask] = no_data_value
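    # A minimal masking sketch (hypothetical tile paths and nodata value),
    # combining the two helpers above:
    #
    #   pqa_mask = stacker.get_pqa_mask('/tiles/LS7_ETM_PQA_150_-025.tif')
    #   band = gdal.Open('/tiles/LS7_ETM_NBAR_150_-025.tif').GetRasterBand(1)
    #   data_array = band.ReadAsArray()
    #   stacker.apply_pqa_mask(data_array, pqa_mask, no_data_value=-999)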
def get_static_info(self, level_name=None, x_index=None, y_index=None, tile_type_id=None):
"""Retrieve static (i.e. not time varying) data for specified processing level(s) (e.g. 'DSM')"""
x_index = x_index or self.x_index
y_index = y_index or self.y_index
tile_type_id = tile_type_id or self.default_tile_type_id
db_cursor2 = self.db_connection.cursor()
sql = """-- Retrieve all tile details for static data
select distinct
level_name,
dataset_path,
tile_pathname
from dataset
inner join processing_level using(level_id)
inner join tile t using (dataset_id)
inner join tile_footprint tf using (x_index, y_index, tile_type_id)
where tile_type_id = %(tile_type_id)s
and tile_class_id = 1 -- Select only valid tiles
and (%(level_name)s is null or level_name = %(level_name)s)
and x_index = %(x_index)s
and y_index = %(y_index)s
and acquisition_id is null -- No acquisition for static data
order by
level_name,
dataset_path;
"""
params = {'level_name': level_name,
'x_index': x_index,
'y_index': y_index,
'tile_type_id': tile_type_id,
}
log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\t')
db_cursor2.execute(sql, params)
static_info_dict = {}
last_level_name = ''
for record in db_cursor2:
static_data = {'level_name': record[0],
'dataset_path': record[1],
'tile_pathname': record[2]
}
            assert static_data['level_name'] != last_level_name, 'Duplicate data source found for level %s' % static_data['level_name']
            last_level_name = static_data['level_name']
band_info = self.bands[tile_type_id].get(('DERIVED', static_data['level_name']))
static_info_dict[static_data['level_name']] = {
'level_name': static_data['level_name'],
'nodata_value': band_info.values()[0]['nodata_value'], # All values the same for the one level
'tile_pathname': static_data['tile_pathname'],
'x_index': x_index,
'y_index': y_index
#====================
# 'band_name': None,
# 'band_tag': None,
# 'end_datetime': None,
# 'end_row': None,
# 'path': None,
# 'satellite_tag': None,
# 'sensor_name': None,
# 'start_datetime': None,
# 'start_row': None,
# 'tile_layer': 1,
# 'gcp_count': None,
# 'cloud_cover': None
#====================
}
return static_info_dict
def stack_derived(self, x_index, y_index, stack_output_dir,
start_datetime=None, end_datetime=None,
satellite=None, sensor=None,
tile_type_id=None,
create_stacks=True):
tile_type_id = tile_type_id or self.default_tile_type_id
tile_type_info = self.tile_type_dict[tile_type_id]
stack_output_info = {'x_index': x_index,
'y_index': y_index,
'stack_output_dir': stack_output_dir,
'start_datetime': start_datetime,
'end_datetime': end_datetime,
'satellite': satellite,
'sensor': sensor}
# Create intermediate mosaics and return dict with stack info
stack_info_dict = self.stack_tile(x_index=x_index,
y_index=y_index,
stack_output_dir=stack_output_dir,
start_datetime=start_datetime,
end_datetime=end_datetime,
satellite=satellite,
sensor=sensor,
tile_type_id=None,
create_band_stacks=False,
disregard_incomplete_data=False)
        # Log the parameters of the stack_tile() call above
logger.debug('self.stack_tile(x_index=%s, y_index=%s, stack_output_dir=%s, start_datetime=%s, end_datetime=%s, satellite=%s, sensor=%s, tile_type_id=%s, create_band_stacks=%s, disregard_incomplete_data=%s) called',
x_index, y_index,
stack_output_dir,
start_datetime,
end_datetime,
satellite,
sensor,
None,
False,
False)
log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
static_info_dict = self.get_static_info(level_name=None, x_index=x_index, y_index=y_index) # Get info for all static data
log_multiline(logger.debug, static_info_dict, 'static_info_dict', '\t')
# Find all datetimes
start_datetimes = sorted(stack_info_dict.keys())
# Iterate through sorted start_datetimes
derived_stack_dict = {}
for start_datetime in start_datetimes:
# Create input_dataset_dict dict for deriver_function
input_dataset_dict = dict(stack_info_dict[start_datetime])
input_dataset_dict.update(static_info_dict) # Add static data to dict passed to function
# Create derived datasets and receive name(s) of timeslice file(s) keyed by stack file name(s)
output_dataset_info = self.derive_datasets(input_dataset_dict, stack_output_info, tile_type_info)
if output_dataset_info is not None:
for output_stack_path in output_dataset_info:
# Create a new list for each stack if it doesn't already exist
stack_list = derived_stack_dict.get(output_stack_path, [])
if not stack_list:
derived_stack_dict[output_stack_path] = stack_list
stack_list.append(output_dataset_info[output_stack_path])
log_multiline(logger.debug, derived_stack_dict, 'derived_stack_dict', '\t')
        # Individual tile processing is finished. Now build stack(s)
if create_stacks:
for output_stack_path in sorted(derived_stack_dict.keys()):
if os.path.exists(output_stack_path) and not self.refresh:
logger.info('Skipped existing stack file %s', output_stack_path)
continue
if (self.lock_object(output_stack_path)):
logger.debug('Creating temporal stack %s', output_stack_path)
self.stack_files(timeslice_info_list=derived_stack_dict[output_stack_path],
stack_dataset_path=output_stack_path,
band1_vrt_path=None, overwrite=True)
self.unlock_object(output_stack_path)
logger.info('VRT stack file %s created', output_stack_path)
return derived_stack_dict
def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
""" Abstract function for calling in stack_derived() function. Should be overridden
in a descendant class.
Arguments:
input_dataset_dict: Dict keyed by processing level (e.g. ORTHO, NBAR, PQA, DEM)
containing all tile info which can be used within the function
A sample is shown below (including superfluous band-specific information):
{
'NBAR': {'band_name': 'Visible Blue',
'band_tag': 'B10',
'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217),
'end_row': 77,
'level_name': 'NBAR',
'nodata_value': -999L,
'path': 91,
'satellite_tag': 'LS7',
'sensor_name': 'ETM+',
'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217),
'start_row': 77,
'tile_layer': 1,
'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_NBAR_150_-025_2000-02-09T23-46-12.722217.tif',
'x_index': 150,
'y_index': -25},
'ORTHO': {'band_name': 'Thermal Infrared (Low Gain)',
'band_tag': 'B61',
'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217),
'end_row': 77,
'level_name': 'ORTHO',
'nodata_value': 0L,
'path': 91,
'satellite_tag': 'LS7',
'sensor_name': 'ETM+',
'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217),
'start_row': 77,
'tile_layer': 1,
'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_ORTHO_150_-025_2000-02-09T23-46-12.722217.tif',
'x_index': 150,
'y_index': -25},
'PQA': {'band_name': 'Pixel Quality Assurance',
'band_tag': 'PQA',
'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217),
'end_row': 77,
'level_name': 'PQA',
'nodata_value': None,
'path': 91,
'satellite_tag': 'LS7',
'sensor_name': 'ETM+',
'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217),
'start_row': 77,
'tile_layer': 1,
                     'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_PQA_150_-025_2000-02-09T23-46-12.722217.tif',
'x_index': 150,
'y_index': -25}
}
Arguments (Cont'd):
stack_output_info: dict containing stack output information.
Obtained from stacker object.
A sample is shown below
stack_output_info = {'x_index': 144,
'y_index': -36,
'stack_output_dir': '/g/data/v10/tmp/ndvi',
'start_datetime': None, # Datetime object or None
'end_datetime': None, # Datetime object or None
'satellite': None, # String or None
'sensor': None} # String or None
Arguments (Cont'd):
tile_type_info: dict containing tile type information.
Obtained from stacker object (e.g: stacker.tile_type_dict[tile_type_id]).
A sample is shown below
{'crs': 'EPSG:4326',
'file_extension': '.tif',
'file_format': 'GTiff',
'format_options': 'COMPRESS=LZW,BIGTIFF=YES',
'tile_directory': 'EPSG4326_1deg_0.00025pixel',
'tile_type_id': 1L,
'tile_type_name': 'Unprojected WGS84 1-degree at 4000 pixels/degree',
'unit': 'degree',
'x_origin': 0.0,
'x_pixel_size': Decimal('0.00025000000000000000'),
'x_pixels': 4000L,
'x_size': 1.0,
'y_origin': 0.0,
'y_pixel_size': Decimal('0.00025000000000000000'),
'y_pixels': 4000L,
'y_size': 1.0}
Function must create one or more GDAL-supported output datasets. Useful functions in the
Stacker class include Stacker.get_pqa_mask(), but it is left to the coder to produce exactly
what is required for a single slice of the temporal stack of derived quantities.
Returns:
output_dataset_info: Dict keyed by stack filename
containing metadata info for GDAL-supported output datasets created by this function.
Note that the key(s) will be used as the output filename for the VRT temporal stack
and each dataset created must contain only a single band. An example is as follows:
{'/g/data/v10/tmp/ndvi/NDVI_stack_150_-025.vrt':
{'band_name': 'Normalised Differential Vegetation Index with PQA applied',
'band_tag': 'NDVI',
'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217),
'end_row': 77,
'level_name': 'NDVI',
'nodata_value': None,
'path': 91,
'satellite_tag': 'LS7',
'sensor_name': 'ETM+',
'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217),
'start_row': 77,
'tile_layer': 1,
'tile_pathname': '/g/data/v10/tmp/ndvi/LS7_ETM_NDVI_150_-025_2000-02-09T23-46-12.722217.tif',
'x_index': 150,
'y_index': -25}
}
"""
assert type(input_dataset_dict) == dict, 'input_dataset_dict must be a dict'
log_multiline(logger.debug, input_dataset_dict, 'input_dataset_dict', '\t')
# Test function to copy ORTHO & NBAR band datasets with pixel quality mask applied
# to an output directory for stacking
output_dataset_dict = {}
for input_level in ['NBAR', 'ORTHO']:
input_dataset_info = input_dataset_dict[input_level]
input_path = input_dataset_info['tile_pathname']
# Generate sorted list of band info for this tile type, satellite and sensor
band_dict = self.bands[tile_type_info['tile_type_id']][(input_dataset_info['satellite_tag'], input_dataset_info['sensor_name'])]
band_info_list = [band_dict[tile_layer] for tile_layer in sorted(band_dict.keys()) if band_dict[tile_layer]['level_name'] == input_level]
# Get a boolean mask from the PQA dataset (use default parameters for mask and dilation)
pqa_mask = self.get_pqa_mask(input_dataset_dict['PQA']['tile_pathname'])
input_dataset = gdal.Open(input_path)
            assert input_dataset, 'Unable to open dataset %s' % input_path
no_data_value = input_dataset_info['nodata_value']
# Create single-band output dataset for each band
for band_index in range(input_dataset.RasterCount):
# Define the output stack name (used as dict key)
output_stack_path = os.path.join(self.output_dir, '%s_%s_pqa_masked.vrt' % (input_level,
band_info_list[band_index]['band_tag']
)
)
output_tile_path = os.path.join(self.output_dir, re.sub('\.\w+$',
'_%s%s' % (band_info_list[band_index]['band_tag'],
tile_type_info['file_extension']),
os.path.basename(input_path)
)
)
# Copy metadata for eventual inclusion in stack file output
# This could also be written to the output tile if required
output_dataset_info = dict(input_dataset_info)
output_dataset_info['tile_pathname'] = output_tile_path # This is the most important modification - used to find
output_dataset_info['band_name'] = '%s with PQA mask applied' % band_info_list[band_index]['band_name']
output_dataset_info['band_tag'] = '%s-PQA' % band_info_list[band_index]['band_tag']
output_dataset_info['tile_layer'] = 1
# Check for existing, valid file
if self.refresh or not os.path.exists(output_tile_path) or not gdal.Open(output_tile_path):
if self.lock_object(output_tile_path):
input_band = input_dataset.GetRasterBand(band_index + 1)
gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
output_dataset = gdal_driver.Create(output_tile_path,
input_dataset.RasterXSize, input_dataset.RasterYSize,
1, input_band.DataType,
tile_type_info['format_options'].split(','))
                        assert output_dataset, 'Unable to open output dataset %s' % output_tile_path
output_dataset.SetGeoTransform(input_dataset.GetGeoTransform())
output_dataset.SetProjection(input_dataset.GetProjection())
output_band = output_dataset.GetRasterBand(1)
data_array = input_band.ReadAsArray()
self.apply_pqa_mask(data_array, pqa_mask, no_data_value)
output_band.WriteArray(data_array)
output_band.SetNoDataValue(no_data_value)
output_band.FlushCache()
# This is not strictly necessary - copy metadata to output dataset
output_dataset_metadata = input_dataset.GetMetadata()
output_dataset_metadata.update(input_band.GetMetadata())
if output_dataset_metadata:
output_dataset.SetMetadata(output_dataset_metadata)
log_multiline(logger.debug, output_dataset_metadata, 'output_dataset_metadata', '\t')
output_dataset.FlushCache()
self.unlock_object(output_tile_path)
logger.info('Finished writing dataset %s', output_tile_path)
else:
logger.info('Skipped locked dataset %s', output_tile_path)
sleep(5) #TODO: Find a nicer way of dealing with contention for the same output tile
else:
logger.info('Skipped existing, valid dataset %s', output_tile_path)
output_dataset_dict[output_stack_path] = output_dataset_info
# log_multiline(logger.debug, output_dataset_info, 'output_dataset_info', '\t')
log_multiline(logger.debug, output_dataset_dict, 'output_dataset_dict', '\t')
# Both NBAR & ORTHO datasets processed - return info for both
return output_dataset_dict
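# A minimal derive_datasets() override sketch (hypothetical NDVI example;
# the band indices, output naming and metadata handling are assumptions):
#
#   class NDVIStacker(Stacker):
#       def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
#           nbar = gdal.Open(input_dataset_dict['NBAR']['tile_pathname'])
#           red = nbar.GetRasterBand(3).ReadAsArray().astype(numpy.float32)
#           nir = nbar.GetRasterBand(4).ReadAsArray().astype(numpy.float32)
#           ndvi = (nir - red) / (nir + red)
#           # ...write ndvi to a single-band dataset, apply the PQA mask, and
#           # return {stack_vrt_path: output_tile_metadata_dict}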
if __name__ == '__main__':
def date2datetime(input_date, time_offset=time.min):
if not input_date:
return None
return datetime.combine(input_date, time_offset)
stacker = Stacker()
# Check for required command line parameters
assert stacker.x_index, 'Tile X-index not specified (-x or --x_index)'
assert stacker.y_index, 'Tile Y-index not specified (-y or --y_index)'
assert stacker.output_dir, 'Output directory not specified (-o or --output)'
assert os.path.isdir(stacker.output_dir), 'Invalid output directory specified (-o or --output)'
stacker.output_dir = os.path.abspath(stacker.output_dir)
log_multiline(logger.debug, stacker.__dict__, 'stacker.__dict__', '\t')
# Stacker object already has command line parameters
    # Note that disregard_incomplete_data is set from the --complete-only command line flag
stack_info_dict = stacker.stack_tile(x_index=stacker.x_index,
y_index=stacker.y_index,
stack_output_dir=stacker.output_dir,
start_datetime=date2datetime(stacker.start_date, time.min),
end_datetime=date2datetime(stacker.end_date, time.max),
satellite=stacker.satellite,
sensor=stacker.sensor,
path=stacker.path,
row=stacker.row,
tile_type_id=None,
create_band_stacks=True,
disregard_incomplete_data=stacker.complete_only,
levels=stacker.levels
)
log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
logger.info('Finished creating %d temporal stack files in %s.', len(stack_info_dict), stacker.output_dir)
| 51.588462
| 223
| 0.550034
|
4a17d5bdcc5e654646c9592f4c5c57955def2685
| 4,944
|
py
|
Python
|
lte/gateway/python/integ_tests/s1aptests/test_attach_active_tau_with_combined_tala_update_reattach.py
|
nitinneet/test23
|
c44df1a3290195cd3fc59d3483ef640ca8aaeb1e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-08T15:49:05.000Z
|
2021-08-08T15:49:05.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_active_tau_with_combined_tala_update_reattach.py
|
nitinneet/test23
|
c44df1a3290195cd3fc59d3483ef640ca8aaeb1e
|
[
"BSD-3-Clause"
] | 151
|
2020-09-03T20:44:13.000Z
|
2022-03-31T20:28:52.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_active_tau_with_combined_tala_update_reattach.py
|
kkahrs/magma
|
73e666627dc28e0c492feab7321bb7d6dd433b09
|
[
"BSD-3-Clause"
] | 2
|
2021-05-27T18:15:16.000Z
|
2021-05-27T18:41:39.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import time
import gpp_types
import s1ap_types
import s1ap_wrapper
class TestAttachActiveTauWithCombinedTalaUpdateReattach(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_active_tau_with_combined_tala_update_reattach(self):
"""This test case validates reattach after active combined TAU reject:
1. End-to-end attach with attach type COMBINED_EPS_IMSI_ATTACH
2. Send active TAU request (Combined TALA update)
3. Receive TAU reject (Combined TALA update not supported)
4. Retry end-to-end combined EPS IMSI attach to verify if UE context
was released properly after combined TAU reject
"""
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
ue_id = req.ue_id
print(
"************************* Running End to End attach for UE id ",
ue_id,
)
# Now actually complete the attach
self._s1ap_wrapper._s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_COMB_EPS_IMSI_ATTACH,
)
# Wait for EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# Delay to ensure S1APTester sends attach complete before sending UE
# context release
time.sleep(0.5)
print(
"************************* Sending UE context release request ",
"for UE id ",
ue_id,
)
# Send UE context release request to move UE to idle mode
req = s1ap_types.ueCntxtRelReq_t()
req.ue_Id = ue_id
req.cause.causeVal = gpp_types.CauseRadioNetwork.USER_INACTIVITY.value
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_CNTXT_REL_REQUEST, req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value
)
print(
"************************* Received UE context release indication"
)
print(
"************************* Sending active TAU request (Combined "
"TALA update) for UE id ",
ue_id,
)
# Send active TAU request with combined TALA update as update type
req = s1ap_types.ueTauReq_t()
req.ue_Id = ue_id
req.type = s1ap_types.Eps_Updt_Type.TFW_COMB_TALA_UPDATING.value
req.Actv_flag = True
req.ueMtmsi.pres = False
self._s1ap_wrapper.s1_util.issue_cmd(s1ap_types.tfwCmd.UE_TAU_REQ, req)
        # Waiting for TAU Reject Indication - Combined TALA update not supported
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_TAU_REJECT_IND.value
)
print(
"************************* Received Tracking Area Update Reject "
"Indication"
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value
)
print(
"************************* Received UE context release indication"
)
print(
"************************* Running End to End attach to verify if "
"UE context was released properly after combined TAU reject for "
"UE id ",
ue_id,
)
# Now actually complete the attach
self._s1ap_wrapper._s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_COMB_EPS_IMSI_ATTACH,
)
# Wait for EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
print("************************* Running UE detach for UE id", ue_id)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
ue_id, s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
| 35.314286
| 79
| 0.627023
|
4a17d61d65223659f13f4d3e074dcb5b8f6cbfb5
| 784
|
py
|
Python
|
AppEngine/templates2/main.py
|
topquark28/CSSI-2016
|
42b82462fc58fd3f454e2529e860d5edc0548e7f
|
[
"MIT"
] | null | null | null |
AppEngine/templates2/main.py
|
topquark28/CSSI-2016
|
42b82462fc58fd3f454e2529e860d5edc0548e7f
|
[
"MIT"
] | null | null | null |
AppEngine/templates2/main.py
|
topquark28/CSSI-2016
|
42b82462fc58fd3f454e2529e860d5edc0548e7f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import jinja2
import logging
import os
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
logging.info("%s.get()" % self.__class__.__name__)
template = jinja_environment.get_template('hello.html')
self.response.write(template.render())
class GoodByeHandler(webapp2.RequestHandler):
def get(self):
logging.info("%s.get()" % self.__class__.__name__)
self.response.write("Bye!")
my_dir = os.path.dirname(__file__)
template_dir = os.path.join(my_dir, 'templates')
jinja_loader=jinja2.FileSystemLoader(template_dir)
jinja_environment = jinja2.Environment(loader=jinja_loader)
routes = [
('/', MainHandler),
('/bye', GoodByeHandler),
]
app = webapp2.WSGIApplication(routes, debug=True)
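# A minimal extension sketch (hypothetical handler and template; new routes
# must be added to the 'routes' list above before the app is constructed):
#
#   class AboutHandler(webapp2.RequestHandler):
#       def get(self):
#           template = jinja_environment.get_template('about.html')
#           self.response.write(template.render())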
| 24.5
| 63
| 0.718112
|
4a17d63ac42d8a804a4ccdb862e9cf4f0323cba8
| 7,148
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2015-05-04T12:19:05.000Z
|
2015-05-04T12:19:05.000Z
|
ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2021-01-07T08:55:01.000Z
|
2021-01-07T08:55:01.000Z
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
class TestSqoop(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "SQOOP/1.4.4.2.0/package"
STACK_VERSION = "2.0.6"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "configure",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
to = '/usr/share/java/mysql-connector-java.jar',)
self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
recursive = True,
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env.sh',
owner = 'sqoop',
group = 'hadoop',
content = InlineTemplate(self.getConfig()['configurations']['sqoop-env']['content'])
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-env-template.sh',
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site-template.xml',
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site.xml',
owner = 'sqoop',
group = 'hadoop',)
self.assertNoMoreResources()
def test_configure_add_jdbc(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
with open(config_file, "r") as f:
loaded_json = json.load(f)
loaded_json['configurations']['sqoop-env']['jdbc_drivers'] = 'org.postgresql.Driver, oracle.jdbc.driver.OracleDriver'
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "configure",
config_dict = loaded_json,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
to = '/usr/share/java/mysql-connector-java.jar',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/lib/ojdbc.jar',
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//oracle-jdbc-driver.jar'),
mode = 0644,
)
self.assertResourceCalled('File', '/usr/lib/sqoop/lib/postgresql-jdbc.jar',
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//postgres-jdbc-driver.jar'),
mode = 0644,
)
self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
owner = 'sqoop',
group = 'hadoop',
recursive = True,
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['sqoop-env']['content']),
owner = 'sqoop',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-env-template.sh',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site-template.xml',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site.xml',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_pre_rolling_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "pre_rolling_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled("Execute", "hdp-select set sqoop-client %s" % version)
self.assertEquals(2, mocks_dict['call'].call_count)
self.assertEquals(
"conf-select create-conf-dir --package sqoop --stack-version 2.3.0.0-1234 --conf-version 0",
mocks_dict['call'].call_args_list[0][0][0])
self.assertEquals(
"conf-select set-conf-dir --package sqoop --stack-version 2.3.0.0-1234 --conf-version 0",
mocks_dict['call'].call_args_list[1][0][0])
| 50.695035
| 130
| 0.551623
|
4a17d6d51c31e953cb34097cb4b9e2873f896fa3
| 4,219
|
py
|
Python
|
domato/canvas/generator.py
|
BOB-Jour/Domino_Fuzzer
|
82afb7b6e9b74235819e6d170bf85bde5d6d4213
|
[
"Apache-2.0"
] | 4
|
2021-12-21T23:52:44.000Z
|
2021-12-23T19:19:21.000Z
|
domato/canvas/generator.py
|
BOB-Jour/Domino_Fuzzer
|
82afb7b6e9b74235819e6d170bf85bde5d6d4213
|
[
"Apache-2.0"
] | null | null | null |
domato/canvas/generator.py
|
BOB-Jour/Domino_Fuzzer
|
82afb7b6e9b74235819e6d170bf85bde5d6d4213
|
[
"Apache-2.0"
] | 1
|
2022-01-05T09:09:39.000Z
|
2022-01-05T09:09:39.000Z
|
# Domato - main generator script
# -------------------------------
#
# Written and maintained by Ivan Fratric <ifratric@google.com>
#
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import random
import sys
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(parent_dir)
from grammar import Grammar
_N_MAIN_LINES = 1000
_N_EVENTHANDLER_LINES = 500
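# Line budgets for generated JS: the first <canvasfuzz> placeholder gets _N_MAIN_LINES,
# every later placeholder (event handlers) gets _N_EVENTHANDLER_LINES (see GenerateNewSample below).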
def generate_function_body(jsgrammar, num_lines):
js = ''
js += jsgrammar._generate_code(num_lines)
return js
def GenerateNewSample(template, jsgrammar):
"""Parses grammar rules from string.
Args:
template: A template string.
htmlgrammar: Grammar for generating HTML code.
cssgrammar: Grammar for generating CSS code.
jsgrammar: Grammar for generating JS code.
Returns:
A string containing sample data.
"""
result = template
handlers = False
while '<canvasfuzz>' in result:
numlines = _N_MAIN_LINES
if handlers:
numlines = _N_EVENTHANDLER_LINES
else:
handlers = True
result = result.replace(
'<canvasfuzz>',
generate_function_body(jsgrammar, numlines),
1
)
return result
def generate_samples(grammar_dir, outfiles):
"""Generates a set of samples and writes them to the output files.
Args:
grammar_dir: directory to load grammar files from.
outfiles: A list of output filenames.
"""
f = open(os.path.join(grammar_dir, 'template.html'))
template = f.read()
f.close()
jsgrammar = Grammar()
err = jsgrammar.parse_from_file(os.path.join(grammar_dir, 'canvas.txt'))
if err > 0:
print('There were errors parsing grammar')
return
for outfile in outfiles:
result = GenerateNewSample(template, jsgrammar)
if result is not None:
print('Writing a sample to ' + outfile)
try:
f = open(outfile, 'w')
f.write(result)
f.close()
except IOError:
print('Error writing to output')
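# get_option accepts both "--name value" and "--name=value" forms on the command line.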
def get_option(option_name):
for i in range(len(sys.argv)):
if (sys.argv[i] == option_name) and ((i + 1) < len(sys.argv)):
return sys.argv[i + 1]
elif sys.argv[i].startswith(option_name + '='):
return sys.argv[i][len(option_name) + 1:]
return None
def main():
fuzzer_dir = os.path.dirname(__file__)
multiple_samples = False
for a in sys.argv:
if a.startswith('--output_dir='):
multiple_samples = True
if '--output_dir' in sys.argv:
multiple_samples = True
if multiple_samples:
print('Running on ClusterFuzz')
out_dir = get_option('--output_dir')
nsamples = int(get_option('--no_of_files'))
print('Output directory: ' + out_dir)
print('Number of samples: ' + str(nsamples))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
outfiles = []
for i in range(nsamples):
outfiles.append(os.path.join(out_dir, 'fuzz-' + str(i).zfill(5) + '.html'))
generate_samples(fuzzer_dir, outfiles)
elif len(sys.argv) > 1:
outfile = sys.argv[1]
generate_samples(fuzzer_dir, [outfile])
else:
print('Arguments missing')
print("Usage:")
print("\tpython generator.py <output file>")
print("\tpython generator.py --output_dir <output directory> --no_of_files <number of output files>")
if __name__ == '__main__':
main()
| 28.506757
| 109
| 0.631192
|
4a17d72cb000d6b3526dbde3a9f7f3a350a2095c
| 1,038
|
py
|
Python
|
parse_osts-txt.py
|
glennklockwood/atgtools
|
d601f7acbb194c6e287ffd3f0d1bce66806a1ed4
|
[
"BSD-4-Clause-UC"
] | 8
|
2016-07-18T17:22:18.000Z
|
2021-01-05T18:24:50.000Z
|
parse_osts-txt.py
|
glennklockwood/atgtools
|
d601f7acbb194c6e287ffd3f0d1bce66806a1ed4
|
[
"BSD-4-Clause-UC"
] | null | null | null |
parse_osts-txt.py
|
glennklockwood/atgtools
|
d601f7acbb194c6e287ffd3f0d1bce66806a1ed4
|
[
"BSD-4-Clause-UC"
] | 4
|
2015-12-16T14:18:52.000Z
|
2019-05-08T21:40:47.000Z
|
#!/usr/bin/env python
#
# Parse and report on the osts.txt file dumped by the NERSC pyLMT
# hourly_archive.sh script (which itself just dumps lctl dl -t with a
# prefixed timestamp). See
#
# github.com/NERSC/pylmt/blob/master/share/nersc-deploy/hourly_archive.sh
#
import sys
import re
aggregate = { 'total': 0, 'used': 0, 'avail': 0 }
date = None
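# Emits one line per timestamped BEGIN block: "<date> <total> <used> <avail>", with the three
# capacity columns summed across every snx11168 OST listed in that block.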
with open(sys.argv[1]) as ostfile:
for line in ostfile:
if line.startswith('BEGIN'):
if date is None:
date = line.split()[1]
else:
print date, aggregate['total'], aggregate['used'], aggregate['avail']
date = line.split()[1]
aggregate = { 'total': 0, 'used': 0, 'avail': 0 }
elif line.startswith('snx11168-OST'):
fields = line.split()
aggregate['total'] += long(fields[1])
aggregate['used'] += long(fields[2])
aggregate['avail'] += long(fields[3])
print date, aggregate['total'], aggregate['used'], aggregate['avail']
| 31.454545
| 85
| 0.579961
|
4a17d7c0944549c526e7de24cff88ac90db63e3f
| 158
|
py
|
Python
|
_9_EXERCISE_DATA_TYPES_AND_VARIABLES/_6_Triples_of_Latin_Letters.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_9_EXERCISE_DATA_TYPES_AND_VARIABLES/_6_Triples_of_Latin_Letters.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_9_EXERCISE_DATA_TYPES_AND_VARIABLES/_6_Triples_of_Latin_Letters.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
n = int(input())
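# Three nested loops over the first n lowercase Latin letters print every 3-letter combination,
# n**3 lines in total; e.g. for n = 2: aaa, aab, aba, abb, baa, bab, bba, bbb.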
for i in range(0, n):
for k in range(0, n):
for j in range(0, n):
print(f"{chr(97 + i)}{chr(97 + k)}{chr(97 + j)}")
| 22.571429
| 61
| 0.455696
|
4a17d931c317c63e86b02871c29efac34b4f7855
| 16,786
|
py
|
Python
|
tests/test-lz4-speed.py
|
Joyoe/lz4-a8e8
|
de5393b0d053e1836a068661033fdcf0193bb402
|
[
"MIT"
] | null | null | null |
tests/test-lz4-speed.py
|
Joyoe/lz4-a8e8
|
de5393b0d053e1836a068661033fdcf0193bb402
|
[
"MIT"
] | null | null | null |
tests/test-lz4-speed.py
|
Joyoe/lz4-a8e8
|
de5393b0d053e1836a068661033fdcf0193bb402
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
#
# Copyright (c) 2016-2020, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# Limitations:
# - doesn't support filenames with spaces
# - dir1/lz4 and dir2/lz4 will be merged in a single results file
import argparse
import os
import string
import subprocess
import time
import traceback
import hashlib
script_version = 'v1.7.2 (2016-11-08)'
default_repo_url = 'https://github.com/lz4/lz4.git'
working_dir_name = 'speedTest'
working_path = os.getcwd() + '/' + working_dir_name # /path/to/lz4/tests/speedTest
clone_path = working_path + '/' + 'lz4' # /path/to/lz4/tests/speedTest/lz4
email_header = 'lz4_speedTest'
pid = str(os.getpid())
verbose = False
clang_version = "unknown"
gcc_version = "unknown"
args = None
def hashfile(hasher, fname, blocksize=65536):
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(blocksize), b""):
hasher.update(chunk)
return hasher.hexdigest()
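# Example (matching the use in test_commit below): hashfile(hashlib.md5(), clone_path + '/programs/lz4')
# returns the hex MD5 digest of the binary, reading it in 64 KiB blocks.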
def log(text):
print(time.strftime("%Y/%m/%d %H:%M:%S") + ' - ' + text)
def execute(command, print_command=True, print_output=False, print_error=True, param_shell=True):
if print_command:
log("> " + command)
popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=param_shell, cwd=execute.cwd)
stdout_lines, stderr_lines = popen.communicate(timeout=args.timeout)
stderr_lines = stderr_lines.decode("utf-8")
stdout_lines = stdout_lines.decode("utf-8")
if print_output:
if stdout_lines:
print(stdout_lines)
if stderr_lines:
print(stderr_lines)
if popen.returncode is not None and popen.returncode != 0:
if stderr_lines and not print_output and print_error:
print(stderr_lines)
raise RuntimeError(stdout_lines + stderr_lines)
return (stdout_lines + stderr_lines).splitlines()
execute.cwd = None
def does_command_exist(command):
try:
execute(command, verbose, False, False)
except Exception:
return False
return True
def send_email(emails, topic, text, have_mutt, have_mail):
logFileName = working_path + '/' + 'tmpEmailContent'
with open(logFileName, "w") as myfile:
myfile.writelines(text)
myfile.close()
if have_mutt:
execute('mutt -s "' + topic + '" ' + emails + ' < ' + logFileName, verbose)
elif have_mail:
execute('mail -s "' + topic + '" ' + emails + ' < ' + logFileName, verbose)
else:
log("e-mail cannot be sent (mail or mutt not found)")
def send_email_with_attachments(branch, commit, last_commit, args, text, results_files,
logFileName, have_mutt, have_mail):
with open(logFileName, "w") as myfile:
myfile.writelines(text)
myfile.close()
email_topic = '[%s:%s] Warning for %s:%s last_commit=%s speed<%s ratio<%s' \
% (email_header, pid, branch, commit, last_commit,
args.lowerLimit, args.ratioLimit)
if have_mutt:
execute('mutt -s "' + email_topic + '" ' + args.emails + ' -a ' + results_files
+ ' < ' + logFileName)
elif have_mail:
execute('mail -s "' + email_topic + '" ' + args.emails + ' < ' + logFileName)
else:
log("e-mail cannot be sent (mail or mutt not found)")
def git_get_branches():
execute('git fetch -p', verbose)
branches = execute('git branch -rl', verbose)
output = []
for line in branches:
if ("HEAD" not in line) and ("coverity_scan" not in line) and ("gh-pages" not in line):
output.append(line.strip())
return output
def git_get_changes(branch, commit, last_commit):
fmt = '--format="%h: (%an) %s, %ar"'
if last_commit is None:
commits = execute('git log -n 10 %s %s' % (fmt, commit))
else:
commits = execute('git --no-pager log %s %s..%s' % (fmt, last_commit, commit))
return str('Changes in %s since %s:\n' % (branch, last_commit)) + '\n'.join(commits)
def get_last_results(resultsFileName):
if not os.path.isfile(resultsFileName):
return None, None, None, None
commit = None
csize = []
cspeed = []
dspeed = []
with open(resultsFileName, 'r') as f:
for line in f:
words = line.split()
if len(words) <= 4: # branch + commit + compilerVer + md5
commit = words[1]
csize = []
cspeed = []
dspeed = []
if (len(words) == 8) or (len(words) == 9): # results: "filename" or "XX files"
csize.append(int(words[1]))
cspeed.append(float(words[3]))
dspeed.append(float(words[5]))
return commit, csize, cspeed, dspeed
def benchmark_and_compare(branch, commit, last_commit, args, executableName, md5sum, compilerVersion, resultsFileName,
testFilePath, fileName, last_csize, last_cspeed, last_dspeed):
sleepTime = 30
while os.getloadavg()[0] > args.maxLoadAvg:
log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds"
% (os.getloadavg()[0], args.maxLoadAvg, sleepTime))
time.sleep(sleepTime)
start_load = str(os.getloadavg())
result = execute('programs/%s -rqi5b1e%s %s' % (executableName, args.lastCLevel, testFilePath), print_output=True)
end_load = str(os.getloadavg())
linesExpected = args.lastCLevel + 1
if len(result) != linesExpected:
raise RuntimeError("ERROR: number of result lines=%d is different that expected %d\n%s" % (len(result), linesExpected, '\n'.join(result)))
with open(resultsFileName, "a") as myfile:
myfile.write('%s %s %s md5=%s\n' % (branch, commit, compilerVersion, md5sum))
myfile.write('\n'.join(result) + '\n')
myfile.close()
    if last_cspeed is None:
log("WARNING: No data for comparison for branch=%s file=%s " % (branch, fileName))
return ""
commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
text = ""
for i in range(0, min(len(cspeed), len(last_cspeed))):
print("%s:%s -%d cSpeed=%6.2f cLast=%6.2f cDiff=%1.4f dSpeed=%6.2f dLast=%6.2f dDiff=%1.4f ratioDiff=%1.4f %s" % (branch, commit, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], float(last_csize[i])/csize[i], fileName))
if (cspeed[i]/last_cspeed[i] < args.lowerLimit):
text += "WARNING: %s -%d cSpeed=%.2f cLast=%.2f cDiff=%.4f %s\n" % (executableName, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], fileName)
if (dspeed[i]/last_dspeed[i] < args.lowerLimit):
text += "WARNING: %s -%d dSpeed=%.2f dLast=%.2f dDiff=%.4f %s\n" % (executableName, i+1, dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], fileName)
if (float(last_csize[i])/csize[i] < args.ratioLimit):
text += "WARNING: %s -%d cSize=%d last_cSize=%d diff=%.4f %s\n" % (executableName, i+1, csize[i], last_csize[i], float(last_csize[i])/csize[i], fileName)
if text:
text = args.message + ("\nmaxLoadAvg=%s load average at start=%s end=%s\n%s last_commit=%s md5=%s\n" % (args.maxLoadAvg, start_load, end_load, compilerVersion, last_commit, md5sum)) + text
return text
def update_config_file(branch, commit):
last_commit = None
commitFileName = working_path + "/commit_" + branch.replace("/", "_") + ".txt"
if os.path.isfile(commitFileName):
with open(commitFileName, 'r') as infile:
last_commit = infile.read()
with open(commitFileName, 'w') as outfile:
outfile.write(commit)
return last_commit
def double_check(branch, commit, args, executableName, md5sum, compilerVersion, resultsFileName, filePath, fileName):
last_commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
if not args.dry_run:
text = benchmark_and_compare(branch, commit, last_commit, args, executableName, md5sum, compilerVersion, resultsFileName, filePath, fileName, csize, cspeed, dspeed)
if text:
log("WARNING: redoing tests for branch %s: commit %s" % (branch, commit))
text = benchmark_and_compare(branch, commit, last_commit, args, executableName, md5sum, compilerVersion, resultsFileName, filePath, fileName, csize, cspeed, dspeed)
return text
def test_commit(branch, commit, last_commit, args, testFilePaths, have_mutt, have_mail):
local_branch = branch.split('/')[1]
version = local_branch.rpartition('-')[2] + '_' + commit
if not args.dry_run:
execute('make -C programs clean lz4 CC=clang MOREFLAGS="-Werror -Wconversion -Wno-sign-conversion -DLZ4_GIT_COMMIT=%s" && ' % version +
'mv programs/lz4 programs/lz4_clang && ' +
'make -C programs clean lz4 lz4c32 MOREFLAGS="-DLZ4_GIT_COMMIT=%s"' % version)
md5_lz4 = hashfile(hashlib.md5(), clone_path + '/programs/lz4')
md5_lz4c32 = hashfile(hashlib.md5(), clone_path + '/programs/lz4c32')
md5_lz4_clang = hashfile(hashlib.md5(), clone_path + '/programs/lz4_clang')
print("md5(lz4)=%s\nmd5(lz4c32)=%s\nmd5(lz4_clang)=%s" % (md5_lz4, md5_lz4c32, md5_lz4_clang))
print("gcc_version=%s clang_version=%s" % (gcc_version, clang_version))
logFileName = working_path + "/log_" + branch.replace("/", "_") + ".txt"
text_to_send = []
results_files = ""
for filePath in testFilePaths:
fileName = filePath.rpartition('/')[2]
resultsFileName = working_path + "/results_" + branch.replace("/", "_") + "_" + fileName.replace(".", "_") + ".txt"
text = double_check(branch, commit, args, 'lz4', md5_lz4, 'gcc_version='+gcc_version, resultsFileName, filePath, fileName)
if text:
text_to_send.append(text)
results_files += resultsFileName + " "
resultsFileName = working_path + "/results32_" + branch.replace("/", "_") + "_" + fileName.replace(".", "_") + ".txt"
text = double_check(branch, commit, args, 'lz4c32', md5_lz4c32, 'gcc_version='+gcc_version, resultsFileName, filePath, fileName)
if text:
text_to_send.append(text)
results_files += resultsFileName + " "
resultsFileName = working_path + "/resultsClang_" + branch.replace("/", "_") + "_" + fileName.replace(".", "_") + ".txt"
text = double_check(branch, commit, args, 'lz4_clang', md5_lz4_clang, 'clang_version='+clang_version, resultsFileName, filePath, fileName)
if text:
text_to_send.append(text)
results_files += resultsFileName + " "
if text_to_send:
send_email_with_attachments(branch, commit, last_commit, args, text_to_send, results_files, logFileName, have_mutt, have_mail)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('testFileNames', help='file or directory names list for speed benchmark')
parser.add_argument('emails', help='list of e-mail addresses to send warnings')
parser.add_argument('--message', '-m', help='attach an additional message to e-mail', default="")
parser.add_argument('--repoURL', help='changes default repository URL', default=default_repo_url)
parser.add_argument('--lowerLimit', '-l', type=float, help='send email if speed is lower than given limit', default=0.98)
parser.add_argument('--ratioLimit', '-r', type=float, help='send email if ratio is lower than given limit', default=0.999)
parser.add_argument('--maxLoadAvg', type=float, help='maximum load average to start testing', default=0.75)
parser.add_argument('--lastCLevel', type=int, help='last compression level for testing', default=5)
parser.add_argument('--sleepTime', '-s', type=int, help='frequency of repository checking in seconds', default=300)
parser.add_argument('--timeout', '-t', type=int, help='timeout for executing shell commands', default=1800)
parser.add_argument('--dry-run', dest='dry_run', action='store_true', help='not build', default=False)
parser.add_argument('--verbose', '-v', action='store_true', help='more verbose logs', default=False)
args = parser.parse_args()
verbose = args.verbose
# check if test files are accessible
testFileNames = args.testFileNames.split()
testFilePaths = []
for fileName in testFileNames:
fileName = os.path.expanduser(fileName)
if os.path.isfile(fileName) or os.path.isdir(fileName):
testFilePaths.append(os.path.abspath(fileName))
else:
log("ERROR: File/directory not found: " + fileName)
exit(1)
# check availability of e-mail senders
have_mutt = does_command_exist("mutt -h")
have_mail = does_command_exist("mail -V")
if not have_mutt and not have_mail:
log("ERROR: e-mail senders 'mail' or 'mutt' not found")
exit(1)
clang_version = execute("clang -v 2>&1 | grep 'clang version' | sed -e 's:.*version \\([0-9.]*\\).*:\\1:' -e 's:\\.\\([0-9][0-9]\\):\\1:g'", verbose)[0];
gcc_version = execute("gcc -dumpversion", verbose)[0];
if verbose:
print("PARAMETERS:\nrepoURL=%s" % args.repoURL)
print("working_path=%s" % working_path)
print("clone_path=%s" % clone_path)
print("testFilePath(%s)=%s" % (len(testFilePaths), testFilePaths))
print("message=%s" % args.message)
print("emails=%s" % args.emails)
print("maxLoadAvg=%s" % args.maxLoadAvg)
print("lowerLimit=%s" % args.lowerLimit)
print("ratioLimit=%s" % args.ratioLimit)
print("lastCLevel=%s" % args.lastCLevel)
print("sleepTime=%s" % args.sleepTime)
print("timeout=%s" % args.timeout)
print("dry_run=%s" % args.dry_run)
print("verbose=%s" % args.verbose)
print("have_mutt=%s have_mail=%s" % (have_mutt, have_mail))
# clone lz4 repo if needed
if not os.path.isdir(working_path):
os.mkdir(working_path)
if not os.path.isdir(clone_path):
execute.cwd = working_path
execute('git clone ' + args.repoURL)
if not os.path.isdir(clone_path):
log("ERROR: lz4 clone not found: " + clone_path)
exit(1)
execute.cwd = clone_path
# check if speedTest.pid already exists
pidfile = "./speedTest.pid"
if os.path.isfile(pidfile):
log("ERROR: %s already exists, exiting" % pidfile)
exit(1)
send_email(args.emails, '[%s:%s] test-lz4-speed.py %s has been started' % (email_header, pid, script_version), args.message, have_mutt, have_mail)
with open(pidfile, 'w') as the_file:
the_file.write(pid)
branch = ""
commit = ""
first_time = True
while True:
try:
if first_time:
first_time = False
else:
if verbose:
log("sleep for %s seconds" % args.sleepTime)
time.sleep(args.sleepTime)
loadavg = os.getloadavg()[0]
if (loadavg <= args.maxLoadAvg):
branches = git_get_branches()
for branch in branches:
commit = execute('git show -s --format=%h ' + branch, verbose)[0]
last_commit = update_config_file(branch, commit)
if commit == last_commit:
log("skipping branch %s: head %s already processed" % (branch, commit))
else:
log("build branch %s: head %s is different from prev %s" % (branch, commit, last_commit))
execute('git checkout -- . && git checkout ' + branch)
print(git_get_changes(branch, commit, last_commit))
test_commit(branch, commit, last_commit, args, testFilePaths, have_mutt, have_mail)
else:
log("WARNING: main loadavg=%.2f is higher than %s" % (loadavg, args.maxLoadAvg))
except Exception as e:
stack = traceback.format_exc()
email_topic = '[%s:%s] ERROR in %s:%s' % (email_header, pid, branch, commit)
send_email(args.emails, email_topic, stack, have_mutt, have_mail)
print(stack)
except KeyboardInterrupt:
os.unlink(pidfile)
send_email(args.emails, '[%s:%s] test-lz4-speed.py %s has been stopped' % (email_header, pid, script_version), args.message, have_mutt, have_mail)
exit(0)
| 47.6875
| 294
| 0.628202
|
4a17d9f14ec8ebe165efe9bd41b64b3ed86b0f53
| 348
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiAttendanceGroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiAttendanceGroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiAttendanceGroupCreateRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2020.04.09
'''
from dingtalk.api.base import RestApi
class OapiAttendanceGroupCreateRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.group = None
self.op_userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.attendance.group.create'
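# Sketch of intended use (not taken from the SDK docs): build the request, set .group to the
# attendance group payload and .op_userid to the operator's userid, then submit it through the
# SDK client as with any other RestApi subclass.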
| 21.75
| 48
| 0.755747
|
4a17da2d583f064d6a4bb30f66ac2c988f984e0c
| 10,618
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpingressrrosubobjectslist_a21ca5185e1490831d56bb810b32d086.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpingressrrosubobjectslist_a21ca5185e1490831d56bb810b32d086.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpingressrrosubobjectslist_a21ca5185e1490831d56bb810b32d086.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class RsvpIngressRroSubObjectsList(Base):
"""Rsvp Ingress RRO Sub-Objects
The RsvpIngressRroSubObjectsList class encapsulates a list of rsvpIngressRroSubObjectsList resources that are managed by the system.
A list of resources can be retrieved from the server using the RsvpIngressRroSubObjectsList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'rsvpIngressRroSubObjectsList'
_SDM_ATT_MAP = {
'BandwidthProtection': 'bandwidthProtection',
'CType': 'cType',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'GlobalLabel': 'globalLabel',
'Ip': 'ip',
'Label': 'label',
'LocalIp': 'localIp',
'Name': 'name',
'NodeProtection': 'nodeProtection',
'P2mpIdAsIp': 'p2mpIdAsIp',
'P2mpIdAsNum': 'p2mpIdAsNum',
'ProtectionAvailable': 'protectionAvailable',
'ProtectionInUse': 'protectionInUse',
'Type': 'type',
}
def __init__(self, parent):
super(RsvpIngressRroSubObjectsList, self).__init__(parent)
@property
def BandwidthProtection(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Bandwidth Protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BandwidthProtection']))
@property
def CType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): C-Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CType']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def GlobalLabel(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Global Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GlobalLabel']))
@property
def Ip(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ip']))
@property
def Label(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Label']))
@property
def LocalIp(self):
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NodeProtection(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Node Protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NodeProtection']))
@property
def P2mpIdAsIp(self):
"""
Returns
-------
- list(str): P2MP ID As IP
"""
return self._get_attribute(self._SDM_ATT_MAP['P2mpIdAsIp'])
@property
def P2mpIdAsNum(self):
"""
Returns
-------
- list(str): P2MP ID displayed in Integer format
"""
return self._get_attribute(self._SDM_ATT_MAP['P2mpIdAsNum'])
@property
def ProtectionAvailable(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Protection Available
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ProtectionAvailable']))
@property
def ProtectionInUse(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Protection In Use
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ProtectionInUse']))
@property
def Type(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Type: IP or Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Type']))
def update(self, Name=None):
"""Updates rsvpIngressRroSubObjectsList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, LocalIp=None, Name=None, P2mpIdAsIp=None, P2mpIdAsNum=None):
"""Finds and retrieves rsvpIngressRroSubObjectsList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve rsvpIngressRroSubObjectsList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all rsvpIngressRroSubObjectsList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- LocalIp (list(str)): Local IP
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- P2mpIdAsIp (list(str)): P2MP ID As IP
- P2mpIdAsNum (list(str)): P2MP ID displayed in Integer format
Returns
-------
- self: This instance with matching rsvpIngressRroSubObjectsList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of rsvpIngressRroSubObjectsList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the rsvpIngressRroSubObjectsList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, BandwidthProtection=None, CType=None, GlobalLabel=None, Ip=None, Label=None, NodeProtection=None, ProtectionAvailable=None, ProtectionInUse=None, Type=None):
"""Base class infrastructure that gets a list of rsvpIngressRroSubObjectsList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- BandwidthProtection (str): optional regex of bandwidthProtection
- CType (str): optional regex of cType
- GlobalLabel (str): optional regex of globalLabel
- Ip (str): optional regex of ip
- Label (str): optional regex of label
- NodeProtection (str): optional regex of nodeProtection
- ProtectionAvailable (str): optional regex of protectionAvailable
- ProtectionInUse (str): optional regex of protectionInUse
- Type (str): optional regex of type
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| 36.740484
| 202
| 0.651535
|
4a17da9c107a8a2af613b8b327c9f11591c0bb7a
| 4,946
|
py
|
Python
|
tests/test_localization.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2017-11-26T18:48:29.000Z
|
2017-11-26T18:48:29.000Z
|
tests/test_localization.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 3
|
2018-06-22T23:33:46.000Z
|
2018-06-25T00:14:35.000Z
|
tests/test_localization.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2020-08-12T08:27:22.000Z
|
2020-08-12T08:27:22.000Z
|
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import streamlink.utils.l10n as l10n
try:
import iso639
import iso3166
ISO639 = True
except ImportError:
ISO639 = False
try:
import pycountry
PYCOUNTRY = True
except ImportError:
PYCOUNTRY = False
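# The same test cases run twice: LocalizationTestsMixin is exercised once against the
# iso639/iso3166 backend and once against pycountry (see the two TestLocalization* classes below).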
class LocalizationTestsMixin(object):
def test_language_code_us(self):
l = l10n.Localization("en_US")
self.assertEqual("en_US", l.language_code)
def test_language_code_kr(self):
l = l10n.Localization("ko_KR")
self.assertEqual("ko_KR", l.language_code)
def test_bad_language_code(self):
self.assertRaises(LookupError, l10n.Localization, "enUS")
def test_equivalent(self):
l = l10n.Localization("en_US")
self.assertTrue(l.equivalent(language="eng"))
self.assertTrue(l.equivalent(language="en"))
self.assertTrue(l.equivalent(language="en", country="US"))
self.assertTrue(l.equivalent(language="en", country="United States"))
def test_equivalent_remap(self):
l = l10n.Localization("fr_FR")
self.assertTrue(l.equivalent(language="fra"))
self.assertTrue(l.equivalent(language="fre"))
def test_not_equivalent(self):
l = l10n.Localization("es_ES")
self.assertFalse(l.equivalent(language="eng"))
self.assertFalse(l.equivalent(language="en"))
self.assertFalse(l.equivalent(language="en", country="US"))
self.assertFalse(l.equivalent(language="en", country="United States"))
self.assertFalse(l.equivalent(language="en", country="ES"))
self.assertFalse(l.equivalent(language="en", country="Spain"))
@patch("locale.getdefaultlocale")
def test_default(self, getdefaultlocale):
getdefaultlocale.return_value = (None, None)
l = l10n.Localization()
self.assertEqual("en_US", l.language_code)
self.assertTrue(l.equivalent(language="en", country="US"))
@patch("locale.getdefaultlocale")
def test_default_invalid(self, getdefaultlocale):
getdefaultlocale.return_value = ("en_150", None)
l = l10n.Localization()
self.assertEqual("en_US", l.language_code)
self.assertTrue(l.equivalent(language="en", country="US"))
def test_get_country(self):
self.assertEqual("US",
l10n.Localization.get_country("USA").alpha2)
self.assertEqual("GB",
l10n.Localization.get_country("GB").alpha2)
self.assertEqual("United States",
l10n.Localization.get_country("United States").name)
def test_get_country_miss(self):
self.assertRaises(LookupError, l10n.Localization.get_country, "XE")
self.assertRaises(LookupError, l10n.Localization.get_country, "XEX")
self.assertRaises(LookupError, l10n.Localization.get_country, "Nowhere")
def test_get_language(self):
self.assertEqual("eng",
l10n.Localization.get_language("en").alpha3)
self.assertEqual("fre",
l10n.Localization.get_language("fra").bibliographic)
self.assertEqual("fra",
l10n.Localization.get_language("fre").alpha3)
self.assertEqual("gre",
l10n.Localization.get_language("gre").bibliographic)
def test_get_language_miss(self):
self.assertRaises(LookupError, l10n.Localization.get_language, "00")
self.assertRaises(LookupError, l10n.Localization.get_language, "000")
self.assertRaises(LookupError, l10n.Localization.get_language, "0000")
def test_country_compare(self):
a = l10n.Country("AA", "AAA", "001", "Test")
b = l10n.Country("AA", "AAA", "001", "Test")
self.assertEqual(a, b)
def test_language_compare(self):
a = l10n.Language("AA", "AAA", "Test")
b = l10n.Language("AA", None, "Test")
self.assertEqual(a, b)
a = l10n.Language("BB", "BBB", "Test")
b = l10n.Language("AA", None, "Test")
self.assertNotEqual(a, b)
@unittest.skipIf(not ISO639, "iso639+iso3166 modules are required to test iso639+iso3166 Localization")
class TestLocalization(LocalizationTestsMixin, unittest.TestCase):
def setUp(self):
l10n.PYCOUNTRY = False
def test_pycountry(self):
self.assertEqual(False, l10n.PYCOUNTRY)
@unittest.skipIf(not PYCOUNTRY, "pycountry module required to test pycountry Localization")
class TestLocalizationPyCountry(LocalizationTestsMixin, unittest.TestCase):
"""Duplicate of all the Localization tests but using PyCountry instead of the iso* modules"""
def setUp(self):
from pycountry import languages, countries
l10n.countries = countries
l10n.languages = languages
l10n.PYCOUNTRY = True
def test_pycountry(self):
self.assertEqual(True, l10n.PYCOUNTRY)
| 36.10219
| 103
| 0.662353
|
4a17daeccdc59ed3332fbd6785a4c817bf0a62b0
| 8,690
|
py
|
Python
|
sdk/python/pulumi_equinix_metal/get_reserved_ip_block.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-08T21:57:33.000Z
|
2021-01-08T21:57:33.000Z
|
sdk/python/pulumi_equinix_metal/get_reserved_ip_block.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2020-12-23T21:37:39.000Z
|
2022-03-25T19:23:17.000Z
|
sdk/python/pulumi_equinix_metal/get_reserved_ip_block.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-08T21:24:44.000Z
|
2021-01-08T21:24:44.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetReservedIpBlockResult',
'AwaitableGetReservedIpBlockResult',
'get_reserved_ip_block',
]
@pulumi.output_type
class GetReservedIpBlockResult:
"""
A collection of values returned by getReservedIpBlock.
"""
def __init__(__self__, address=None, address_family=None, cidr=None, cidr_notation=None, facility=None, gateway=None, global_=None, id=None, ip_address=None, manageable=None, management=None, metro=None, netmask=None, network=None, project_id=None, public=None, quantity=None, type=None):
if address and not isinstance(address, str):
raise TypeError("Expected argument 'address' to be a str")
pulumi.set(__self__, "address", address)
if address_family and not isinstance(address_family, int):
raise TypeError("Expected argument 'address_family' to be a int")
pulumi.set(__self__, "address_family", address_family)
if cidr and not isinstance(cidr, int):
raise TypeError("Expected argument 'cidr' to be a int")
pulumi.set(__self__, "cidr", cidr)
if cidr_notation and not isinstance(cidr_notation, str):
raise TypeError("Expected argument 'cidr_notation' to be a str")
pulumi.set(__self__, "cidr_notation", cidr_notation)
if facility and not isinstance(facility, str):
raise TypeError("Expected argument 'facility' to be a str")
pulumi.set(__self__, "facility", facility)
if gateway and not isinstance(gateway, str):
raise TypeError("Expected argument 'gateway' to be a str")
pulumi.set(__self__, "gateway", gateway)
if global_ and not isinstance(global_, bool):
raise TypeError("Expected argument 'global_' to be a bool")
pulumi.set(__self__, "global_", global_)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if manageable and not isinstance(manageable, bool):
raise TypeError("Expected argument 'manageable' to be a bool")
pulumi.set(__self__, "manageable", manageable)
if management and not isinstance(management, bool):
raise TypeError("Expected argument 'management' to be a bool")
pulumi.set(__self__, "management", management)
if metro and not isinstance(metro, str):
raise TypeError("Expected argument 'metro' to be a str")
pulumi.set(__self__, "metro", metro)
if netmask and not isinstance(netmask, str):
raise TypeError("Expected argument 'netmask' to be a str")
pulumi.set(__self__, "netmask", netmask)
if network and not isinstance(network, str):
raise TypeError("Expected argument 'network' to be a str")
pulumi.set(__self__, "network", network)
if project_id and not isinstance(project_id, str):
raise TypeError("Expected argument 'project_id' to be a str")
pulumi.set(__self__, "project_id", project_id)
if public and not isinstance(public, bool):
raise TypeError("Expected argument 'public' to be a bool")
pulumi.set(__self__, "public", public)
if quantity and not isinstance(quantity, int):
raise TypeError("Expected argument 'quantity' to be a int")
pulumi.set(__self__, "quantity", quantity)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def address(self) -> str:
return pulumi.get(self, "address")
@property
@pulumi.getter(name="addressFamily")
def address_family(self) -> int:
return pulumi.get(self, "address_family")
@property
@pulumi.getter
def cidr(self) -> int:
return pulumi.get(self, "cidr")
@property
@pulumi.getter(name="cidrNotation")
def cidr_notation(self) -> str:
return pulumi.get(self, "cidr_notation")
@property
@pulumi.getter
def facility(self) -> str:
return pulumi.get(self, "facility")
@property
@pulumi.getter
def gateway(self) -> str:
return pulumi.get(self, "gateway")
@property
@pulumi.getter(name="global")
def global_(self) -> bool:
return pulumi.get(self, "global_")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def manageable(self) -> bool:
return pulumi.get(self, "manageable")
@property
@pulumi.getter
def management(self) -> bool:
return pulumi.get(self, "management")
@property
@pulumi.getter
def metro(self) -> str:
return pulumi.get(self, "metro")
@property
@pulumi.getter
def netmask(self) -> str:
return pulumi.get(self, "netmask")
@property
@pulumi.getter
def network(self) -> str:
return pulumi.get(self, "network")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> str:
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def public(self) -> bool:
return pulumi.get(self, "public")
@property
@pulumi.getter
def quantity(self) -> int:
return pulumi.get(self, "quantity")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetReservedIpBlockResult(GetReservedIpBlockResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReservedIpBlockResult(
address=self.address,
address_family=self.address_family,
cidr=self.cidr,
cidr_notation=self.cidr_notation,
facility=self.facility,
gateway=self.gateway,
global_=self.global_,
id=self.id,
ip_address=self.ip_address,
manageable=self.manageable,
management=self.management,
metro=self.metro,
netmask=self.netmask,
network=self.network,
project_id=self.project_id,
public=self.public,
quantity=self.quantity,
type=self.type)
def get_reserved_ip_block(id: Optional[str] = None,
ip_address: Optional[str] = None,
project_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReservedIpBlockResult:
"""
Use this data source to find IP address blocks in Equinix Metal. You can use IP address or a block ID for lookup.
:param str id: UUID of the IP address block to look up
:param str ip_address: Block containing this IP address will be returned
:param str project_id: UUID of the project where the searched block should be
"""
__args__ = dict()
__args__['id'] = id
__args__['ipAddress'] = ip_address
__args__['projectId'] = project_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('equinix-metal:index/getReservedIpBlock:getReservedIpBlock', __args__, opts=opts, typ=GetReservedIpBlockResult).value
return AwaitableGetReservedIpBlockResult(
address=__ret__.address,
address_family=__ret__.address_family,
cidr=__ret__.cidr,
cidr_notation=__ret__.cidr_notation,
facility=__ret__.facility,
gateway=__ret__.gateway,
global_=__ret__.global_,
id=__ret__.id,
ip_address=__ret__.ip_address,
manageable=__ret__.manageable,
management=__ret__.management,
metro=__ret__.metro,
netmask=__ret__.netmask,
network=__ret__.network,
project_id=__ret__.project_id,
public=__ret__.public,
quantity=__ret__.quantity,
type=__ret__.type)
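# Minimal usage sketch (placeholder values, not from the provider docs):
#
#     import pulumi
#     import pulumi_equinix_metal as metal
#
#     block = metal.get_reserved_ip_block(project_id="<project-uuid>",
#                                         ip_address="<address-in-project>")
#     pulumi.export("cidr", block.cidr_notation)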
| 36.822034
| 292
| 0.646375
|
4a17db1446123a84c165af6bdf88e11684b706a0
| 19,718
|
py
|
Python
|
src/oci/cloud_guard/models/problem_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/cloud_guard/models/problem_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/cloud_guard/models/problem_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ProblemSummary(object):
"""
Summary of the Problem.
"""
#: A constant which can be used with the risk_level property of a ProblemSummary.
#: This constant has a value of "CRITICAL"
RISK_LEVEL_CRITICAL = "CRITICAL"
#: A constant which can be used with the risk_level property of a ProblemSummary.
#: This constant has a value of "HIGH"
RISK_LEVEL_HIGH = "HIGH"
#: A constant which can be used with the risk_level property of a ProblemSummary.
#: This constant has a value of "MEDIUM"
RISK_LEVEL_MEDIUM = "MEDIUM"
#: A constant which can be used with the risk_level property of a ProblemSummary.
#: This constant has a value of "LOW"
RISK_LEVEL_LOW = "LOW"
#: A constant which can be used with the risk_level property of a ProblemSummary.
#: This constant has a value of "MINOR"
RISK_LEVEL_MINOR = "MINOR"
#: A constant which can be used with the lifecycle_state property of a ProblemSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a ProblemSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_detail property of a ProblemSummary.
#: This constant has a value of "OPEN"
LIFECYCLE_DETAIL_OPEN = "OPEN"
#: A constant which can be used with the lifecycle_detail property of a ProblemSummary.
#: This constant has a value of "RESOLVED"
LIFECYCLE_DETAIL_RESOLVED = "RESOLVED"
#: A constant which can be used with the lifecycle_detail property of a ProblemSummary.
#: This constant has a value of "DISMISSED"
LIFECYCLE_DETAIL_DISMISSED = "DISMISSED"
#: A constant which can be used with the detector_id property of a ProblemSummary.
#: This constant has a value of "IAAS_ACTIVITY_DETECTOR"
DETECTOR_ID_IAAS_ACTIVITY_DETECTOR = "IAAS_ACTIVITY_DETECTOR"
#: A constant which can be used with the detector_id property of a ProblemSummary.
#: This constant has a value of "IAAS_CONFIGURATION_DETECTOR"
DETECTOR_ID_IAAS_CONFIGURATION_DETECTOR = "IAAS_CONFIGURATION_DETECTOR"
def __init__(self, **kwargs):
"""
Initializes a new ProblemSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this ProblemSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this ProblemSummary.
:type compartment_id: str
:param detector_rule_id:
The value to assign to the detector_rule_id property of this ProblemSummary.
:type detector_rule_id: str
:param risk_level:
The value to assign to the risk_level property of this ProblemSummary.
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type risk_level: str
:param resource_id:
The value to assign to the resource_id property of this ProblemSummary.
:type resource_id: str
:param resource_name:
The value to assign to the resource_name property of this ProblemSummary.
:type resource_name: str
:param resource_type:
The value to assign to the resource_type property of this ProblemSummary.
:type resource_type: str
:param labels:
The value to assign to the labels property of this ProblemSummary.
:type labels: list[str]
:param time_first_detected:
The value to assign to the time_first_detected property of this ProblemSummary.
:type time_first_detected: datetime
:param time_last_detected:
The value to assign to the time_last_detected property of this ProblemSummary.
:type time_last_detected: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ProblemSummary.
Allowed values for this property are: "ACTIVE", "INACTIVE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_detail:
The value to assign to the lifecycle_detail property of this ProblemSummary.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_detail: str
:param detector_id:
The value to assign to the detector_id property of this ProblemSummary.
Allowed values for this property are: "IAAS_ACTIVITY_DETECTOR", "IAAS_CONFIGURATION_DETECTOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type detector_id: str
:param region:
The value to assign to the region property of this ProblemSummary.
:type region: str
:param regions:
The value to assign to the regions property of this ProblemSummary.
:type regions: list[str]
:param target_id:
The value to assign to the target_id property of this ProblemSummary.
:type target_id: str
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'detector_rule_id': 'str',
'risk_level': 'str',
'resource_id': 'str',
'resource_name': 'str',
'resource_type': 'str',
'labels': 'list[str]',
'time_first_detected': 'datetime',
'time_last_detected': 'datetime',
'lifecycle_state': 'str',
'lifecycle_detail': 'str',
'detector_id': 'str',
'region': 'str',
'regions': 'list[str]',
'target_id': 'str'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'detector_rule_id': 'detectorRuleId',
'risk_level': 'riskLevel',
'resource_id': 'resourceId',
'resource_name': 'resourceName',
'resource_type': 'resourceType',
'labels': 'labels',
'time_first_detected': 'timeFirstDetected',
'time_last_detected': 'timeLastDetected',
'lifecycle_state': 'lifecycleState',
'lifecycle_detail': 'lifecycleDetail',
'detector_id': 'detectorId',
'region': 'region',
'regions': 'regions',
'target_id': 'targetId'
}
self._id = None
self._compartment_id = None
self._detector_rule_id = None
self._risk_level = None
self._resource_id = None
self._resource_name = None
self._resource_type = None
self._labels = None
self._time_first_detected = None
self._time_last_detected = None
self._lifecycle_state = None
self._lifecycle_detail = None
self._detector_id = None
self._region = None
self._regions = None
self._target_id = None
@property
def id(self):
"""
**[Required]** Gets the id of this ProblemSummary.
Unique identifier that is immutable on creation
:return: The id of this ProblemSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ProblemSummary.
Unique identifier that is immutable on creation
:param id: The id of this ProblemSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ProblemSummary.
Compartment Identifier where the resource is created
:return: The compartment_id of this ProblemSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ProblemSummary.
Compartment Identifier where the resource is created
:param compartment_id: The compartment_id of this ProblemSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def detector_rule_id(self):
"""
Gets the detector_rule_id of this ProblemSummary.
Identifier of the rule
:return: The detector_rule_id of this ProblemSummary.
:rtype: str
"""
return self._detector_rule_id
@detector_rule_id.setter
def detector_rule_id(self, detector_rule_id):
"""
Sets the detector_rule_id of this ProblemSummary.
Identifier of the rule
:param detector_rule_id: The detector_rule_id of this ProblemSummary.
:type: str
"""
self._detector_rule_id = detector_rule_id
@property
def risk_level(self):
"""
Gets the risk_level of this ProblemSummary.
The Risk Level
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The risk_level of this ProblemSummary.
:rtype: str
"""
return self._risk_level
@risk_level.setter
def risk_level(self, risk_level):
"""
Sets the risk_level of this ProblemSummary.
The Risk Level
:param risk_level: The risk_level of this ProblemSummary.
:type: str
"""
allowed_values = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"]
if not value_allowed_none_or_none_sentinel(risk_level, allowed_values):
risk_level = 'UNKNOWN_ENUM_VALUE'
self._risk_level = risk_level
@property
def resource_id(self):
"""
Gets the resource_id of this ProblemSummary.
Identifier of the Resource
:return: The resource_id of this ProblemSummary.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""
Sets the resource_id of this ProblemSummary.
Identifier of the Resource
:param resource_id: The resource_id of this ProblemSummary.
:type: str
"""
self._resource_id = resource_id
@property
def resource_name(self):
"""
Gets the resource_name of this ProblemSummary.
DisplayName of the Resource
:return: The resource_name of this ProblemSummary.
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""
Sets the resource_name of this ProblemSummary.
DisplayName of the Resource
:param resource_name: The resource_name of this ProblemSummary.
:type: str
"""
self._resource_name = resource_name
@property
def resource_type(self):
"""
Gets the resource_type of this ProblemSummary.
Type of the Resource
:return: The resource_type of this ProblemSummary.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""
Sets the resource_type of this ProblemSummary.
Type of the Resource
:param resource_type: The resource_type of this ProblemSummary.
:type: str
"""
self._resource_type = resource_type
@property
def labels(self):
"""
Gets the labels of this ProblemSummary.
user defined labels on the problem
:return: The labels of this ProblemSummary.
:rtype: list[str]
"""
return self._labels
@labels.setter
def labels(self, labels):
"""
Sets the labels of this ProblemSummary.
user defined labels on the problem
:param labels: The labels of this ProblemSummary.
:type: list[str]
"""
self._labels = labels
@property
def time_first_detected(self):
"""
Gets the time_first_detected of this ProblemSummary.
The date and time the problem was first detected. Format defined by RFC3339.
:return: The time_first_detected of this ProblemSummary.
:rtype: datetime
"""
return self._time_first_detected
@time_first_detected.setter
def time_first_detected(self, time_first_detected):
"""
Sets the time_first_detected of this ProblemSummary.
The date and time the problem was first detected. Format defined by RFC3339.
:param time_first_detected: The time_first_detected of this ProblemSummary.
:type: datetime
"""
self._time_first_detected = time_first_detected
@property
def time_last_detected(self):
"""
Gets the time_last_detected of this ProblemSummary.
The date and time the problem was last detected. Format defined by RFC3339.
:return: The time_last_detected of this ProblemSummary.
:rtype: datetime
"""
return self._time_last_detected
@time_last_detected.setter
def time_last_detected(self, time_last_detected):
"""
Sets the time_last_detected of this ProblemSummary.
The date and time the problem was last detected. Format defined by RFC3339.
:param time_last_detected: The time_last_detected of this ProblemSummary.
:type: datetime
"""
self._time_last_detected = time_last_detected
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this ProblemSummary.
The current state of the Problem.
Allowed values for this property are: "ACTIVE", "INACTIVE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this ProblemSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this ProblemSummary.
The current state of the Problem.
:param lifecycle_state: The lifecycle_state of this ProblemSummary.
:type: str
"""
allowed_values = ["ACTIVE", "INACTIVE"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_detail(self):
"""
Gets the lifecycle_detail of this ProblemSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_detail of this ProblemSummary.
:rtype: str
"""
return self._lifecycle_detail
@lifecycle_detail.setter
def lifecycle_detail(self, lifecycle_detail):
"""
Sets the lifecycle_detail of this ProblemSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
:param lifecycle_detail: The lifecycle_detail of this ProblemSummary.
:type: str
"""
allowed_values = ["OPEN", "RESOLVED", "DISMISSED"]
if not value_allowed_none_or_none_sentinel(lifecycle_detail, allowed_values):
lifecycle_detail = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_detail = lifecycle_detail
@property
def detector_id(self):
"""
Gets the detector_id of this ProblemSummary.
Id of detector associated with the Problem.
Allowed values for this property are: "IAAS_ACTIVITY_DETECTOR", "IAAS_CONFIGURATION_DETECTOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The detector_id of this ProblemSummary.
:rtype: str
"""
return self._detector_id
@detector_id.setter
def detector_id(self, detector_id):
"""
Sets the detector_id of this ProblemSummary.
Id of detector associated with the Problem.
:param detector_id: The detector_id of this ProblemSummary.
:type: str
"""
allowed_values = ["IAAS_ACTIVITY_DETECTOR", "IAAS_CONFIGURATION_DETECTOR"]
if not value_allowed_none_or_none_sentinel(detector_id, allowed_values):
detector_id = 'UNKNOWN_ENUM_VALUE'
self._detector_id = detector_id
@property
def region(self):
"""
Gets the region of this ProblemSummary.
DEPRECATED
:return: The region of this ProblemSummary.
:rtype: str
"""
return self._region
@region.setter
def region(self, region):
"""
Sets the region of this ProblemSummary.
DEPRECATED
:param region: The region of this ProblemSummary.
:type: str
"""
self._region = region
@property
def regions(self):
"""
Gets the regions of this ProblemSummary.
Regions where the problem is found
:return: The regions of this ProblemSummary.
:rtype: list[str]
"""
return self._regions
@regions.setter
def regions(self, regions):
"""
Sets the regions of this ProblemSummary.
Regions where the problem is found
:param regions: The regions of this ProblemSummary.
:type: list[str]
"""
self._regions = regions
@property
def target_id(self):
"""
Gets the target_id of this ProblemSummary.
targetId associated with the problem.
:return: The target_id of this ProblemSummary.
:rtype: str
"""
return self._target_id
@target_id.setter
def target_id(self, target_id):
"""
Sets the target_id of this ProblemSummary.
targetId associated with the problem.
:param target_id: The target_id of this ProblemSummary.
:type: str
"""
self._target_id = target_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
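# A minimal sketch (assumption, not part of the generated OCI module) showing
# how the enum-guarded setters above behave: allowed values are stored
# verbatim, anything else is coerced to the 'UNKNOWN_ENUM_VALUE' sentinel via
# value_allowed_none_or_none_sentinel (provided by oci.util in this SDK).
def _demo_enum_sentinel():
    summary = ProblemSummary()
    summary.risk_level = 'CRITICAL'      # allowed value, stored as-is
    summary.lifecycle_state = 'SNOOZED'  # not allowed, mapped to the sentinel
    assert summary.risk_level == 'CRITICAL'
    assert summary.lifecycle_state == 'UNKNOWN_ENUM_VALUE'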
| 32.00974
| 245
| 0.644792
|
4a17dbe912a0b27bd90a07d0cebe54e7da0ecaaa
| 9,995
|
py
|
Python
|
softwar/clients/SFWindow.py
|
TASnomad/etna-oss-projects
|
fbd252e5c0f38c4a3c086e61c1e2b22178cb9c67
|
[
"MIT"
] | null | null | null |
softwar/clients/SFWindow.py
|
TASnomad/etna-oss-projects
|
fbd252e5c0f38c4a3c086e61c1e2b22178cb9c67
|
[
"MIT"
] | null | null | null |
softwar/clients/SFWindow.py
|
TASnomad/etna-oss-projects
|
fbd252e5c0f38c4a3c086e61c1e2b22178cb9c67
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import time
import json
from threading import Thread
from Tkinter import *
from softZmq import *
from AI.AI import AI
# Class that handles both the game display and the game itself
class SFWindow(object) :
def __init__(self):
        # main window
self.window = Tk()
self.window.tk_setPalette(background = "#FFE082", foreground = "#3F51B5", activebackground = "#FFF9C4", activeforeground = "white")
self.window.geometry("800x600")
self.window.title("Softwar")
self.window.resizable(False, False)
datas = '{"notification_type": 0, "data": {"game_status": 1, "players": [{"y": 0, "x": 0, "energy": 48, "looking": 0, "id": "0x01"}, {"y": 0, "x": 4, "energy": 48, "looking": 1, "id": "0x02"}, {"y": 4, "x": 0, "energy": 48, "looking": 2, "id": "0x03"}, {"y": 4, "x": 4, "energy": 48, "looking": 3, "id": "0x04"}], "map_size": 5, "energy_cells": [{"y": 1, "x": 0, "value": 15}, {"y": 2, "x": 4, "value": 15}, {"y": 5, "x": 3, "value": 15}]}}'
self.mapSize = 5
self.items = {}
        # base window text
key = 'main_text'
self.items.update({key : Label(self.window, text="S O F T W A R")})
self.items[key].config(font = ('Arial',"75", "bold"))
self.items[key].place(relx=0.5, rely=0.2, anchor=CENTER)
self.displayGame(json.loads(datas))
#self.displayFormConnection()
self.window.mainloop()
    # Display the server connection form
def displayFormConnection(self) :
        # form fields:
#-- IP
key = 'ip_input_label'
self.items.update({key : Label(self.window, text="Server IP :")})
self.items[key].config(font = ('Arial',"12"))
self.items[key].place(relx=0.3, rely=0.35, anchor=CENTER)
key = 'ip_input_field'
self.items.update({key : Entry(self.window)})
self.items[key].place(relx=0.5, rely=0.35, anchor=CENTER)
#-- PORT REP
key = 'port_rep_input_label'
self.items.update({key : Label(self.window, text="Server's REP Port :")})
self.items[key].config(font = ('Arial',"12"))
self.items[key].place(relx=0.3, rely=0.4, anchor=CENTER)
key = 'port_rep_input_field'
self.items.update({ key : Entry(self.window)})
self.items[key].place(relx=0.5, rely=0.4, anchor=CENTER)
#-- PORT PUB
key = 'port_pub_input_label'
self.items.update({key : Label(self.window, text="Server's PUB Port :")})
self.items[key].config(font = ('Arial',"12"))
self.items[key].place(relx=0.3, rely=0.45, anchor=CENTER)
key = 'port_pub_input_field'
self.items.update({ key : Entry(self.window)})
self.items[key].place(relx=0.5, rely=0.45, anchor=CENTER)
#-- BUTTON
key = 'form_connection_button'
self.items.update({ key : Button(self.window, text = "Submit", bg = "#3F51B5", fg = "white", activebackground = "#B39DDB", command = self.launchConnection)})
self.items[key].place(relx=0.5, rely=0.5, anchor=CENTER)
self.window.update()
    # Connect to the server: on success we proceed, otherwise display the error and restart the process
def launchConnection(self):
        # read the form values
self.ip = self.items['ip_input_field'].get()
self.rep_port = self.items['port_rep_input_field'].get()
self.pub_port = self.items['port_pub_input_field'].get()
        # remove the form fields
keys_to_delete = ['ip_input_field', 'port_rep_input_field', 'port_pub_input_field', 'form_connection_button']
for key in keys_to_delete :
self.removeOneItem(key)
        # display the values entered in the form
key = 'ip_display'
self.items.update({key : Label(self.window, text = str(self.ip))})
self.items[key].config(font = ('Arial',"12", "bold"))
self.items[key].place(relx=0.5, rely=0.35, anchor=CENTER)
key = 'port_rep_display'
self.items.update({key : Label(self.window, text = str(self.rep_port))})
self.items[key].config(font = ('Arial',"12", "bold"))
self.items[key].place(relx=0.5, rely=0.4, anchor=CENTER)
key = 'port_pub_display'
self.items.update({key : Label(self.window, text = str(self.pub_port))})
self.items[key].config(font = ('Arial',"12", "bold"))
self.items[key].place(relx=0.5, rely=0.45, anchor=CENTER)
        # connection status label
key = 'connection_status'
self.items.update({key : Label(self.window, text="Trying to connect to " + str(self.ip), fg = "#E91E63")})
self.items[key].config(font = ('Arial',"12"))
self.items[key].place(relx=0.5, rely=0.8, anchor=CENTER)
self.window.update()
        if not self.connectToServer() :
time.sleep(3)
            # reset the form window
keys_to_delete = ['connection_status', 'connection_status_final', 'ip_input_label', 'port_rep_input_label', 'port_pub_input_label', 'ip_display','port_rep_display', 'port_pub_display']
for key in keys_to_delete :
self.removeOneItem(key)
self.displayFormConnection()
else :
self.playGame()
    # zmq control
def connectToServer(self) :
        # instantiate softZmq
self.zmq = softZmq(self.ip, self.rep_port, self.pub_port)
        # display the connection status
key = 'connection_status_final'
text_status = "Connection status : " + self.zmq.status
self.items.update({key : Label(self.window, text = text_status)})
self.items[key].config(font = ('Arial',"12"))
self.items[key].place(relx=0.5, rely=0.9, anchor=CENTER)
self.window.update()
        # handle the connection result
if self.zmq.status == "connected" :
return True
else :
if self.zmq.repSock != False :
self.zmq.repSock.close()
if self.zmq.pubSock != False :
self.zmq.pubSock.close()
if self.zmq.context != False :
self.zmq.context.destroy()
return False
    # Add an element to the window
def AddItemToWindow(self, key, item, x, y) :
self.items.update({key : item})
self.items[key].place(relx = x, rely = y, anchor = CENTER)
self.window.update()
    # empties the items dict that holds every element to display
def removeAllItems(self) :
        # iterate over a copy of the keys, since entries are deleted in the loop
        for key in list(self.items.keys()) :
            self.items[key].place_forget()
            del self.items[key]
def removeOneItem(self, key) :
self.items[key].place_forget()
del self.items[key]
def receiveDatas(self) :
message = self.zmq.pubSock.recv_multipart()
notification = json.loads(message.pop())
actions = self.ai.chooseActions(notification)
print(actions)
#self.mapSize = state[1]['map_size']
def getPtsForPlayer(self, player) :
plY = player['y']
plX = player['x']
Tx = self.canvasWidth / self.mapSize
Ty = self.canvasHeight / self.mapSize
        # layout of all the points
# 1 # 2 # 3
# 8 # 4
# 7 # 6 # 5
#1
p1X = Tx * 0.1 + plX * Tx
p1Y = Ty * 0.1 + plY * Ty
#3
p3X = Tx * 0.9 + plX * Tx
p3Y = Ty * 0.1 + plY * Ty
#5
p5X = Tx * 0.9 + plX * Tx
p5Y = Ty * 0.9 + plY * Ty
#7
p7X = Tx * 0.1 + plX * Tx
p7Y = Ty * 0.9 + plY * Ty
#left
if player['looking'] == 0 :
#8
p8X = Tx * 0.1 + plX * Tx
p8Y = Ty * 0.5 + plY * Ty
return [(p3X, p3Y), (p5X, p5Y), (p8X, p8Y)]
#top
elif player['looking'] == 1 :
#2
p2X = Tx * 0.5 + plX * Tx
p2Y = Ty * 0.1 + plY * Ty
return [(p2X, p2Y), (p5X, p5Y), (p7X, p7Y)]
#right
elif player['looking'] == 2 :
#4
p4X = Tx * 0.9 + plX * Tx
p4Y = Ty * 0.5 + plY * Ty
return [(p1X, p1Y), (p4X, p4Y), (p7X, p7Y)]
#bottom
elif player['looking'] == 3 :
#6
p6X = Tx * 0.5 + plX * Tx
p6Y = Ty * 0.9 + plY * Ty
return [(p1X, p1Y), (p3X, p3Y), (p6X, p6Y)]
def displayGame(self, datas) :
self.removeAllItems()
key = 'canvas'
self.window.tk_setPalette(background = "#29B6F6", foreground = "white")
self.canvasWidth = 0.8 * 800
self.canvasHeight = 600
self.items.update({key : Canvas(self.window, width = self.canvasWidth, height = self.canvasHeight, background = "#039BE5")})
self.items[key].pack(side = "left")
i = 0
Tx = self.canvasWidth / self.mapSize
Ty = self.canvasHeight / self.mapSize
playerColors = ['#FF5722', '#FFEB3B', '#9C27B0', '#8BC34A']
#look memo : left = 0, top = 1, right = 2, bottom = 3
for player in datas['data']['players'] :
pts = self.getPtsForPlayer(player)
self.items[key].create_polygon(pts, fill=playerColors[i])
i += 1
for energyCell in datas['data']['energy_cells'] :
clX = energyCell['x']
clY = energyCell['y']
p1X = Tx * 0.1 + clX * Tx
p1Y = Ty * 0.1 + clY * Ty
p2X = Tx * 0.9 + clX * Tx
p2Y = Ty * 0.9 + clY * Ty
pts = [(p1X, p1Y), (p2X, p2Y)]
self.items[key].create_oval(pts, fill="#CDDC39", outline="")
self.window.update()
def playGame(self) :
self.ai = AI(self.zmq.id)
self.pubThread = Thread(target = self.receiveDatas)
self.pubThread.start()
self.removeAllItems()
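# A small standalone sketch (not part of the original file) of the triangle
# geometry in getPtsForPlayer: each cell is a Tx x Ty tile and 'looking'
# (0=left, 1=top, 2=right, 3=bottom) picks which edge midpoint becomes the
# apex. For the 640x600 canvas above (Tx=128, Ty=120), a player at (0, 0)
# looking up gets apex (64.0, 12.0) and base corners (115.2, 108.0), (12.8, 108.0).
def _demo_triangle(tx=128.0, ty=120.0):
    apex = (tx * 0.5, ty * 0.1)                           # midpoint of the top edge
    base = [(tx * 0.9, ty * 0.9), (tx * 0.1, ty * 0.9)]   # bottom corners
    return [apex] + base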
| 36.213768
| 449
| 0.555778
|
4a17dbfd28d58160a4726f3884a050ae6e16bebb
| 3,149
|
py
|
Python
|
tests/render/test_render_BulletListContentBlock.py
|
dalisaydavid/great_expectations
|
dbb465a524de4a18859acb7bb885b69a76d278e6
|
[
"Apache-2.0"
] | null | null | null |
tests/render/test_render_BulletListContentBlock.py
|
dalisaydavid/great_expectations
|
dbb465a524de4a18859acb7bb885b69a76d278e6
|
[
"Apache-2.0"
] | null | null | null |
tests/render/test_render_BulletListContentBlock.py
|
dalisaydavid/great_expectations
|
dbb465a524de4a18859acb7bb885b69a76d278e6
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import glob
import json
from string import Template as pTemplate
from great_expectations.render.renderer.content_block import (
ExpectationSuiteBulletListContentBlockRenderer,
)
from great_expectations.render.renderer.content_block.expectation_string import (
substitute_none_for_missing,
)
from six import PY2
def test_substitute_none_for_missing():
assert substitute_none_for_missing(
kwargs={"a": 1, "b": 2},
kwarg_list=["c", "d"]
) == {"a": 1, "b": 2, "c": None, "d": None}
my_kwargs = {"a": 1, "b": 2}
assert substitute_none_for_missing(
kwargs=my_kwargs,
kwarg_list=["c", "d"]
) == {"a": 1, "b": 2, "c": None, "d": None}
assert my_kwargs == {"a": 1, "b": 2}, \
"substitute_none_for_missing should not change input kwargs in place."
@pytest.mark.smoketest
def test_all_expectations_using_test_definitions():
test_files = glob.glob(
"tests/test_definitions/*/expect*.json"
)
all_true = True
failure_count, total_count = 0, 0
types = []
# Loop over all test_files, datasets, and tests:
test_results = {}
for filename in test_files:
        with open(filename) as test_file:
            test_definitions = json.load(test_file)
types.append(test_definitions["expectation_type"])
test_results[test_definitions["expectation_type"]] = []
for dataset in test_definitions["datasets"]:
for test in dataset["tests"]:
# Construct an expectation from the test.
if type(test["in"]) == dict:
fake_expectation = {
"expectation_type": test_definitions["expectation_type"],
"kwargs": test["in"],
}
else:
# This would be a good place to put a kwarg-to-arg converter
continue
# Attempt to render it
render_result = ExpectationSuiteBulletListContentBlockRenderer.render(
[fake_expectation])
assert isinstance(render_result, dict)
assert "content_block_type" in render_result
assert render_result["content_block_type"] in render_result
assert isinstance(render_result[render_result["content_block_type"]], list )
# TODO: Assert that the template is renderable, with all the right arguments, etc.
# rendered_template = pTemplate(el["template"]).substitute(el["params"])
test_results[test_definitions["expectation_type"]].append({
test["title"]: render_result,
# "rendered_template":rendered_template
})
# TODO: accommodate case where multiple datasets exist within one expectation test definition
# We encountered unicode coding errors on Python 2, but since this is just a smoke test, review the smoke test results in python 3.
if PY2:
return
with open('./tests/render/output/test_render_bullet_list_content_block.json', 'w') as f:
json.dump(test_results, f, indent=2)
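# A hedged usage sketch (not part of the test module; the expectation type
# and kwargs below are assumed examples, not read from the test definitions):
def _demo_single_render():
    fake_expectation = {
        "expectation_type": "expect_column_to_exist",
        "kwargs": {"column": "a"},
    }
    result = ExpectationSuiteBulletListContentBlockRenderer.render([fake_expectation])
    assert result["content_block_type"] in result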
| 35.784091
| 135
| 0.618609
|
4a17dc947a2d3ee2f74382b903d154545a4ff1a8
| 569
|
py
|
Python
|
src/utest/driver/d_requests.py
|
Sirius1942/BearSki
|
bdc75d6f06946896e2128f1c095b9baf9863b124
|
[
"MIT"
] | 13
|
2019-12-10T09:07:45.000Z
|
2021-09-08T01:24:22.000Z
|
src/utest/driver/d_requests.py
|
Sirius1942/BearSki
|
bdc75d6f06946896e2128f1c095b9baf9863b124
|
[
"MIT"
] | 1
|
2020-05-06T01:43:50.000Z
|
2020-05-06T01:44:46.000Z
|
src/utest/driver/d_requests.py
|
Sirius1942/BearSki
|
bdc75d6f06946896e2128f1c095b9baf9863b124
|
[
"MIT"
] | 6
|
2020-01-07T07:07:42.000Z
|
2021-06-04T03:38:19.000Z
|
import requests
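# Thin pass-through wrappers around the corresponding `requests` HTTP verbs;
# each returns the underlying requests.Response unchanged.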
def get(url, params=None, **kwargs):
return requests.get(url,params,**kwargs)
def post(url, data=None, json=None, **kwargs):
return requests.post(url, data, json, **kwargs)
def delete(url, **kwargs):
return requests.delete(url, **kwargs)
def put(url, data=None, **kwargs):
    return requests.put(url, data, **kwargs)
def patch(url, data=None, **kwargs):
    return requests.patch(url, data, **kwargs)
def head(url, **kwargs):
return requests.head(url, **kwargs)
def options(url, **kwargs):
return requests.options(url,**kwargs)
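# A minimal usage sketch (assumption, not in the original module); the
# httpbin.org endpoint is a placeholder -- any HTTP server exercises the wrappers.
if __name__ == '__main__':
    resp = get('https://httpbin.org/get', params={'q': 'BearSki'})
    print(resp.status_code, resp.url)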
| 23.708333
| 49
| 0.6942
|
4a17dd0bf8c7f15b511ed5d6c1a67cd62a987a3c
| 3,955
|
py
|
Python
|
socli/auth.py
|
l1n3n01z/socli
|
c6c33708ccd8aba999476326600a70438d7ec9ac
|
[
"BSD-3-Clause"
] | null | null | null |
socli/auth.py
|
l1n3n01z/socli
|
c6c33708ccd8aba999476326600a70438d7ec9ac
|
[
"BSD-3-Clause"
] | 1
|
2018-06-14T12:21:07.000Z
|
2018-06-14T12:22:12.000Z
|
socli/auth.py
|
l1n3n01z/socli
|
c6c33708ccd8aba999476326600a70438d7ec9ac
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from datetime import datetime
from functools import wraps
from getpass import getpass
from bs4 import BeautifulSoup
from requests import Session
# Supporting input in Python 2/3
try:
input = raw_input
except NameError:
pass
# Supporting LWPCookieJar in Python 2/3
try:
from http.cookiejar import LWPCookieJar
except ImportError:
from cookielib import LWPCookieJar
COOKIES_FILE_PATH = '.cookies'
BASE_URL = 'https://stackoverflow.com/'
LOGIN_URL = BASE_URL + 'users/login'
LOGOUT_URL = BASE_URL + 'users/logout'
def login_required(func):
"""
:desc: decorator method to check user's login status
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
:desc: Wrapper to check if user is logged in, if the
stored cookies contain cookie named `acct`
and is not expired.
"""
is_login = False
resp = {'success': False, 'message': 'You are not logged in!'}
if os.path.exists(COOKIES_FILE_PATH):
cookiejar = LWPCookieJar(filename=COOKIES_FILE_PATH)
cookiejar.load()
for cookie in cookiejar:
if cookie.name == 'acct':
expiry_time_obj = datetime.utcfromtimestamp(cookie.expires)
                    # the session is valid only while the `acct` cookie has not expired
                    if datetime.utcnow() < expiry_time_obj:
                        is_login = True
if not is_login:
os.remove(COOKIES_FILE_PATH)
else:
return func(*args, **kwargs)
return resp
return wrapper
def get_session():
"""
:desc: Builds session from the saved cookies, if present.
Otherwise, a new session is created.
:return: requests.Session object
"""
session = Session()
if os.path.exists(COOKIES_FILE_PATH):
session.cookies = LWPCookieJar(filename=COOKIES_FILE_PATH)
session.cookies.load(ignore_discard=True, ignore_expires=True)
return session
def login_prompt():
"""
:desc: Prompts the user to enter email and password
:return: (email, password)
"""
email = input('Email: ')
password = getpass()
return (email, password)
def login(email, password):
"""
:desc: Logs a user in.
:param: email - Email of the user - required
password - Password of the user - required
:return: `dict`
"""
if email == '' or password == '':
return {'success': False, 'message': 'Email/Password field left blank.'}
resp = {'success': False}
data = {'email': email, 'password': password}
session = get_session()
session.cookies = LWPCookieJar(filename=COOKIES_FILE_PATH)
resp_obj = session.post(LOGIN_URL, data=data)
if resp_obj.status_code == 200:
if resp_obj.url == BASE_URL:
session.cookies.save(ignore_expires=True, ignore_discard=True)
resp['success'] = True
resp['message'] = 'Successfully Logged In!'
else:
resp['message'] = 'Incorrect credentials'
else:
resp['message'] = 'Stackoverflow is probably down. Please try again.'
return resp
@login_required
def logout():
"""
:desc: Logout a user. Deletes the cookies.
"""
session = get_session()
logout_page_resp = session.get(LOGOUT_URL)
resp = {'success': False}
soup = BeautifulSoup(logout_page_resp.content, 'html.parser')
fkey_input = soup.find('input', attrs={'name': 'fkey'})
if fkey_input:
data = {'fkey': fkey_input['value']}
resp_obj = session.post(LOGOUT_URL, data=data)
if resp_obj.url == BASE_URL:
if os.path.exists(COOKIES_FILE_PATH):
os.remove(COOKIES_FILE_PATH)
resp['success'] = True
resp['message'] = 'Successfully Logged Out!'
else:
resp['message'] = 'There were some problems. Please try again!'
else:
resp['message'] = 'There were some problems. Please try again!'
return resp
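# A hedged usage sketch (assumption, not part of socli/auth.py): log in with
# prompted credentials, then log out again; login_prompt() blocks on stdin.
if __name__ == '__main__':
    email, password = login_prompt()
    print(login(email, password)['message'])
    print(logout()['message'])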
| 25.849673
| 80
| 0.613906
|
4a17ddbce54bb96b78f6b6516e4408e86e795fce
| 801
|
py
|
Python
|
swig/x64dbgpy/pluginsdk/_scriptapi/comment.py
|
limbernie/x64dbgpy
|
2e2f4108ddbb42cffb80fb444e3ac56924cf1f7a
|
[
"MIT"
] | 1,279
|
2016-06-28T19:17:37.000Z
|
2022-03-29T02:43:01.000Z
|
swig/x64dbgpy/pluginsdk/_scriptapi/comment.py
|
limbernie/x64dbgpy
|
2e2f4108ddbb42cffb80fb444e3ac56924cf1f7a
|
[
"MIT"
] | 60
|
2016-07-04T18:27:24.000Z
|
2021-09-11T08:12:48.000Z
|
swig/x64dbgpy/pluginsdk/_scriptapi/comment.py
|
limbernie/x64dbgpy
|
2e2f4108ddbb42cffb80fb444e3ac56924cf1f7a
|
[
"MIT"
] | 72
|
2016-07-23T00:39:49.000Z
|
2022-01-19T05:08:55.000Z
|
import ctypes
from .. import x64dbg
MAX_COMMENT_SIZE = 512
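# Thin wrappers over the SWIG-generated x64dbg Comment_* API; addresses are
# integers in the debuggee's address space, and Get/GetInfo/GetList return
# None when the underlying call reports failure.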
def Set(addr, text, manual = False):
return x64dbg.Comment_Set(addr, text, manual)
def SetInfo(info):
return x64dbg.Comment_Set(info)
def Get(addr):
text = ctypes.create_string_buffer(MAX_COMMENT_SIZE)
res = x64dbg.Comment_Get(addr, text)
if res:
return text.value
def GetInfo(addr):
info = x64dbg.CommentInfo()
res = x64dbg.Comment_GetInfo(addr, info)
if res:
return info
def Delete(addr):
return x64dbg.Comment_Delete(addr)
def DeleteRange(start, end):
return x64dbg.Comment_DeleteRange(start, end)
def Clear():
x64dbg.Comment_Clear()
def GetList():
l = x64dbg.ListInfo()
res = x64dbg.Comment_GetList(l)
if res:
return x64dbg.GetCommentInfoList(l)
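# A hedged usage sketch (assumption; only meaningful inside an x64dbg session
# where the x64dbgpy bindings are loaded, and 0x00401000 is a placeholder):
#     Set(0x00401000, 'entry point', manual=True)
#     print(Get(0x00401000))
#     Delete(0x00401000)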
| 20.025
| 56
| 0.690387
|
4a17ded76fe3d3cda72be7db717b46a013b4fa02
| 7,413
|
py
|
Python
|
dianping_comment.py
|
mwsssxu/DPspider
|
d0563bc22f426fab38d3923d1b23c2948bf5832b
|
[
"MIT"
] | null | null | null |
dianping_comment.py
|
mwsssxu/DPspider
|
d0563bc22f426fab38d3923d1b23c2948bf5832b
|
[
"MIT"
] | null | null | null |
dianping_comment.py
|
mwsssxu/DPspider
|
d0563bc22f426fab38d3923d1b23c2948bf5832b
|
[
"MIT"
] | null | null | null |
import datetime
import random
import time
import re
# from selenium.webdriver.chrome.options import Options
# from selenium import webdriver
from lxml import etree
import requests
class DianpingComment:
font_size = 14
start_y = 23
def __init__(self, shop_id, cookies, delay=7, handle_ban=False):
self.shop_id = shop_id
self._delay = delay
self._cookies = self._format_cookies(cookies)
self._css_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
self._default_headers = {
'Connection': 'keep-alive',
'Host': 'www.dianping.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
self._cur_request_url = 'http://www.dianping.com/shop/{}/review_all/p1'.format(shop_id)
if handle_ban:
            print('Not going to implement the verification bypass')
# self._browser = self._init_browser()
# self._handle_ban()
def run(self):
self._css_link = self._get_css_link(self._cur_request_url)
self._font_dict = self._get_font_dict(self._css_link)
self._get_conment_page()
def _delay_func(self):
delay_time = random.randint((self._delay - 2) * 10, (self._delay + 2) * 10) * 0.1
time.sleep(delay_time)
# def _init_browser(self):
# """
    #     Initialize the browser
# """
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--disable-gpu')
# browser = webdriver.Chrome(chrome_options=chrome_options)
# browser.get(self._cur_request_url)
# for name, value in self._cookies.items():
# browser.add_cookie({'name': name, 'value': value})
# browser.refresh()
# return browser
# def _handle_ban(self):
# """
    #     Handle the verification challenge triggered when crawling too fast
# """
# try:
# self._browser.refresh()
# time.sleep(1)
# button = self._browser.find_element_by_id('yodaBox')
# move_x_offset = self._browser.find_element_by_id('yodaBoxWrapper').size['width']
# webdriver.ActionChains(self._browser).drag_and_drop_by_offset(
# button, move_x_offset, 0).perform()
# except:
# pass
def _format_cookies(self, cookies):
        # split on the first '=' only, since cookie values may contain '='
        cookies = {cookie.split('=', 1)[0]: cookie.split('=', 1)[1]
                   for cookie in cookies.replace(' ', '').split(';')}
return cookies
def _get_conment_page(self):
"""
        Request the comment pages and replace the styled <span></span> glyphs with real characters
"""
while self._cur_request_url:
self._delay_func()
print('[{now_time}] {msg}'.format(now_time=datetime.datetime.now(), msg=self._cur_request_url))
res = requests.get(self._cur_request_url, headers=self._default_headers, cookies=self._cookies)
html = res.text
class_set = set()
for span in re.findall(r'<span class="([a-zA-Z0-9]{5,6})"></span>', html):
class_set.add(span)
for class_name in class_set:
html = re.sub('<span class="%s"></span>' % class_name, self._font_dict[class_name], html)
doc = etree.HTML(html)
self._parse_comment_page(doc)
try:
self._default_headers['Referer'] = self._cur_request_url
next_page_url = 'http://www.dianping.com' + doc.xpath('.//a[@class="NextPage"]/@href')[0]
except IndexError:
next_page_url = None
self._cur_request_url = next_page_url
def _data_pipeline(self, data):
"""
        Process the data
"""
print(data)
def _parse_comment_page(self, doc):
"""
        Parse a comment page and extract the data
"""
for li in doc.xpath('//*[@class="reviews-items"]/ul/li'):
name = li.xpath('.//a[@class="name"]/text()')[0].strip('\n\r \t')
try:
star = li.xpath('.//span[contains(./@class, "sml-str")]/@class')[0]
star = re.search(r'sml-str(\d+)', star)[1]
except IndexError:
star = 0
time = li.xpath('.//span[@class="time"]/text()')[0].strip('\n\r \t')
score = ' '.join(map(lambda s: s.strip('\n\r \t'), li.xpath('.//span[@class="score"]//text()')))
comment = ''.join(li.xpath('.//div[@class="review-words Hide"]/text()')).strip('\n\r \t')
if not comment:
comment = ''.join(li.xpath('.//div[@class="review-words"]/text()')).strip('\n\r \t')
data = {
'name': name,
'comment': comment,
'star': star,
'score': score,
'time': time,
}
self._data_pipeline(data)
def _get_css_link(self, url):
"""
        Request the first comment page and fetch the CSS style file
"""
res = requests.get(url, headers=self._default_headers, cookies=self._cookies)
html = res.text
css_link = re.search(r'<link re.*?css.*?href="(.*?svgtextcss.*?)">', html)
assert css_link
css_link = 'http:' + css_link[1]
return css_link
def _get_font_dict(self, url):
"""
        Get the dict mapping CSS class styles to their characters
"""
res = requests.get(url, headers=self._css_headers)
html = res.text
background_image_link = re.search(r'background-image:.*?\((.*?svg)\)', html)
assert background_image_link
background_image_link = 'http:' + background_image_link[1]
html = re.sub(r'span.*?\}', '', html)
group_offset_list = re.findall(r'\.([a-zA-Z0-9]{5,6}).*?round:(.*?)px (.*?)px;', html)
font_dict_by_offset = self._get_font_dict_by_offset(background_image_link)
font_dict = {}
for class_name, x_offset, y_offset in group_offset_list:
x_offset = x_offset.replace('.0', '')
y_offset = y_offset.replace('.0', '')
font_dict[class_name] = font_dict_by_offset[int(y_offset)][int(x_offset)]
return font_dict
def _get_font_dict_by_offset(self, url):
"""
        Get the offset-to-character dict; there are at least two SVG layouts (only two encountered so far)
"""
res = requests.get(url, headers=self._css_headers)
html = res.text
font_dict = {}
y_list = re.findall(r'd="M0 (\d+?) ', html)
if y_list:
font_list = re.findall(r'<textPath .*?>(.*?)<', html)
for i, string in enumerate(font_list):
y_offset = self.start_y - int(y_list[i])
sub_font_dict = {}
for j, font in enumerate(string):
x_offset = -j * self.font_size
sub_font_dict[x_offset] = font
font_dict[y_offset] = sub_font_dict
else:
font_list = re.findall(r'<text.*?y="(.*?)">(.*?)<', html)
for y, string in font_list:
y_offset = self.start_y - int(y)
sub_font_dict = {}
for j, font in enumerate(string):
x_offset = -j * self.font_size
sub_font_dict[x_offset] = font
font_dict[y_offset] = sub_font_dict
return font_dict
if __name__ == "__main__":
pass
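# A hedged usage sketch (assumption, not in the original file): subclass and
# override _data_pipeline to persist rows instead of printing them; SHOP_ID
# and COOKIES are placeholders for a real shop id and a logged-in cookie string.
#
#     class CsvComment(DianpingComment):
#         def _data_pipeline(self, data):
#             with open('comments.csv', 'a', encoding='utf-8') as f:
#                 f.write('{name},{star},{score},{time},{comment}\n'.format(**data))
#
#     CsvComment(SHOP_ID, COOKIES, delay=7).run()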
| 35.4689
| 150
| 0.547686
|
4a17df44b1370fdb32d971e0a3a334bf2d9c3e68
| 3,156
|
py
|
Python
|
zksync_sdk/zksync.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 22
|
2021-03-05T07:01:05.000Z
|
2022-03-26T19:15:19.000Z
|
zksync_sdk/zksync.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 23
|
2021-03-01T06:09:26.000Z
|
2022-02-17T21:54:44.000Z
|
zksync_sdk/zksync.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 10
|
2021-03-08T13:43:49.000Z
|
2021-08-23T16:18:14.000Z
|
from eth_account.signers.base import BaseAccount
from web3 import Web3
from zksync_sdk.contract_utils import erc20_abi, zksync_abi
MAX_ERC20_APPROVE_AMOUNT = 115792089237316195423570985008687907853269984665640564039457584007913129639935 # 2^256 - 1
ERC20_APPROVE_THRESHOLD = 57896044618658097711785492504343953926634992332820282019728792003956564819968 # 2^255
class Contract:
def __init__(self, contract_address: str, web3: Web3, account: BaseAccount, abi):
self.contract_address = contract_address
self.web3 = web3
self.contract = self.web3.eth.contract(self.contract_address, abi=abi) # type: ignore[call-overload]
self.account = account
def _call_method(self, method_name, *args, amount=None, **kwargs):
params = {}
if amount is not None:
params['value'] = amount
params['from'] = self.account.address
transaction = getattr(self.contract.functions, method_name)(
*args,
**kwargs
).buildTransaction(params)
transaction.update({'nonce': self.web3.eth.get_transaction_count(self.account.address)})
signed_tx = self.account.sign_transaction(transaction)
txn_hash = self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
txn_receipt = self.web3.eth.waitForTransactionReceipt(txn_hash)
return txn_receipt
class ZkSync(Contract):
def __init__(self, web3: Web3, zksync_contract_address: str, account: BaseAccount):
super().__init__(zksync_contract_address, web3, account, zksync_abi())
def deposit_eth(self, address: str, amount: int):
return self._call_method("depositETH", address, amount=amount)
def deposit_erc20(self, token_address: str, address: str, amount: int):
return self._call_method("depositERC20", token_address, amount, address)
    def full_exit(self, account_id: int, token_address: str):
return self._call_method("requestFullExit", account_id, token_address)
def full_exit_nft(self, account_id: int, token_id: int):
return self._call_method("requestFullExitNFT", account_id, token_id)
def set_auth_pub_key_hash(self, pub_key_hash: bytes, nonce: int):
return self._call_method("setAuthPubkeyHash", pub_key_hash, nonce)
def auth_facts(self, sender_address: str, nonce: int) -> bytes:
return self.contract.caller.authFacts(sender_address, nonce)
class ERC20Contract(Contract):
def __init__(self, web3: Web3, zksync_address: str, contract_address: str,
account: BaseAccount):
self.zksync_address = zksync_address
super().__init__(contract_address, web3, account, erc20_abi())
def approve_deposit(self, max_erc20_approve_amount=MAX_ERC20_APPROVE_AMOUNT):
return self._call_method('approve', self.zksync_address, max_erc20_approve_amount)
def is_deposit_approved(self, erc20_approve_threshold=ERC20_APPROVE_THRESHOLD):
allowance = self.contract.functions.allowance(self.account.address,
self.zksync_address).call()
return allowance >= erc20_approve_threshold
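# A hedged usage sketch (assumption, not in the original module): approve an
# ERC20 deposit once, then deposit; `w3`, `account`, ZKSYNC_ADDRESS and
# TOKEN_ADDRESS are placeholders you must supply.
#
#     token = ERC20Contract(w3, ZKSYNC_ADDRESS, TOKEN_ADDRESS, account)
#     if not token.is_deposit_approved():
#         token.approve_deposit()
#     ZkSync(w3, ZKSYNC_ADDRESS, account).deposit_erc20(
#         TOKEN_ADDRESS, account.address, 10**18)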
| 44.450704
| 118
| 0.720532
|
4a17dfb63da5798cfb931b04555e8fff77a8bf43
| 3,789
|
py
|
Python
|
setup.py
|
DYL521/request_id
|
52059f5a1c2e21c375a585b99a376575b1bd45d5
|
[
"MIT"
] | null | null | null |
setup.py
|
DYL521/request_id
|
52059f5a1c2e21c375a585b99a376575b1bd45d5
|
[
"MIT"
] | null | null | null |
setup.py
|
DYL521/request_id
|
52059f5a1c2e21c375a585b99a376575b1bd45d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'request_tack_id'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/DYL521/request_id'
EMAIL = '1016068291@qq.com'
AUTHOR = 'DYL521'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.1'
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, '../README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['request_tack_id'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
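# A short usage note (not part of the original file): with UploadCommand
# registered under the 'upload' cmdclass key above, publishing is a single
# command, assuming twine is installed and PyPI credentials are configured:
#
#     $ python setup.py upload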
| 28.704545
| 86
| 0.639483
|
4a17e3c79f99926f73d0a4fab0ce410423933587
| 33,399
|
py
|
Python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_task_runs_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_task_runs_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_task_runs_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TaskRunsOperations:
"""TaskRunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
**kwargs: Any
) -> "_models.TaskRun":
"""Gets the detailed information for a given task run.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param task_run_name: The name of the task run.
:type task_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TaskRun, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
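    # A hedged usage sketch (assumption, not generated code): with an
    # authenticated async ContainerRegistryManagementClient `client`, this
    # operation group is reachable as `client.task_runs`:
    #     task_run = await client.task_runs.get(rg_name, registry_name, run_name)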
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
task_run: "_models.TaskRun",
**kwargs: Any
) -> "_models.TaskRun":
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(task_run, 'TaskRun')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TaskRun', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
task_run: "_models.TaskRun",
**kwargs: Any
) -> AsyncLROPoller["_models.TaskRun"]:
"""Creates a task run for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param task_run_name: The name of the task run.
:type task_run_name: str
:param task_run: The parameters of a run that needs to scheduled.
:type task_run: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either TaskRun or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
task_run_name=task_run_name,
task_run=task_run,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a specified task run resource.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param task_run_name: The name of the task run.
:type task_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
task_run_name=task_run_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
update_parameters: "_models.TaskRunUpdateParameters",
**kwargs: Any
) -> "_models.TaskRun":
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_parameters, 'TaskRunUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TaskRun', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
update_parameters: "_models.TaskRunUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.TaskRun"]:
"""Updates a task run with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param task_run_name: The name of the task run.
:type task_run_name: str
:param update_parameters: The parameters for updating a task run.
:type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRunUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either TaskRun or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
task_run_name=task_run_name,
update_parameters=update_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}'} # type: ignore
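# Illustrative usage sketch, not part of the generated code: assuming the enclosing
# async management client exposes this operations group as `client.task_runs`, a
# long-running update is typically driven like this (`rg` and `update_params` are
# placeholders):
#
#     poller = await client.task_runs.begin_update(
#         resource_group_name=rg,
#         registry_name="myregistry",
#         task_run_name="mytaskrun",
#         update_parameters=update_params,
#     )
#     task_run = await poller.result()  # waits for the operation to complete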
async def get_details(
self,
resource_group_name: str,
registry_name: str,
task_run_name: str,
**kwargs: Any
) -> "_models.TaskRun":
"""Gets the detailed information for a given task run that includes all secrets.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param task_run_name: The name of the task run.
:type task_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TaskRun, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_details.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'taskRunName': self._serialize.url("task_run_name", task_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TaskRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}/listDetails'} # type: ignore
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.TaskRunListResult"]:
"""Lists all the task runs for a specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TaskRunListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRunListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TaskRunListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('TaskRunListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns'} # type: ignore
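# Illustrative usage sketch, not part of the generated code: the pageable result of
# list() is consumed with `async for`, while get_details() is awaited directly
# (`client`, `rg` and the resource names below are placeholders):
#
#     async for run in client.task_runs.list(resource_group_name=rg, registry_name="myregistry"):
#         print(run.name)
#     details = await client.task_runs.get_details(rg, "myregistry", "mytaskrun")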
| 52.185938
| 219
| 0.671188
|
4a17e411e3b94d2b3e016ce73135688a2ca43dcb
| 8,293
|
py
|
Python
|
docs/conf.py
|
edoburu/django-any-urlfield
|
d742d1f007eca00d18e221a7e169fa191099c080
|
[
"Apache-2.0"
] | 26
|
2015-02-02T20:51:17.000Z
|
2021-07-14T02:57:52.000Z
|
docs/conf.py
|
edoburu/django-any-urlfield
|
d742d1f007eca00d18e221a7e169fa191099c080
|
[
"Apache-2.0"
] | 11
|
2015-01-26T23:43:09.000Z
|
2019-09-17T12:39:19.000Z
|
docs/conf.py
|
edoburu/django-any-urlfield
|
d742d1f007eca00d18e221a7e169fa191099c080
|
[
"Apache-2.0"
] | 9
|
2015-01-26T23:49:14.000Z
|
2020-01-16T17:18:08.000Z
|
#
# django-any-urlfield documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 13 19:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import django
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangodummy.settings'
django.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-any-urlfield'
copyright = '2012-2018, Diederik van der Boor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.6.1'
# The full version, including alpha/beta/rc tags.
release = '2.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-any-urlfielddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-any-urlfield.tex', 'django-any-urlfield Documentation',
'Diederik van der Boor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-any-urlfield', 'django-any-urlfield Documentation',
['Diederik van der Boor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-any-urlfield', 'django-any-urlfield Documentation',
'Diederik van der Boor', 'django-any-urlfield', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'https://docs.djangoproject.com/en/dev': 'https://docs.djangoproject.com/en/dev/_objects',
}
| 32.143411
| 94
| 0.717111
|
4a17e4139bd3fe8a01e708a3999b1a52865527cb
| 1,536
|
py
|
Python
|
custom-recipes/nlp-visualization-wordcloud/recipe.py
|
dataiku/dss-plugin-nlp-topic-discovery
|
d7731ced95d1a0fa7e117751bc1e3c30f7f1beb5
|
[
"Apache-2.0"
] | 5
|
2020-11-19T17:34:32.000Z
|
2021-06-09T21:09:49.000Z
|
custom-recipes/nlp-visualization-wordcloud/recipe.py
|
dataiku/dss-plugin-nlp-topic-discovery
|
d7731ced95d1a0fa7e117751bc1e3c30f7f1beb5
|
[
"Apache-2.0"
] | 20
|
2020-07-29T08:40:16.000Z
|
2022-03-12T00:56:17.000Z
|
custom-recipes/nlp-visualization-wordcloud/recipe.py
|
dataiku/dss-plugin-nlp-topic-discovery
|
d7731ced95d1a0fa7e117751bc1e3c30f7f1beb5
|
[
"Apache-2.0"
] | 1
|
2021-03-12T10:45:34.000Z
|
2021-03-12T10:45:34.000Z
|
# -*- coding: utf-8 -*-
import os
import logging
from time import perf_counter
from spacy_tokenizer import MultilingualTokenizer
from wordcloud_visualizer import WordcloudVisualizer
from plugin_config_loading import load_config_and_data_wordcloud
# Load config
params, df = load_config_and_data_wordcloud()
output_folder = params.output_folder
output_partition_path = params.output_partition_path
# Load wordcloud visualizer
wordcloud_visualizer = WordcloudVisualizer(
tokenizer=MultilingualTokenizer(stopwords_folder_path=params.stopwords_folder_path),
text_column=params.text_column,
font_folder_path=params.font_folder_path,
language=params.language,
language_column=params.language_column,
subchart_column=params.subchart_column,
remove_stopwords=params.remove_stopwords,
remove_punctuation=params.remove_punctuation,
case_insensitive=params.case_insensitive,
max_words=params.max_words,
color_list=params.color_list,
)
# Prepare data and count tokens for each subchart
frequencies = wordcloud_visualizer.tokenize_and_count(df)
# Clear output folder's target partition
output_folder.delete_path(output_partition_path)
# Save wordclouds to folder
start = perf_counter()
logging.info("Generating wordclouds...")
for temp, output_file_name in wordcloud_visualizer.generate_wordclouds(frequencies):
output_folder.upload_data(os.path.join(output_partition_path, output_file_name), temp.getvalue())
logging.info(f"Generating wordclouds: Done in {perf_counter() - start:.2f} seconds.")
| 34.909091
| 101
| 0.82487
|
4a17e46dc46860094f4cc786744d5978fbd25407
| 4,120
|
py
|
Python
|
Rota_System/Reporting/LinkedBulkReport/Person.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
Rota_System/Reporting/LinkedBulkReport/Person.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
Rota_System/Reporting/LinkedBulkReport/Person.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
__author__ = 'Neil Butcher'
from Rota_System.Reporting.HTMLObjects import HTMLObjects
from datetime import datetime
from Rota_System.StandardTimes import date_string, time_string
from Abstract import AbstractMultiAppointmentReporter, event_title, person_code
class PopulationReporter(object):
def events(self, events):
self._reporter = PersonReporter()
self._reporter.events(events)
def write_reports_about(self, a_list, a_folder):
if not self._reporter._events:  # check the stored event list, not the bound method
return HTMLObjects.HTMLNone()
self._write_index_file(a_list, a_folder)
for person in sorted(a_list, key=lambda p: person_code(p)):
html = self._reporter.report_about(person)
filename = a_folder + '\\' + person_code(person) + '.html'
fileopen = open(filename, 'w')
fileopen.write(html.html_string())
fileopen.close()
def _write_index_file(self, a_list, a_folder):
table = HTMLObjects.HTMLTable()
for person in a_list:
text = HTMLObjects.HTMLLink(person_code(person), "./" + person_code(person) + ".html")
cell = HTMLObjects.HTMLTableCell(text)
row = HTMLObjects.HTMLTableRow(cell)
table.add(row)
html = HTMLObjects.HTMLAll(HTMLObjects.HTMLHead(HTMLObjects.HTMLPageTitle('People')))
html.add(HTMLObjects.HTMLLink("index", "../index.html"))
html.add(HTMLObjects.HTMLTitle('People'))
html.add(table)
filename = a_folder + '\\' + 'index.html'
fileopen = open(filename, 'w')
fileopen.write(html.html_string())
fileopen.close()
class PersonReporter(AbstractMultiAppointmentReporter):
'''
produces a html from a person and a list of events
'''
def report_about(self, an_object):
if not self._events:
return HTMLObjects.HTMLNone()
self.person(an_object)
return self.html()
def person(self, person):
self._all_appointments = set()
self._person = person
for e in self._events:
correct_appointments = filter(lambda x: x.person == person, e.appointments)
self._all_appointments.update(correct_appointments)
self._sorted_appointments = sorted(self._all_appointments, key=lambda app: datetime.combine(app.date, app.time))
def _html_preheader(self):
return HTMLObjects.HTMLLink("people", "./index.html")
def _html_header(self):
return HTMLObjects.HTMLTitle(person_code(self._person))
def _html_table(self):
table = HTMLObjects.HTMLTable(self._html_table_header_row())
for appointment in self._sorted_appointments:
row = self._html_table_row_for_appointment(appointment)
table.add(row)
return table
def _html_table_row_for_appointment(self, appointment):
html = HTMLObjects.HTMLTableRow()
html.add(HTMLObjects.HTMLTableCell(date_string(appointment.date)))
time = time_string(appointment.time)
event_link = HTMLObjects.HTMLLink(appointment.event.title, "../events/" + event_title(appointment.event) + ".html")
html.add(HTMLObjects.HTMLTableCell(time))
html.add(HTMLObjects.HTMLTableCell(self._role_description(appointment)))
html.add(HTMLObjects.HTMLTableCell(event_link))
return html
def _role_description(self, appointment):
roleDescription = appointment.role.description
if appointment.note is not None and len(appointment.note) > 0 and appointment.note != 'None':
roleDescription += '('
roleDescription += appointment.note
roleDescription += ')'
return HTMLObjects.HTMLLink(roleDescription, "../roles/" + appointment.role.description + ".html")
def _html_table_header_row(self):
html = HTMLObjects.HTMLTableRow()
html.add(HTMLObjects.HTMLTableHeaderCell('Date'))
html.add(HTMLObjects.HTMLTableHeaderCell('Time'))
html.add(HTMLObjects.HTMLTableHeaderCell('Role'))
html.add(HTMLObjects.HTMLTableHeaderCell('Event'))
return html
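# Rough usage sketch (comments only; `events`, `people` and the folder are placeholders):
#
#     reporter = PopulationReporter()
#     reporter.events(events)                       # rota events to report on
#     reporter.write_reports_about(people, 'reports\\people')
#
# which writes one linked HTML page per person plus an index.html into the folder.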
| 40.392157
| 123
| 0.674757
|
4a17e497eff4e748707f2acafcee154c863c9a3b
| 213
|
py
|
Python
|
Puzzle.py
|
MattChamberlain/Max_Determinant
|
a9abbdfe190ab79a70c2361058d98f5e45c659ad
|
[
"MIT"
] | null | null | null |
Puzzle.py
|
MattChamberlain/Max_Determinant
|
a9abbdfe190ab79a70c2361058d98f5e45c659ad
|
[
"MIT"
] | null | null | null |
Puzzle.py
|
MattChamberlain/Max_Determinant
|
a9abbdfe190ab79a70c2361058d98f5e45c659ad
|
[
"MIT"
] | null | null | null |
# Find the three-digit numbers ABC that equal five times the product of their digits.
for A in range(1, 10):
for B in range(1, 10):
for C in range(1, 10):
concat = (A * 100) + (B * 10) + C
if concat/5 == (A*B*C):
print(str(A) + str(B) + str(C))
| 23.666667
| 47
| 0.399061
|
4a17e5473e49786cd374fb1b8f0840571e2083dd
| 9,443
|
py
|
Python
|
cpv/pipeline.py
|
zhongmicai/combined_Pvalue
|
0ed3d65a2dc17fa1007bca90533d92d550baf4e3
|
[
"MIT"
] | null | null | null |
cpv/pipeline.py
|
zhongmicai/combined_Pvalue
|
0ed3d65a2dc17fa1007bca90533d92d550baf4e3
|
[
"MIT"
] | null | null | null |
cpv/pipeline.py
|
zhongmicai/combined_Pvalue
|
0ed3d65a2dc17fa1007bca90533d92d550baf4e3
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import sys
import array
import os.path as op
import toolshed as ts
def main():
import argparse
from _common import get_col_num
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument("-c", dest="c", help="column number that has the value to"
"take the acf", default='4')
p.add_argument("--dist", "--distance" "--peak-dist", dest="dist", help="Maximum dist to "
" search for adjacent peaks.", type=int, required=True)
p.add_argument("--acf-dist", help="distance/window-size to use for "
" smoothing. Defaults to 1/3 * peak-dist ", type=int, default=None)
p.add_argument("--step", dest="step", help="step size for bins in the"
" ACF calculation", type=int)
p.add_argument("--seed", dest="seed", help="A value must be at least this"
" large/small in order to seed a region.", type=float,
default=0.05)
p.add_argument("--threshold", dest="threshold", help="After seeding, a value"
" of at least this number can extend a region. ",
type=float)
p.add_argument("--no-fdr", dest="no_fdr", help="Don't use FDR-corrected p-values "
"for finding peaks (either way, we still do multiple-testing correction "
"on the p-values for the regions).", action='store_true',
default=False)
p.add_argument("-p", "--prefix", dest="prefix",
help="prefix for output files", default=None)
p.add_argument("--genomic-control", dest="genomic_control",
help="perform the genomic control correction on the input"
" pvalues", action="store_true", default=False)
p.add_argument("--region-filter-p", help="max adjusted region-level p-value"
" to be reported "
"in final output. this requires the input bed file to have"
" chrom, start, end, 't' columns", type=float, default=1)
p.add_argument("--region-filter-n", help="require at least this many probes"
"for a region to be reported in final output. "
" this requires the input bed file to have chrom, start, "
"end, 't' columns", type=int, default=None)
p.add_argument("--annotate", help="annotate with refGen from this db" \
"in UCSC (e.g. hg19) requires cruzdb", default=None)
p.add_argument('bed_files', nargs='+', help='sorted bed file to process')
args = p.parse_args()
if not (args.prefix):
sys.exit(p.print_help())
if not args.threshold:
args.threshold = args.seed
assert op.exists(args.bed_files[0])
if args.acf_dist is None:
args.acf_dist = int(round(0.33333 * args.dist, -1))
sys.stderr.write("setting --acf-dist to 0.33 * --dist == %i\n" %
args.acf_dist)
col_num = get_col_num(args.c, args.bed_files[0])
return pipeline(col_num, args.step,
args.dist, args.acf_dist, args.prefix,
args.threshold, args.seed,
args.bed_files,
region_filter_p=args.region_filter_p,
region_filter_n=args.region_filter_n,
genome_control=args.genomic_control,
db=args.annotate,
use_fdr=not args.no_fdr)
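# Example invocation of main() above (hypothetical file names; in the released package
# this module is normally driven through the comb-p command-line wrapper):
#
#     python pipeline.py -c 4 --seed 0.05 --dist 500 --step 50 --prefix out input.sorted.bed
#
# which smooths the p-values in column 4, seeds regions at p <= 0.05, joins peaks
# within 500 bases, and writes the out.* intermediate and region files created below.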
def pipeline(col_num, step, dist, acf_dist, prefix, threshold, seed,
bed_files, mlog=True, region_filter_p=1, region_filter_n=None,
genome_control=False, db=None, use_fdr=True):
sys.path.insert(0, op.join(op.dirname(__file__), ".."))
from cpv import acf, slk, fdr, peaks, region_p, stepsize, filter
from cpv._common import genome_control_adjust, genomic_control, bediter
import operator
if step is None:
step = min(acf_dist, stepsize.stepsize(bed_files, col_num))
print("calculated stepsize as: %i" % step, file=sys.stderr)
lags = list(range(1, acf_dist, step))
lags.append(lags[-1] + step)
prefix = prefix.rstrip(".")
putative_acf_vals = acf.acf(bed_files, lags, col_num, simple=False,
mlog=mlog)
acf_vals = []
# go out to max requested distance but stop once an autocorrelation
# < 0.05 is added.
for a in putative_acf_vals:
# a is ((lmin, lmax), (corr, N))
# this heuristic seems to work: stop once the autocorrelation drops
# below the 0.04 cutoff used below.
if a[1][0] < 0.04 and len(acf_vals) > 2: break
acf_vals.append(a)
if a[1][0] < 0.04 and len(acf_vals): break
# save the arguments that this was called with.
with open(prefix + ".args.txt", "w") as fh:
print(" ".join(sys.argv[1:]) + "\n", file=fh)
import datetime
print("date: %s" % datetime.datetime.today(), file=fh)
from .__init__ import __version__
print("version:", __version__, file=fh)
with open(prefix + ".acf.txt", "w") as fh:
acf_vals = acf.write_acf(acf_vals, fh)
print("wrote: %s" % fh.name, file=fh)
print("ACF:\n", open(prefix + ".acf.txt").read(), file=sys.stderr)
spvals, opvals = array.array('f'), array.array('f')
with ts.nopen(prefix + ".slk.bed.gz", "w") as fhslk:
fhslk.write('#chrom\tstart\tend\tp\tregion-p\n')
for chrom, results in slk.adjust_pvals(bed_files, col_num, acf_vals):
fmt = chrom + "\t%i\t%i\t%.4g\t%.4g\n"
for row in results:
row = tuple(row)
fhslk.write(fmt % row)
opvals.append(row[-2])
spvals.append(row[-1])
print("# original lambda: %.2f" % genomic_control(opvals), file=sys.stderr)
del opvals
gc_lambda = genomic_control(spvals)
print("wrote: %s with lambda: %.2f" % (fhslk.name, gc_lambda),
file=sys.stderr)
if genome_control:
fhslk = ts.nopen(prefix + ".slk.gc.bed.gz", "w")
adj = genome_control_adjust([d['p'] for d in bediter(prefix + ".slk.bed.gz", -1)])
for i, line in enumerate(ts.nopen(prefix + ".slk.bed.gz")):
print("%s\t%.5g" % (line.rstrip("\r\n"), adj[i]), file=fhslk)
fhslk.close()
print("wrote: %s" % fhslk.name, file=sys.stderr)
with ts.nopen(prefix + ".fdr.bed.gz", "w") as fh:
fh.write('#chrom\tstart\tend\tp\tregion-p\tregion-q\n')
for bh, l in fdr.fdr(fhslk.name, -1):
fh.write("%s\t%.4g\n" % (l.rstrip("\r\n"), bh))
print("wrote: %s" % fh.name, file=sys.stderr)
fregions = prefix + ".regions.bed.gz"
with ts.nopen(fregions, "w") as fh:
list(peaks.peaks(prefix + ".fdr.bed.gz", -1 if use_fdr else -2, threshold, seed,
dist, fh, operator.le))
n_regions = sum(1 for _ in ts.nopen(fregions))
print("wrote: %s (%i regions)" % (fregions, n_regions), file=sys.stderr)
if n_regions == 0:
sys.exit()
with ts.nopen(prefix + ".regions-p.bed.gz", "w") as fh:
N = 0
fh.write("#chrom\tstart\tend\tmin_p\tn_probes\tz_p\tz_sidak_p\n")
# use -2 for original, uncorrected p-values in slk.bed
for region_line, slk_p, slk_sidak_p, sim_p in region_p.region_p(
prefix + ".slk.bed.gz",
prefix + ".regions.bed.gz", -2,
step):
fh.write("%s\t%.4g\t%.4g\n" % (region_line, slk_p, slk_sidak_p))
fh.flush()
N += int(slk_sidak_p < 0.05)
print("wrote: %s, (regions with corrected-p < 0.05: %i)" \
% (fh.name, N), file=sys.stderr)
regions_bed = fh.name
#if all(h in header for h in ('t', 'start', 'end')):
if region_filter_n is None: region_filter_n = 0
with ts.nopen(prefix + ".regions-t.bed", "w") as fh:
N = 0
for i, toks in enumerate(filter.filter(bed_files[0],
regions_bed, p_col_name=col_num)):
if i == 0: toks[0] = "#" + toks[0]
else:
if float(toks[6]) > region_filter_p: continue
if int(toks[4]) < region_filter_n: continue
#if region_filter_t and "/" in toks[7]:
# # t-pos/t-neg. if the lower one is > region_filter_t?
# vals = map(int, toks[7].split("/"))
# if min(vals) > region_filter_t: continue
N += 1
print("\t".join(toks), file=sys.stderr)
print(("wrote: %s, (regions with region-p "
"< %.3f and n-probes >= %i: %i)") \
% (fh.name, region_filter_p, region_filter_n, N),
file=sys.stderr)
try:
from cpv import manhattan
regions = manhattan.read_regions(fh.name)
manhattan.manhattan(prefix + ".slk.bed.gz", 3, prefix.rstrip(".") + ".manhattan.png",
False, ['#959899', '#484B4C'], "", False, None,
regions=regions, bonferonni=False)
except ImportError:
pass # they dont have matplotlib
if db is not None:
from cruzdb import Genome
g = Genome(db)
lastf = fh.name
with open(prefix + ".anno.%s.bed" % db, "w") as fh:
fh.write('#')
g.annotate(lastf, ("refGene", "cpgIslandExt"), out=fh,
feature_strand=True, parallel=len(spvals) > 500)
print("wrote: %s annotated with %s" % (fh.name, db), file=sys.stderr)
| 42.922727
| 93
| 0.575559
|
4a17e5b6a2c647ef3962933eb69bbe8547707de1
| 2,777
|
py
|
Python
|
edb/common/verutils.py
|
fantix/edgedb
|
9fcd0a0eaed771a55f958a85fc71efde76fa7ac7
|
[
"Apache-2.0"
] | null | null | null |
edb/common/verutils.py
|
fantix/edgedb
|
9fcd0a0eaed771a55f958a85fc71efde76fa7ac7
|
[
"Apache-2.0"
] | null | null | null |
edb/common/verutils.py
|
fantix/edgedb
|
9fcd0a0eaed771a55f958a85fc71efde76fa7ac7
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import enum
import re
VERSION_PATTERN = re.compile(r"""
^
(?P<release>[0-9]+(?:\.[0-9]+)*)
(?P<pre>
[-]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<dev>
[\.]?
(?P<dev_l>dev)
[\.]?
(?P<dev_n>[0-9]+)?
)?
(?:\+(?P<local>[a-z0-9]+(?:[\.][a-z0-9]+)*))?
$
""", re.X)
class VersionStage(enum.IntEnum):
DEV = 0
ALPHA = 10
BETA = 20
RC = 30
FINAL = 40
class Version(NamedTuple):
major: int
minor: int
stage: VersionStage
stage_no: int
local: Tuple[str, ...]
def __str__(self):
ver = f'{self.major}.{self.minor}'
if self.stage is not VersionStage.FINAL:
ver += f'-{self.stage.name.lower()}.{self.stage_no}'
if self.local:
ver += f'{("+" + ".".join(self.local)) if self.local else ""}'
return ver
def parse_version(ver: str) -> Version:
v = VERSION_PATTERN.match(ver)
if v is None:
raise ValueError(f'cannot parse version: {ver}')
local = []
if v.group('pre'):
pre_l = v.group('pre_l')
if pre_l in {'a', 'alpha'}:
stage = VersionStage.ALPHA
elif pre_l in {'b', 'beta'}:
stage = VersionStage.BETA
elif pre_l in {'c', 'rc'}:
stage = VersionStage.RC
else:
raise ValueError(f'cannot determine release stage from {ver}')
stage_no = int(v.group('pre_n'))
if v.group('dev'):
local.extend(['dev', v.group('dev_n')])
elif v.group('dev'):
stage = VersionStage.DEV
stage_no = int(v.group('dev_n'))
else:
stage = VersionStage.FINAL
stage_no = 0
if v.group('local'):
local.extend(v.group('local').split('.'))
release = [int(r) for r in v.group('release').split('.')]
return Version(
major=release[0],
minor=release[1],
stage=stage,
stage_no=stage_no,
local=tuple(local),
)
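# Illustrative behaviour of parse_version() (comments only, not part of the module):
#
#     parse_version('1.0-beta.2')
#         -> Version(major=1, minor=0, stage=VersionStage.BETA, stage_no=2, local=())
#     parse_version('1.0-alpha.3+dev.5')
#         -> stage ALPHA, stage_no 3, local ('dev', '5'); str() gives '1.0-alpha.3+dev.5'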
| 24.794643
| 74
| 0.57004
|
4a17e6d6503e062c2123fda9334d66d1ff9fff5f
| 7,948
|
py
|
Python
|
robust_fitting/lmeds.py
|
jswulff/mrflow
|
1bf782d3a75c292945a8a5ed20c6692242e9ef3a
|
[
"RSA-MD"
] | 124
|
2017-06-27T20:07:54.000Z
|
2022-03-15T03:42:55.000Z
|
robust_fitting/lmeds.py
|
jswulff/mrflow
|
1bf782d3a75c292945a8a5ed20c6692242e9ef3a
|
[
"RSA-MD"
] | 8
|
2017-07-17T00:51:54.000Z
|
2020-02-21T08:02:27.000Z
|
robust_fitting/lmeds.py
|
jswulff/mrflow
|
1bf782d3a75c292945a8a5ed20c6692242e9ef3a
|
[
"RSA-MD"
] | 38
|
2017-07-19T21:39:56.000Z
|
2021-06-24T04:11:53.000Z
|
#! /usr/bin/env python2
import numpy as np
import sys # For exception handling
"""
General LMedS implementation.
"""
def niter_LMEDS(p, epsilon, s, Nmax=100000):
"""
How many samples to compute with LMEDS.
Parameters
----------
p : float
Probability that at least 1 of the samples is free of outliers
epsilon : float
Proportion of outliers
s : int
Sample size (model complexity)
Nmax : int, optional
Upper bound on number of iterations. Default = 100000.
"""
if Nmax < 1:
return 100000
if epsilon <= 0:
return 1
N = int(np.ceil(np.log(1-p) / np.log(1-(1-epsilon)**s)))
if N > Nmax:
return Nmax
else:
return N
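# Worked example of the formula above: with p = 0.99, an assumed outlier fraction
# epsilon = 0.5 and sample size s = 3, one draw is all-inlier with probability
# (1 - 0.5)**3 = 0.125, so
#     N = ceil(log(1 - 0.99) / log(1 - 0.125)) = ceil(34.5) = 35
# i.e. niter_LMEDS(0.99, 0.5, 3) == 35 samples are needed to see at least one
# outlier-free sample with 99% confidence.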
def solve(A, b, m=-1, seed=None, min_iters=1, robust=False, recompute_model=True, do_print=False):
"""
Solve least-squares problem using LMedS.
Solves
x = argmin_q median ||Aq - b||
Parameters
----------
A : array_like
b : array_like
Parameters of problem
m : int, optional
Model complexity. Default: A.shape[1]
seed : int
Optional seed for the random number generator
min_iters : int
Optional minimum amount of iterations.
robust : bool, optional
If set to True, use the absolute error instead of the squared error. Default = False.
recompute_model : bool, optional
Whether to recompute the model as a weighted least-squares problem. Default = True
Returns
-------
model : array_like
The best computed model
inliers : (N,) array_like
Inlier map.
"""
if m == -1:
s = A.shape[1]
else:
s = m
p = 0.99
epsilon = 0.5
if seed is not None:
np.random.seed(int(max(0,seed)))
niters = niter_LMEDS(p, epsilon, s)
niters = max(niters,min_iters)
if do_print:
print('Running for {} iterations.'.format(niters))
N = A.shape[0]
best_model = np.zeros(s)
best_cost = 1e12
best_residuals = np.zeros(N)
for i in range(niters):
sample_indices = np.random.choice(N, s, replace=False)
A_sample = A[sample_indices,:]
b_sample = b[sample_indices]
try:
model = np.linalg.lstsq(A_sample,b_sample)[0]
except Exception as inst:
print('Exception occured..')
print(inst)
continue
if robust:
cost_sq = np.abs(A.dot(model) - b)
else:
cost_sq = (A.dot(model) - b)**2
med_cost_sq = np.median(cost_sq)
if med_cost_sq < best_cost:
if do_print:
print('Updating.')
print('\t New model: {}'.format(model))
print('\t Median cost: {}'.format(med_cost_sq))
print('\t Best model computed from data points:')
print(np.c_[A_sample, b_sample])
best_model = model.ravel()
best_cost = med_cost_sq
best_residuals = cost_sq.ravel()
if best_cost == 1e12:
return None, None
if recompute_model:
# Compute final least squares estimate, according to
# http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node25.html
#sigma_hat = 1.4826 * (1.0 + 5.0 / (2*N - s + 1)) * np.sqrt(best_cost)
sigma_hat = 1.4826 * (1.0 + 5.0 / (N - s)) * np.sqrt(best_cost)
print('Robust standard deviation: {}'.format(sigma_hat))
#inliers = best_residuals < (2.5 * sigma_hat)**2
inliers = best_residuals <= best_cost
print('\t Number of inliers: {0} ({1:2.2f} %)'.format(inliers.sum(),inliers.sum() * 1.0/len(inliers) * 100.0))
model = np.linalg.lstsq(A[inliers,:],b[inliers])[0]
else:
inliers = np.ones_like(best_residuals)
model = best_model
return model, inliers
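# Usage sketch for solve() (comments only; the data below is made up for illustration):
#
#     # Fit y = 2*x + 1 from points where roughly a third of the responses are corrupted.
#     x = np.linspace(0, 10, 50)
#     A = np.c_[x, np.ones_like(x)]     # design matrix for slope + intercept
#     b = 2 * x + 1
#     b[::3] += 20                      # inject gross outliers
#     model, inliers = solve(A, b, seed=0)
#     # model should be close to [2, 1]; inliers flags the uncorrupted points.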
def estimate(X, m, estimate_model, estimate_residuals, recompute_model=True, recompute_init=False, seed=None, min_iters=1, do_print=False):
"""
General estimation function, using LMEDS.
Parameters
----------
X : (N, D) array_like
Array of datapoints. Each row corresponds to a datapoint, each
column to a dimension.
m : int
Number of datapoints to be contained in a single sample.
estimate_model : function
Function to fit the model to a selection of datapoints. Should take
a single array_like object as parameter, so it can be called as
model = estimate_model(X[selection,:])
The function estimate_model can also return None, in case no valid model
can be fitted to the sample.
estimate_residuals : function
Function to compute residuals. Should take an array_like object as
parameter, as well as the model, and return an array of squared residuals.
residuals = estimate_residuals(X, model)
recompute_model : bool, optional
Recompute the model using all inliers, computed by using the robust
estimation for the standard deviation. In order to do so, the function
estimate_model has to be able to handle more than m datapoints.
Default: True
recompute_init : bool, optional
If set and recompute_model is True, the best estimate is supplied to
estimate_model() via the ``init'' kwarg. That is, in the model
recomputation step,
model_final = estimate_model(X[inliers,:], init=best_model)
For this to work, estimate_model has to take an optional init kwarg.
Default: False.
seed : int, optional
Initialize random number generator to a given state.
Default: No seed.
min_iters : int, optional
Minimum number of iterations.
Default: 1
Returns
-------
model : object
The computed model with most inliers.
inliers : (N,) array_like
Inlier map. If the results are not re-computed, this is all ones.
"""
if seed is not None:
np.random.seed(int(max(0,seed)))
p = 0.99
N = X.shape[0]
niters = niter_LMEDS(p, 0.5, m)
# Enforce a minimum number of iterations.
niters = max(niters,min_iters)
best_model = []
best_cost = 1e12
best_residuals = np.zeros(N)
if do_print:
print('Running LMEDS for a maximum of {} iterations.'.format(niters))
for i in range(niters):
sample_indices = np.random.choice(N, m, replace=False)  # sample without replacement, as in solve()
sample = X[sample_indices,:]
try:
model = estimate_model(sample)
except Exception as inst:
print('Exception occured..')
print(inst)
continue
if model is None:
continue
cost_sq = estimate_residuals(X, model)
med_cost_sq = np.median(cost_sq)
if med_cost_sq < best_cost:
if do_print:
print('Iteration {}. Updating.'.format(i))
print('\t New model: {}'.format(model))
print('\t Median cost: {}'.format(med_cost_sq))
print('\t Best model computed from data points:')
print(sample)
best_model = model
best_cost = med_cost_sq
best_residuals = cost_sq
if recompute_model:
# Compute final least squares estimate, according to
# http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node25.html
#sigma_hat = 1.4826 * (1.0 + 5.0 / (2*N - m + 1)) * np.sqrt(best_cost)
#inliers = best_residuals < (2.5 * sigma_hat)**2
# Just use best 50% of data points
inliers = best_residuals <= best_cost
if recompute_init:
model = estimate_model(X[inliers,:],init=best_model)
else:
model = estimate_model(X[inliers,:])
else:
inliers = np.ones_like(best_residuals) > 0
model = best_model
return model, inliers
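# Minimal self-test sketch of estimate() with user-supplied callbacks; the line model
# and data below are made up for illustration and are not part of the original module.
if __name__ == '__main__':
    demo_x = np.linspace(0, 10, 60)
    X_demo = np.c_[demo_x, 2.0 * demo_x + 1.0]       # columns: x, y on the line y = 2x + 1
    X_demo[::4, 1] += 15.0                           # corrupt about a quarter of the y values
    def fit_line(sample, init=None):
        # Least-squares fit of y = a*x + b to the sampled rows.
        A = np.c_[sample[:, 0], np.ones(len(sample))]
        return np.linalg.lstsq(A, sample[:, 1], rcond=None)[0]
    def line_residuals(X, model):
        # Squared vertical residuals of every datapoint under the model.
        pred = model[0] * X[:, 0] + model[1]
        return (X[:, 1] - pred) ** 2
    model, inliers = estimate(X_demo, m=2, estimate_model=fit_line,
                              estimate_residuals=line_residuals, seed=0)
    print('recovered line parameters:', model, 'inliers kept:', inliers.sum())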
| 29.328413
| 139
| 0.599144
|
4a17e6de4e43241f989869ae0ce8fd025aff8ebc
| 755
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractUkel2x.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractUkel2x.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractUkel2x.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractUkel2x(item):
"""
#'Ukel2x
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].lower().startswith('volume'):
return buildReleaseMessageWithType(item, 'Kokugensou wo Item Cheat de Ikinuku', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().startswith('dungeon kurashi no moto yuusha chapter'):
return buildReleaseMessageWithType(item, 'Dungeon Kurashi No Moto Yuusha', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().startswith('munivit anima chapter'):
return buildReleaseMessageWithType(item, 'Munivit Anima', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
return False
| 50.333333
| 119
| 0.739073
|
4a17e807f5a4576642a4e0e0da7dbe37ccb70c25
| 591
|
py
|
Python
|
Python/strings.py
|
rithsuon/LecturesHW
|
523b15071066e6470d2f066a6a6af4858044803f
|
[
"MIT"
] | 3
|
2021-01-25T18:54:02.000Z
|
2021-05-12T16:35:01.000Z
|
Python/strings.py
|
rithsuon/LecturesHW
|
523b15071066e6470d2f066a6a6af4858044803f
|
[
"MIT"
] | null | null | null |
Python/strings.py
|
rithsuon/LecturesHW
|
523b15071066e6470d2f066a6a6af4858044803f
|
[
"MIT"
] | 34
|
2018-01-11T17:25:56.000Z
|
2018-08-20T04:07:22.000Z
|
# strings can be enclosed in single, double, or triple quotes
a = "Hello world"
b = 'CECS 424 is awesome'
c = """Triple-quoted strings
can go more than one line"""
# can index strings with []
d = a[0] # d = 'H'
# the slicing operator [i:j] can extract substrings from index
# i <= k < j
e = a[2:5] # e == "llo"
f = b[:4] # f == "CECS"
g = b[5:] # g == "424 is awesome"
# concatenation with +
h = a + b
# repetition with *
print(a * 2)
# parse with int()
print("5" + "2") # "52"
print(int("5") + int("2")) # 7
# values can be converted to strings with str()
print(str(100)) # "100"
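# a few more common string operations (added for illustration)
print(len(a))           # 11 characters in "Hello world"
print(a.upper())        # "HELLO WORLD"
print(b.split())        # ['CECS', '424', 'is', 'awesome']
print(a.find("world"))  # 6 -- index where the substring starts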
| 21.888889
| 62
| 0.592217
|
4a17e82a1a744742de5970841ed0ce6923a1fd3d
| 1,684
|
py
|
Python
|
model-optimizer/extensions/front/kaldi/fuse_repeated_reshape.py
|
mypopydev/dldt
|
8cd639116b261adbbc8db860c09807c3be2cc2ca
|
[
"Apache-2.0"
] | 3
|
2019-07-08T09:03:03.000Z
|
2020-09-09T10:34:17.000Z
|
model-optimizer/extensions/front/kaldi/fuse_repeated_reshape.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 3
|
2020-11-13T18:59:18.000Z
|
2022-02-10T02:14:53.000Z
|
model-optimizer/extensions/front/kaldi/fuse_repeated_reshape.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 1
|
2018-12-14T07:56:02.000Z
|
2018-12-14T07:56:02.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
from mo.front.common.replacement import FrontReplacementPattern
from mo.middle.passes.eliminate import remove_op_node
class FuseRepeatedReshapes(FrontReplacementPattern):
enabled = False
@staticmethod
def pattern():
return dict(
nodes=[
('reshape_1', dict(kind='op', op='Reshape')),
('data_node', dict(kind='data')),
('reshape_2', dict(kind='op', op='Reshape'))
],
edges=[
('reshape_1', 'data_node', {'out': 0}),
('data_node', 'reshape_2', {'in': 0})
],
node_attrs=['kind', 'op'],
edge_attrs=['in', 'out'])
@staticmethod
def replace_pattern(graph: nx.MultiDiGraph, match: dict):
node = match['reshape_1']
if (node.has_valid('type') and node.type == 'Reshape' and
len(node.out_nodes()) == 1 and node.out_node().has_valid('kind') and node.out_node().kind == 'data' and
len(node.out_node().out_nodes()) == 1):
remove_op_node(graph, node)
| 35.083333
| 119
| 0.627078
|
4a17e8f3543db23ad76d02075162a964157dcce9
| 10,570
|
py
|
Python
|
tests/test_nbody_ParseElements.py
|
Smithsonian/cheby_checker
|
ce1542e4b1b3303ac08ea823be1eaec06322fd48
|
[
"MIT"
] | 1
|
2020-03-05T15:20:30.000Z
|
2020-03-05T15:20:30.000Z
|
tests/test_nbody_ParseElements.py
|
Smithsonian/cheby_checker
|
ce1542e4b1b3303ac08ea823be1eaec06322fd48
|
[
"MIT"
] | null | null | null |
tests/test_nbody_ParseElements.py
|
Smithsonian/cheby_checker
|
ce1542e4b1b3303ac08ea823be1eaec06322fd48
|
[
"MIT"
] | 2
|
2020-02-04T15:26:08.000Z
|
2020-02-04T18:23:13.000Z
|
# -*- coding: utf-8 -*-
# /tests/test_nbody.py
"""
----------------------------------------------------------------------------
tests for mpc_nbody
Dec 2021
Matthew Payne
Prev Work:
Mike Alexandersen, Matthew Payne & Matthew Holman
This code simplified as of Dec 2021
Removing many tests of non-json input
- The non-json input methods *may* still work, but for now I just want to ensure that the json inputs work
----------------------------------------------------------------------------
"""
# import third-party packages
# -----------------------------------------------------------------------------
import os
import sys
import numpy as np
from astropy.time import Time
import pytest
from filecmp import cmp
import json
# Import neighbouring packages
# -----------------------------------------------------------------------------
sys.path.append(os.environ['REBX_DIR'])
from examples.ephem_forces import ephem_forces
# import the main mnbody code that we want to test ...
from cheby_checker import nbody
from cheby_checker.archaic import parse_input
# old conversion library that may be useful for cross-comparison of various tests ...
from cheby_checker import MPC_library as mpc
# Constants & Test Data
# -----------------------------------------------------------------------------
DATA_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'dev_data')
# Utility functions to help with testing
# -----------------------------------------------------------------------------
def _get_and_set_junk_data(P, BaryEqDirect=False ):
"""
For an input P = ParseElements-class object,
Just populate some junk data within the obbject
Of use for testing some save function(s).
"""
P.time = Time(2458849.5, format='jd', scale='tdb')
v = np.array( [[3., 2., 1., 0.3, 0.2, 0.1]] )
CoV = 0.01 * np.ones((1,6,6))
# Default is to make helio-ecl, then calc bary-eq from that
if not BaryEqDirect:
P.helio_ecl_vec = v
P.helio_ecl_vec_EXISTS = True
P.helio_ecl_cov = CoV
P.helio_ecl_cov_EXISTS = True
P.make_bary_equatorial()
# Alternative is to directly set bary-eq
else:
P.bary_eq_vec = v
P.bary_eq_vec_EXISTS = True
P.bary_eq_cov = CoV
P.bary_eq_cov_EXISTS = True
def is_parsed_good_enough(new_results_file, expected_results_file):
"""
Helper function to help test whether a just-created "new_results_file" file matches
the "expected_results_file" in the "dev_data" directory
"""
if cmp(new_results_file, expected_results_file):
assert True # If files are identical, no further testing needed.
else: # If files not identical, investigate further:
with open(new_results_file, 'r') as fileA, open(expected_results_file, 'r') as fileB :
five_tf = []
for _ in range(0, 5): # First five lines should be identical
lineA = fileA.readline()
lineB = fileB.readline()
five_tf.append(lineA == lineB)
xyzA = np.array(fileA.readline().split(), dtype=float)
xyzB = np.array(fileB.readline().split(), dtype=float)
vA = np.array(fileA.readline().split(), dtype=float)
vB = np.array(fileB.readline().split(), dtype=float)
error, good_tf = compare_xyzv(np.concatenate([xyzA, vA]),
np.concatenate([xyzB, vB]),
1e-13, 1e-14) # 15 mm, 1.5 mm/day
if np.all(good_tf) & np.all(five_tf):
pass # print('Awesome!')
else:
print(f'\n Problem detected in *is_parsed_good_enough* ... ')
print(f'new_results_file={new_results_file}, expected_results_file={expected_results_file}')
print(f'First five lines identical: {five_tf:}')
print(f'Position off by: {error[:3]:}')
print(f'Velocity off by: {error[3:6]:}')
assert np.all(good_tf) & np.all(five_tf)
def compare_xyzv(xyzv0, xyzv1, threshold_xyz, threshold_v):
"""
Calculate the difference between two sets of cartesian coordinates.
"""
if isinstance(xyzv0, list):
xyzv0 = np.array(xyzv0)
if isinstance(xyzv1, list):
xyzv1 = np.array(xyzv1)
error = xyzv0 - xyzv1
good_tf = np.abs(error) < np.array([threshold_xyz] * 3 + [threshold_v] * 3)
return error, good_tf
# Tests of ParseElements
# -----------------------------------------------------------------------------
"""
@pytest.mark.parametrize( ('data_file'),
[ '30101.eq0_postfit',
'30102.eq0_postfit',
'30101.eq0_horizons',
'30102.eq0_horizons'][:1])
def test_parse_orbfit_felfile_txt(data_file):
'''
Test that OrbFit files get parsed correctly.
NB: The ...eq0... files passed in (above) are
the OLD text-file output from ORBFIT orbit-fitting
They have filetypes like .eq0/.eq1
'''
P = nbody.ParseElements()
# Check that the expected attributes exist
# and that they are initiated == None
assert P.helio_ecl_vec_EXISTS is False
assert P.helio_ecl_vec is None
assert P.helio_ecl_cov_EXISTS is False
assert P.helio_ecl_cov is None
# Read the contents of the test file
# We are doing this here because we are explicitly testing ONLY the
# *parse_orbfit_felfile_txt* function below
with open(os.path.join(DATA_DIR, data_file),'r') as fh:
file_contents=fh.readlines()
# call parse_orbfit_felfile_txt
P.parse_orbfit_felfile_txt(file_contents, CHECK_EPOCHS=False)
# Check that the expected attributes exist
# and that they are populated
assert P.helio_ecl_vec_EXISTS is True
assert isinstance(P.helio_ecl_vec, np.ndarray)
assert P.helio_ecl_vec.ndim == 2
assert P.helio_ecl_vec.shape == (1,6)
assert P.helio_ecl_cov_EXISTS is True
assert isinstance(P.helio_ecl_cov, np.ndarray)
assert P.helio_ecl_cov.ndim == 3
assert P.helio_ecl_cov.shape == (1,6,6)
"""
# TODO: These files appear to be missing
@pytest.mark.skip(reason="see TODO above.")
@pytest.mark.parametrize( ('data_file'),
[ '10199fel_num.json',
'1566fel_num.json',
'2003AF23fel_num.json',
'2017AP4fel_num.json',
'545808fel_num.json'])
def test_parse_orbfit_json_A(data_file):
"""
Test that OrbFit files get parsed correctly.
NB(1): The ...json... files passed in (above) are
the mpcorb format jsons derived from ORBFIT orbit-fitting
NB(2): This test deliberately only works for 6-dimension stuff, i.e. gravity-only
"""
P = parse_input.ParseElements()
# Check that the expected attributes exist
assert P.helio_ecl_vec_EXISTS is False
assert P.helio_ecl_vec is None
assert P.helio_ecl_cov_EXISTS is False
assert P.helio_ecl_cov is None
# Read the contents of the test file
# We are doing this here because we are explicitly testing ONLY the
# *parse_orbfit_json* function below
with open(os.path.join(DATA_DIR, data_file),'r') as json_file:
file_contents = json.load(json_file)
# call parse_orbfit_json
P.parse_orbfit_json(file_contents, CHECK_EPOCHS=False)
# Check that the expected attributes exist
# and that they are populated
assert P.helio_ecl_vec_EXISTS is True
assert isinstance(P.helio_ecl_vec, np.ndarray)
assert P.helio_ecl_vec.ndim == 2
assert P.helio_ecl_vec.shape == (1,6)
assert P.helio_ecl_cov_EXISTS is True
assert isinstance(P.helio_ecl_cov, np.ndarray)
assert P.helio_ecl_cov.ndim == 3
assert P.helio_ecl_cov.shape in [(1,6,6),(1,7,7),(1,8,8),(1,9,9)]
"""
def test_save_elements():
    '''Test that saving input elements to an output file works correctly.'''
# Get rid of an save_file.tmp file in the test directory
if os.path.isfile('save_file.tmp'):
os.remove('save_file.tmp')
# Instantiate ...
P = nbody.ParseElements()
# Populate variables (junk data)
_get_and_set_junk_data(P , BaryEqDirect=True)
# Save to file
P.save_elements()
# Check contents of file are as expected
assert cmp('./save_file.tmp', os.path.join(DATA_DIR, 'expected_junk_save.dat'))
# Get rid of an save_file.tmp file in the test directory
if os.path.isfile('save_file.tmp'):
os.remove('save_file.tmp')
names_of_variables = ('data_file', 'file_type', 'test_result_file')
values_for_each_test = [
pytest.param('30101.ele220', 'ele220', 'holman_ic_30101',
marks=pytest.mark.xfail(reason='Not implemented yet.')),
pytest.param('30102.ele220', 'ele220', 'holman_ic_30102',
marks=pytest.mark.xfail(reason='Not implemented yet.')),
('30101.eq0_postfit', 'eq', 'holman_ic_30101'),
('30102.eq0_postfit', 'eq', 'holman_ic_30102'),
('30101.eq0_horizons', 'eq', 'holman_ic_30101_horizons'),
('30102.eq0_horizons', 'eq', 'holman_ic_30102_horizons'),
('10199fel_num.json', 'json', 'expected_10199fel_num.txt'),
]
@pytest.mark.parametrize( names_of_variables, values_for_each_test )
def test_instantiation_from_data_files(data_file, file_type, test_result_file):
'''
Test that instantiation with data works
(by doing so we essentially test most/all functionalities).
'''
# Where we will save a local test file
save_file='save_file.tmp'
if os.path.isfile(save_file) : os.remove(save_file)
# Instantiate from file (which calls *make_bary_equatorial*)
# and then save to save_file='save_file.tmp'
nbody.ParseElements( input = os.path.join(DATA_DIR, data_file),
filetype = file_type,
save_parsed=True,
save_file=save_file,
CHECK_EPOCHS=False )
# Check the output
is_parsed_good_enough( save_file , os.path.join(DATA_DIR, test_result_file) )
# Tidy-up by removing any local test file
if os.path.isfile(save_file) : os.remove(save_file)
"""
| 36.448276
| 108
| 0.600757
|
4a17e96812b61a0b269ae4cdfa052b412ecfb19d
| 10,326
|
py
|
Python
|
toxicity/linear_predictor.py
|
zihan993/jads_kaggle
|
a26af9ce11ec067dccdfbe7393c2ee109974f116
|
[
"MIT"
] | null | null | null |
toxicity/linear_predictor.py
|
zihan993/jads_kaggle
|
a26af9ce11ec067dccdfbe7393c2ee109974f116
|
[
"MIT"
] | null | null | null |
toxicity/linear_predictor.py
|
zihan993/jads_kaggle
|
a26af9ce11ec067dccdfbe7393c2ee109974f116
|
[
"MIT"
] | null | null | null |
import numpy as np
import multiprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
try:
from xgboost import XGBClassifier
except ImportError:
print("XGBoost not imported.")
from predictor import Predictor
class LogisticPredictor(Predictor):
"""
Adapted to our class design from the kernel:
https://www.kaggle.com/jhoward/nb-svm-strong-linear-baseline-eda-0-052-lb
"""
name = 'Logistic Regression Predictor'
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0, # noqa
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=None, name=name):
super().__init__(name)
n_jobs = n_jobs or max(1, multiprocessing.cpu_count() - 1)
self.model = LogisticRegression(penalty=penalty, dual=dual, tol=tol, C=C, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling, class_weight=class_weight,
random_state=random_state, solver=solver, max_iter=max_iter,
multi_class=multi_class, verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)
# Parameters need to be included for cross_validation to work.
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
# Used for internal representation
self.r = None
def fit(self, train_x, train_y, **params):
"""
A function that fits the predictor to the provided dataset
:param train_x Contains the input features
:param train_y Contains the dependent tag values
"""
def pr(y_i):
p = train_x[train_y == y_i].sum(0)
return (p + 1) / ((train_y == y_i).sum() + 1)
self.r = np.log(pr(1) / pr(0))
nb = train_x.multiply(self.r)
self.model.fit(nb, train_y, **params)
def predict_proba(self, test_x):
"""
Predicts the label for the given input
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted labels
"""
m = test_x.multiply(self.r)
return self.model.predict_proba(m)[:, 1]
def predict(self, test_x):
m = test_x.multiply(self.r)
return self.model.predict(m)
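# Minimal usage sketch (hypothetical data): assuming a scipy.sparse CSR matrix
# ``X_train``/``X_test`` of token counts and a binary label vector ``y_train``.
# The NB log-count ratio ``r`` learned in ``fit`` must also scale the features
# at prediction time, which is why both predict methods call
# ``test_x.multiply(self.r)``.
#
#     clf = LogisticPredictor(C=4.0, dual=True)
#     clf.fit(X_train, y_train)
#     probs = clf.predict_proba(X_test)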
class SVMPredictor(Predictor):
"""
    A linear Predictor based on SVMs.
"""
name = 'Linear SVM Predictor'
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=0.0001, C=1.0, multi_class='ovr', # noqa
fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None,
max_iter=1000, name=name):
super().__init__(name=name)
self.model = LinearSVC(penalty=penalty, loss=loss, dual=dual, tol=tol, C=C, multi_class=multi_class,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
class_weight=class_weight, verbose=verbose, random_state=random_state, max_iter=max_iter)
# Parameters need to be included for cross_validation to work.
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, train_x, train_y, **params):
"""
A function that fits the predictor to the provided dataset.
:param train_x Contains the input features
:param train_y Contains the dependent tag values
"""
self.model.fit(train_x, train_y, **params)
def predict_proba(self, test_x):
"""
        Returns the decision-function score for each sample. Note that
        LinearSVC does not provide calibrated probabilities, so these are
        signed margins rather than probabilities in [0, 1].
        :param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
        :return: The decision-function score for each sample
"""
return self.model.decision_function(test_x)
def predict(self, test_x):
"""
Predicts the label for each sample found in the input.
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted labels (binary) for each sample
"""
return self.model.predict(test_x)
class RFPredictor(Predictor):
"""
An linear Predictor based on Random Forests.
"""
name = 'Linear Random Predictor'
def __init__(self, n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0,
warm_start=False, class_weight=None, name=name):
super().__init__(name=name)
self.model = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_depth=max_depth,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features, max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split, bootstrap=bootstrap,
oob_score=oob_score, n_jobs=n_jobs, random_state=random_state,
verbose=verbose, warm_start=warm_start, class_weight=class_weight)
def fit(self, train_x, train_y, **params):
"""
A function that fits the predictor to the provided dataset.
:param train_x Contains the input features
:param train_y Contains the dependent tag values
"""
self.model.fit(train_x, train_y, **params)
def predict_proba(self, test_x):
"""
Predicts the probability of the label being 1 for the given input.
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted probabilities for each sample
"""
return self.model.predict_proba(test_x)[:, 1]
def predict(self, test_x):
"""
Predicts the label for each sample found in the input.
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted labels (binary) for each sample
"""
return self.model.predict(test_x)
class XGBPredictor(Predictor):
"""
An XGBoost Classifier based on trees.
"""
name = 'XGBoost Predictor'
def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective='binary:logistic',
gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None, name=name):
super().__init__(name=name)
self.model = XGBClassifier(max_depth=int(max_depth), learning_rate=learning_rate, n_estimators=n_estimators,
silent=silent, objective=objective,
gamma=gamma, min_child_weight=min_child_weight, max_delta_step=max_delta_step,
subsample=subsample, colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel, reg_alpha=reg_alpha, reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight, base_score=base_score,
seed=seed, missing=missing)
# Parameters need to be included for cross_validation to work.
self.max_depth = int(max_depth)
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.silent = silent
self.objective = objective
self.gamma = gamma
self.min_child_weight = min_child_weight
self.max_delta_step = max_delta_step
self.subsample = subsample
self.colsample_bytree = colsample_bytree
self.colsample_bylevel = colsample_bylevel
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.scale_pos_weight = scale_pos_weight
self.base_score = base_score
self.seed = seed
self.missing = missing
def fit(self, train_x, train_y, **params):
"""
A function that fits the predictor to the provided dataset.
:param train_x Contains the input features
:param train_y Contains the dependent tag values
"""
self.model.fit(train_x, train_y, **params)
def predict_proba(self, test_x):
"""
Predicts the probability of the label being 1 for the given input.
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted probabilities for each sample
"""
return self.model.predict_proba(test_x)[:, 1]
def predict(self, test_x):
"""
Predicts the label for each sample found in the input.
:param test_x: a (potentially sparse) array of shape: (n_samples, n_features)
:return: The predicted labels (binary) for each sample
"""
return self.model.predict(test_x)
| 41.46988
| 120
| 0.630738
|
4a17e99cf3c67b0f93d5b18058cd3ddb05e35423
| 14,427
|
py
|
Python
|
query_clu.py
|
tahumada/kowalski-searches
|
824cd0e8cfb5b21441903fc18bdbd4e83b729b91
|
[
"MIT"
] | null | null | null |
query_clu.py
|
tahumada/kowalski-searches
|
824cd0e8cfb5b21441903fc18bdbd4e83b729b91
|
[
"MIT"
] | null | null | null |
query_clu.py
|
tahumada/kowalski-searches
|
824cd0e8cfb5b21441903fc18bdbd4e83b729b91
|
[
"MIT"
] | 1
|
2020-11-30T19:02:24.000Z
|
2020-11-30T19:02:24.000Z
|
'''
Query Kowalski with cone searches centred
on CLU galaxies, searching for transients
given a set of constraints.
'''
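# Example invocation (illustrative values only); the flags map onto the
# argparse options defined at the bottom of this script:
#
#     python query_clu.py --radius 1.5 --min-days 3 --max-days 14 \
#         --min-dist 0 --max-dist 200 --min-dec -30 --slices 40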
def print_query_params(args):
'''Print a summary of the query parameters'''
print("#-----")
print("Cone search parameters:")
print(f"Search radius {args.radius} arcmin")
print(f"Minimum time between the first and last alert {args.min_days} days")
print(f"Maximum time between the first and last alert {args.max_days} days")
print(f"CLU galaxies selected with distance between {args.min_dist}Mpc and {args.max_dist}Mpc, with Dec > {args.min_dec}")
print(f"Query divided in {args.slices} slices")
print("#-----")
print(" ")
def get_programidx(program_name, username, password):
'''Given a marshal science program name, it returns its programidx'''
r = requests.post('http://skipper.caltech.edu:8080/cgi-bin/growth/list_programs.cgi', auth=(username, password))
programs = json.loads(r.text)
    program_dict = {p['name']: p['programidx'] for p in programs}
try:
return program_dict[program_name]
except KeyError:
print(f'The user {username} does not have access to the program {program_name}')
return None
def get_candidates_growth_marshal(program_name, username, password):
'''Query the GROWTH db for the science programs'''
programidx = get_programidx(program_name, username, password)
    if programidx is None:
return None
r = requests.post('http://skipper.caltech.edu:8080/cgi-bin/growth/list_program_sources.cgi', \
auth=(username, password), data={'programidx':str(programidx)})
sources = json.loads(r.text)
sources_out = []
for s in sources:
coords = SkyCoord(ra=s['ra']*u.deg, dec=s['dec']*u.deg, frame='icrs')
sources_out.append({"name":s['name'], "ra":coords.ra, "dec":coords.dec, \
"classification":s['classification'], "redshift":s['redshift'], "creation_date":s['creationdate']})
return sources_out
def query_kowalski_clu(username, password, clu):
'''Query kowalski to get a table of CLU galaxies.'''
k = Kowalski(username=username, password=password, verbose=False)
q = {"query_type": "general_search",
"query": "db['CLU_20180513'].find({},{'distmpc': 1})"
}
r = k.query(query=q)
return r
def check_clu_transients(sources_kowalski, clu_sources):
'''Check if the selected sources are present in the
CLU science program. If so, print out the relevant information.'''
sources_in_clu = []
sources_not_in_clu = []
list_clu_sources = list(s['name'] for s in clu_sources)
for source in sources_kowalski:
print("-------")
if source in list_clu_sources:
clu_source = clu_sources[np.where(np.array(list_clu_sources) == source)[0][0]]
try:
for k in clu_source.keys():
print(f"{k}: {clu_source[k]}")
sources_in_clu.append(source)
except:
pdb.set_trace()
else:
print(f"{source} was not saved in CLU")
sources_not_in_clu.append(source)
print("-------")
print("Summary:")
print(f"Sources saved in CLU: {sources_in_clu}")
print(f"Sources not saved in CLU: {sources_not_in_clu}")
def query_kowalski(username, password, clu, args):
'''Query kowalski and apply the selection criteria'''
k = Kowalski(username=username, password=password, verbose=False)
#Initialize a set for the results
set_objectId_all = set([])
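    # The CLU table is split into args.slices roughly equal chunks so each
    # Kowalski cone search stays small. For example, with len(clu) == 100 and
    # args.slices == 5, np.linspace(0, 100, 5) gives [0, 25, 50, 75, 100], so
    # the loop below queries rows 0:25, 25:50, 50:75 and finally 75: (the
    # IndexError branch handles the last chunk).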
for slice_lim,i in zip(np.linspace(0,len(clu),args.slices)[:-1], np.arange(len(np.linspace(0,len(clu),args.slices)[:-1]))):
try:
t = clu[int(slice_lim):int(np.linspace(0,len(clu),args.slices)[:-1][i+1])]
except IndexError:
t = clu[int(slice_lim):]
coords_arr = []
galaxy_names_arr = []
for galaxy,ra, dec in zip(t["name"],t["ra"], t["dec"]):
try:
coords = SkyCoord(ra=ra*u.deg, dec=dec*u.deg)
coords_arr.append((coords.ra.deg,coords.dec.deg))
except ValueError:
print("Problems with the galaxy coordinates?")
pdb.set_trace()
continue
galaxy_names_arr.append(galaxy)
try:
print(f"slice: {int(slice_lim)}:{int(np.linspace(0,len(clu),args.slices)[:-1][i+1])}" )
except:
print(f"slice: {int(slice_lim)}:{int(len(clu))}" )
q = {"query_type": "cone_search",
"object_coordinates": {
"radec": f"{coords_arr}",
"cone_search_radius": f"{args.radius}",
"cone_search_unit": "arcmin"
},
"catalogs": {
"ZTF_alerts": {
"filter": {
"candidate.ndethist": {'$gt': 1},
"candidate.rb": {'$gt': 0.2}
},
"projection": {
"objectId": 1,
"candidate.rcid": 1,
"candidate.ra": 1,
"candidate.dec": 1,
"candidate.jd": 1,
"candidate.ndethist": 1,
"candidate.jdstarthist": 1,
"candidate.jdendhist": 1,
"candidate.jdendhist": 1,
"candidate.magpsf": 1,
"candidate.sigmapsf": 1,
"candidate.fid": 1,
"candidate.programid": 1,
"candidate.isdiffpos": 1,
"candidate.ndethist": 1,
"candidate.ssdistnr": 1,
"candidate.rb": 1,
"candidate.drb": 1,
"candidate.distpsnr1": 1,
"candidate.sgscore1": 1,
"candidate.srmag1": 1,
"candidate.distpsnr2": 1,
"candidate.sgscore2": 1,
"candidate.srmag2": 1,
"candidate.distpsnr3": 1,
"candidate.sgscore3": 1,
"candidate.srmag3": 1
}
}
}
}
#Perform the query
r = k.query(query=q)
print('Search completed for this slice.')
# #Dump the results in a json file
# with open(f'results_clu25Mpc_1week_{i+1}.json', 'w') as j:
# json.dump(r, j)
#Identify 'candid' for all relevant candidates
objectId_list = []
with_neg_sub = []
old = []
stellar_list = []
try:
keys_list = list(r['result_data']['ZTF_alerts'].keys())
except:
print("Error in the keys list?? Check 'r' ")
pdb.set_trace()
for key in keys_list:
all_info = r['result_data']['ZTF_alerts'][key]
for info in all_info:
# #Stop at a certain candidId for debugging
# if info['objectId'] == 'ZTF19aanfkyc':
# pdb.set_trace()
if info['objectId'] in old:
continue
if info['objectId'] in stellar_list:
continue
try:
if info['candidate']['drb'] < 0.5:
continue
except:
                    pass  # drb score not present for this alert
if np.abs(info['candidate']['ssdistnr']) < 10:
continue
if info['candidate']['isdiffpos'] in ['f',0]:
with_neg_sub.append(info['objectId'])
if (info['candidate']['jdendhist'] - info['candidate']['jdstarthist']) < args.min_days:
continue
if (info['candidate']['jdendhist'] - info['candidate']['jdstarthist']) > args.max_days:
old.append(info['objectId'])
try:
if (np.abs(info['candidate']['distpsnr1']) < 1. and info['candidate']['sgscore1'] > 0.0):
stellar_list.append(info['objectId'])
except:
                    pass  # nearest-PS1 columns not present
try:
if (np.abs(info['candidate']['distpsnr1']) < 15. and info['candidate']['srmag1'] < 15. and info['candidate']['srmag1'] > 0. and info['candidate']['sgscore1'] >= 0.5):
continue
except:
                    pass  # PS1 cross-match columns not present
try:
if (np.abs(info['candidate']['distpsnr2']) < 15. and info['candidate']['srmag2'] < 15. and info['candidate']['srmag2'] > 0. and info['candidate']['sgscore2'] >= 0.5):
continue
except:
                    pass  # PS1 cross-match columns not present
try:
if (np.abs(info['candidate']['distpsnr3']) < 15. and info['candidate']['srmag3'] < 15. and info['candidate']['srmag3'] > 0. and info['candidate']['sgscore3'] >= 0.5):
continue
except:
                    pass  # PS1 cross-match columns not present
objectId_list.append(info['objectId'])
set_objectId = set(objectId_list)
#Remove those objects with negative subtraction
for n in set(with_neg_sub):
try:
set_objectId.remove(n)
except:
                pass
#Remove stellar objects
for n in set(stellar_list):
try:
set_objectId.remove(n)
except:
                pass
#Remove those objects considered old
for n in set(old):
try:
set_objectId.remove(n)
except:
                pass
print(set_objectId)
set_objectId_all = set_objectId_all | set_objectId
print("Cumulative:", set_objectId_all)
'''
print('----stats-----')
print('Number of sources with negative sub: ', len(set(with_neg_sub)))
print('Number of sources with only pos subtraction: ', len(set_objectId))
print(f"Number of sources older than {args.max_days} days: {len(set(old))}, specifically {set(old)}")
'''
return set_objectId_all
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query kowalski.')
parser.add_argument('--radius', dest='radius', type=float, required=False, \
help='Search radius (arcmin)', default = 1.)
parser.add_argument('--min-days', dest='min_days', type=float, required=False, \
help='Minimum time (days) between the first and last alert', default = 3.)
parser.add_argument('--max-days', dest='max_days', type=float, required=False, \
help='Maximum time (days) between the first and last alert', default = 14.)
parser.add_argument('--min-dist', dest='min_dist', type=float, required=False, \
help='Minimum distance(Mpc) of the CLU galaxies to explore', default = 0.)
parser.add_argument('--max-dist', dest='max_dist', type=float, required=False, \
help='Maximum distance(Mpc) of the CLU galaxies to explore', default = 200.)
parser.add_argument('--min-dec', dest='min_dec', type=float, required=False, \
help='Minimum declination (celestial, deg) of the CLU galaxies to explore', default = -30.)
    parser.add_argument('--slices', dest='slices', type=int, required=False, \
                        help='Number (integer) of slices in which the query will be divided', default = 40)
args = parser.parse_args()
import requests
import json
import pdb
import numpy as np
from astropy.time import Time
from astropy.io import ascii
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
from penquins import Kowalski
#Print a summary of the query input
print_query_params(args)
#Read the CLU catalog
clu = Table.read('CLU_20181213V2.fits')
clu = clu[clu['dec'] > args.min_dec]
clu = clu[clu['distmpc'] >= args.min_dist]
clu = clu[clu['distmpc'] <= args.max_dist]
print(f"There are {len(clu)} CLU galaxies in this sample.")
#Read the secrets
secrets = ascii.read('secrets.csv', format = 'csv')
username = secrets['kowalski_user'][0]
password = secrets['kowalski_pwd'][0]
#Query kowalski
sources_kowalski = query_kowalski(username, password, clu, args)
#Check the CLU science program on the Marshal
username_marshal = secrets['marshal_user'][0]
    password_marshal = secrets['marshal_pwd'][0]
program_name='Census of the Local Universe'
clu_sources = get_candidates_growth_marshal(program_name, username_marshal, password_marshal)
#For each transient check if it is present in the CLU science program
check_clu_transients(sources_kowalski, clu_sources)
print("Done.")
'''
#Plot the data
for galaxy_name, idcoords in zip(galaxy_names_arr, r['result_data']['ZTF_alerts'].keys()):
all_info=r['result_data']['ZTF_alerts'][idcoords]
jd_arr=[]
mag_arr=[]
magerr_arr=[]
filter_arr=[]
for info in all_info:
if info['candidate']['isdiffpos'] != 't':
continue
magpsf=info['candidate']['magpsf']
sigmapsf=info['candidate']['sigmapsf']
jd=info['candidate']['jd']
fil=info['candidate']['fid']
filter_arr.append(fil)
jd_arr.append(jd)
mag_arr.append(magpsf)
magerr_arr.append(sigmapsf)
if mag_arr!=[]:
print(info['candidate']['programid'])
jd0=min(jd_arr)
jd0_time=Time(jd0, format='jd')
for i in np.arange(len(jd_arr)):
jd_arr[i]=jd_arr[i]-jd0
plt.figure()
print(galaxy_name, info['objectId'], info['candidate']['ra'], info['candidate']['dec'])
plt.title(galaxy_name + ' '+info['objectId'])
#plt.errorbar(jd_arr, mag_arr, yerr=magerr_arr, color='blue', linestyle=' ', marker='o')
for jj,mm,me,ff in zip(jd_arr, mag_arr, magerr_arr, filter_arr):
if ff==1:
fcolor='b'
if ff==2:
fcolor='r'
if ff==3:
fcolor='y'
plt.errorbar(jj, mm, yerr=me, color=fcolor, linestyle=' ', marker='o')
plt.xlabel(f'Days since {jd0_time.iso}')
plt.gca().invert_yaxis()
plt.show()
'''
| 38.065963
| 186
| 0.56117
|
4a17ea5e9a063812ff8764afb10873166155f787
| 1,322
|
py
|
Python
|
setup.py
|
stuart-c/GithubOrganizer
|
ee6cdb3040f92f02b7588ea13034a5536e704e91
|
[
"MIT"
] | null | null | null |
setup.py
|
stuart-c/GithubOrganizer
|
ee6cdb3040f92f02b7588ea13034a5536e704e91
|
[
"MIT"
] | null | null | null |
setup.py
|
stuart-c/GithubOrganizer
|
ee6cdb3040f92f02b7588ea13034a5536e704e91
|
[
"MIT"
] | null | null | null |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
version = '0.1.0'
try:
    long_description = open('README.md', encoding='utf-8').read()
except IOError:
    long_description = ''
setup(
name = 'githuborganizer',
version = version,
packages=find_packages(),
description = '',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3',
author = 'Robert Hafner',
author_email = 'tedivm@tedivm.com',
url = 'https://github.com/tedivm/githuborganizer',
download_url = "https://github.com/tedivm/githuborganizer/archive/v%s.tar.gz" % (version),
keywords = 'automation github organizations',
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Topic :: Software Development :: Version Control',
'Programming Language :: Python :: 3',
],
install_requires=[
'Beaker>=1.11.0,<2',
'celery>=4.1,<5',
'click>=7.0,<8.0',
'cryptography>=2.1.4,<3',
'github3.py>=1,<2',
'github3apps.py>=0.1.3,<0.2',
'pyjwt>=1.5.3,<2',
'PyYAML>=5,<6',
'fastapi>=0.42.0',
],
extras_require={
'dev': [
'pypandoc',
'twine',
'wheel'
],
},
)
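# Typical local workflows (illustrative, not part of the package metadata):
#
#     pip install .            # plain install
#     pip install -e ".[dev]"  # editable install with the 'dev' extras above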
| 21.322581
| 92
| 0.631619
|
4a17eb6557653c3521bd11b2662407d819a9d724
| 271
|
py
|
Python
|
secondstring.py
|
Enfioz/pands-problems
|
a136b4f3e8a1c8424b529c62bf7d8eda3fc3c9aa
|
[
"MIT"
] | null | null | null |
secondstring.py
|
Enfioz/pands-problems
|
a136b4f3e8a1c8424b529c62bf7d8eda3fc3c9aa
|
[
"MIT"
] | null | null | null |
secondstring.py
|
Enfioz/pands-problems
|
a136b4f3e8a1c8424b529c62bf7d8eda3fc3c9aa
|
[
"MIT"
] | null | null | null |
# Patrick Corcoran
# Weekly Task 3 - strings
# Write a program that asks a user to input
# a string and outputs every second letter
# in reverse order
s = "The quick brown fox jumps over the lazy dog."
s = (input("Please enter a sentence: "))
print(s[::-1][0:44:2])
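# Illustrative run (hypothetical input):
#
#     Please enter a sentence: Hello world
#     drwolH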
| 19.357143
| 50
| 0.690037
|
4a17eba0944df4a6b9a059f7eae23753f31c55a3
| 23,137
|
py
|
Python
|
test/solver_tests/hybrid_solver_test.py
|
YosefLab/SingleCellLineageTracing
|
d9133fc80c8314e7935fde037dd86111cac47447
|
[
"MIT"
] | 52
|
2019-05-14T02:06:24.000Z
|
2022-03-27T05:22:56.000Z
|
test/solver_tests/hybrid_solver_test.py
|
sbradford2/Cassiopeia
|
010072b307f7eadbf10dc4af8b2165e48f1736a7
|
[
"MIT"
] | 88
|
2019-06-07T15:07:45.000Z
|
2022-03-22T14:40:03.000Z
|
test/solver_tests/hybrid_solver_test.py
|
sbradford2/Cassiopeia
|
010072b307f7eadbf10dc4af8b2165e48f1736a7
|
[
"MIT"
] | 17
|
2019-05-17T00:46:16.000Z
|
2022-03-25T00:39:18.000Z
|
"""
Test HybridSolver in Cassiopeia.solver.
"""
import os
import unittest
import itertools
import networkx as nx
import pandas as pd
import pathlib as pl
import cassiopeia as cas
from cassiopeia.solver import solver_utilities
GUROBI_INSTALLED = True
try:
import gurobipy
except ModuleNotFoundError:
GUROBI_INSTALLED = False
def find_triplet_structure(triplet, T):
a, b, c = triplet[0], triplet[1], triplet[2]
a_ancestors = [node for node in nx.ancestors(T, a)]
b_ancestors = [node for node in nx.ancestors(T, b)]
c_ancestors = [node for node in nx.ancestors(T, c)]
ab_common = len(set(a_ancestors) & set(b_ancestors))
ac_common = len(set(a_ancestors) & set(c_ancestors))
bc_common = len(set(b_ancestors) & set(c_ancestors))
structure = "-"
if ab_common > bc_common and ab_common > ac_common:
structure = "ab"
elif ac_common > bc_common and ac_common > ab_common:
structure = "ac"
elif bc_common > ab_common and bc_common > ac_common:
structure = "bc"
return structure
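# Illustrative sketch: in a tree root -> (p -> (a, b), c), leaves a and b share
# their parent p as a common ancestor while c only shares the root, so
# find_triplet_structure(("a", "b", "c"), T) returns "ab". The tests below use
# this to compare reconstructed topologies against the expected ones
# triplet-by-triplet.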
class TestHybridSolver(unittest.TestCase):
def assertIsFile(self, path):
if not pl.Path(path).resolve().is_file():
raise AssertionError("File does not exist: %s" % str(path))
def setUp(self):
# basic PP example with no missing data
cm = pd.DataFrame.from_dict(
{
"a": [1, 1, 0],
"b": [1, 2, 0],
"c": [1, 2, 1],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
cm_large = pd.DataFrame.from_dict(
{
"a": [1, 0, 0, 0, 0, 0, 0, 0],
"b": [1, 1, 0, 0, 0, 0, 0, 0],
"c": [1, 1, 1, 0, 0, 0, 0, 0],
"d": [1, 1, 1, 1, 0, 0, 0, 0],
"e": [1, 1, 1, 1, 1, 0, 0, 0],
"f": [1, 1, 1, 1, 1, 1, 0, 0],
"g": [1, 1, 1, 1, 1, 1, 1, 0],
"h": [1, 1, 1, 1, 1, 1, 1, 1],
"i": [2, 0, 0, 0, 0, 0, 0, 0],
"j": [2, 2, 0, 0, 0, 0, 0, 0],
},
orient="index",
)
cm_missing = pd.DataFrame.from_dict(
{
"a": [1, 3, 1, 1],
"b": [1, 3, 1, -1],
"c": [1, 0, 1, 0],
"d": [1, 1, 3, 0],
"e": [1, 1, 0, 0],
"f": [2, 0, 0, 0],
"g": [2, 4, -1, -1],
"h": [2, 4, 2, 0],
"i": [2, 4, 2, 0],
},
orient="index",
)
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.logfile = os.path.join(self.dir_path, "test.log")
self.pp_tree = cas.data.CassiopeiaTree(cm, missing_state_indicator=-1)
self.large_tree = cas.data.CassiopeiaTree(
cm_large, missing_state_indicator=-1
)
self.missing_tree = cas.data.CassiopeiaTree(
cm_missing, missing_state_indicator=-1
)
## smaller hybrid solver
ilp_solver = cas.solver.ILPSolver(mip_gap=0.0)
greedy_solver = cas.solver.VanillaGreedySolver()
self.hybrid_pp_solver = cas.solver.HybridSolver(
greedy_solver, ilp_solver, cell_cutoff=3, threads=2
)
## larger hybrid solver
self.hybrid_pp_solver_large = cas.solver.HybridSolver(
greedy_solver, ilp_solver, cell_cutoff=3, threads=2
)
## hybrid solver with missing data
self.hybrid_pp_solver_missing = cas.solver.HybridSolver(
greedy_solver, ilp_solver, cell_cutoff=3, threads=2
)
## hybrid solver with MaxCut Greedy
greedy_maxcut_solver = cas.solver.MaxCutGreedySolver()
self.hybrid_pp_solver_maxcut = cas.solver.HybridSolver(
greedy_maxcut_solver, ilp_solver, cell_cutoff=3, threads=2
)
## hybrid solver with Greedy on top and Maxcut on Bottom
self.hybrid_pp_solver_greedy_over_greedy_maxcut = (
cas.solver.HybridSolver(
greedy_solver, greedy_maxcut_solver, cell_cutoff=3, threads=2
)
)
def test_constructor(self):
self.assertEqual(self.hybrid_pp_solver.cell_cutoff, 3)
self.assertEqual(self.hybrid_pp_solver.lca_cutoff, None)
# test bottom solver is populated correctly
self.assertEqual(
self.hybrid_pp_solver.bottom_solver.convergence_time_limit, 12600
)
self.assertEqual(
self.hybrid_pp_solver.bottom_solver.maximum_potential_graph_layer_size,
10000,
)
self.assertFalse(self.hybrid_pp_solver.bottom_solver.weighted)
expected_unique_character_matrix = pd.DataFrame.from_dict(
{
"a": [1, 1, 0],
"b": [1, 2, 0],
"c": [1, 2, 1],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
pd.testing.assert_frame_equal(
expected_unique_character_matrix,
self.pp_tree.character_matrix.copy(),
)
def test_cutoff(self):
character_matrix = self.pp_tree.character_matrix.copy()
missing_state = self.pp_tree.missing_state_indicator
        self.assertTrue(
            self.hybrid_pp_solver.assess_cutoff(
                ["a", "b", "c"], character_matrix, missing_state
            )
        )
        self.assertFalse(
            self.hybrid_pp_solver.assess_cutoff(
                ["a", "b", "c", "d"], character_matrix, missing_state
            )
        )
# test lca-based cutoff
self.hybrid_pp_solver.cell_cutoff = None
self.hybrid_pp_solver.lca_cutoff = 2
self.assertTrue(
self.hybrid_pp_solver.assess_cutoff(
["a", "b", "c"], character_matrix, missing_state
)
)
self.assertFalse(
self.hybrid_pp_solver.assess_cutoff(
["c", "d"], character_matrix, missing_state
)
)
def test_top_down_split_manual(self):
character_matrix = self.pp_tree.character_matrix.copy()
# test manually
mutation_frequencies = (
self.hybrid_pp_solver.top_solver.compute_mutation_frequencies(
["a", "b", "c", "d", "e"],
character_matrix,
self.pp_tree.missing_state_indicator,
)
)
expected_dictionary = {
0: {1: 3, 2: 2, -1: 0},
1: {1: 1, 2: 2, 0: 2, -1: 0},
2: {0: 3, 1: 1, 2: 1, -1: 0},
}
self.assertDictEqual(mutation_frequencies, expected_dictionary)
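        # (For orientation: the outer keys are character indices and the inner
        # dicts count how many of the five samples carry each state, e.g. state
        # 1 appears three times at character 0 -- samples a, b and c.)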
clades = self.hybrid_pp_solver.top_solver.perform_split(
character_matrix, ["a", "b", "c", "d", "e"]
)
expected_split = (["a", "b", "c"], ["d", "e"])
for expected_clade in expected_split:
self.assertIn(expected_clade, clades)
def test_apply_top_solver_small(self):
character_matrix = self.pp_tree.character_matrix.copy()
unique_character_matrix = character_matrix.drop_duplicates()
names = solver_utilities.node_name_generator()
_, subproblems, _ = self.hybrid_pp_solver.apply_top_solver(
unique_character_matrix,
list(unique_character_matrix.index),
nx.DiGraph(),
names,
)
expected_clades = (["a", "b", "c"], ["d", "e"])
observed_clades = [subproblem[1] for subproblem in subproblems]
self.assertEqual(len(expected_clades), len(observed_clades))
for clade in expected_clades:
self.assertIn(clade, observed_clades)
def test_apply_top_solver_large(self):
character_matrix = self.large_tree.character_matrix.copy()
unique_character_matrix = character_matrix.drop_duplicates()
names = solver_utilities.node_name_generator()
_, subproblems, _ = self.hybrid_pp_solver_large.apply_top_solver(
unique_character_matrix,
list(unique_character_matrix.index),
nx.DiGraph(),
names,
)
expected_clades = (
["a"],
["b"],
["c"],
["d"],
["e"],
["f", "g", "h"],
["i", "j"],
)
observed_clades = [subproblem[1] for subproblem in subproblems]
self.assertEqual(len(expected_clades), len(observed_clades))
for clade in expected_clades:
self.assertIn(clade, observed_clades)
def test_apply_top_solver_missing(self):
character_matrix = self.missing_tree.character_matrix.copy()
unique_character_matrix = character_matrix.drop_duplicates()
names = solver_utilities.node_name_generator()
_, subproblems, _ = self.hybrid_pp_solver_missing.apply_top_solver(
unique_character_matrix,
list(unique_character_matrix.index),
nx.DiGraph(),
names,
)
expected_clades = (["a", "b", "c"], ["d", "e"], ["f", "g", "h"])
observed_clades = [subproblem[1] for subproblem in subproblems]
self.assertEqual(len(expected_clades), len(observed_clades))
for clade in expected_clades:
self.assertIn(clade, observed_clades)
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_full_hybrid(self):
self.hybrid_pp_solver.solve(self.pp_tree, logfile=self.logfile)
tree = self.pp_tree.get_tree_topology()
# make sure log files are created correctly
self.assertIsFile(os.path.join(self.dir_path, "test_1-0-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_2-0-0.log"))
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
# make sure every node has at most one parent
multi_parents = [n for n in tree if tree.in_degree(n) > 1]
self.assertEqual(len(multi_parents), 0)
# make sure the resulting tree has no unifurcations
one_child = [n for n in tree if tree.out_degree(n) == 1]
self.assertEqual(len(one_child), 0)
expected_tree = nx.DiGraph()
expected_tree.add_nodes_from(
["a", "b", "c", "d", "e", "root", "6", "7", "8", "9"]
)
expected_tree.add_edges_from(
[
("root", "9"),
("9", "8"),
("9", "7"),
("7", "6"),
("7", "a"),
("6", "b"),
("6", "c"),
("8", "e"),
("8", "d"),
]
)
        # materialize the combinations so they can be iterated more than once
        triplets = list(itertools.combinations(["a", "b", "c", "d", "e"], 3))
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
self.hybrid_pp_solver.solve(
self.pp_tree, logfile=self.logfile, collapse_mutationless_edges=True
)
tree = self.pp_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.pp_tree.get_newick()
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_full_hybrid_single_thread(self):
self.hybrid_pp_solver.threads = 1
self.hybrid_pp_solver.solve(self.pp_tree, logfile=self.logfile)
# make sure log files are created correctly
self.assertIsFile(os.path.join(self.dir_path, "test_1-0-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_2-0-0.log"))
tree = self.pp_tree.get_tree_topology()
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
# make sure every node has at most one parent
multi_parents = [n for n in tree if tree.in_degree(n) > 1]
self.assertEqual(len(multi_parents), 0)
# make sure the resulting tree has no unifurcations
one_child = [n for n in tree if tree.out_degree(n) == 1]
self.assertEqual(len(one_child), 0)
expected_tree = nx.DiGraph()
expected_tree.add_nodes_from(
["a", "b", "c", "d", "e", "root", "6", "7", "8", "9"]
)
expected_tree.add_edges_from(
[
("root", "9"),
("9", "8"),
("9", "7"),
("7", "6"),
("7", "a"),
("6", "b"),
("6", "c"),
("8", "e"),
("8", "d"),
]
)
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.pp_tree.get_newick()
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_full_hybrid_large(self):
self.hybrid_pp_solver_large.solve(self.large_tree, logfile=self.logfile)
tree = self.large_tree.get_tree_topology()
# make sure log files are created correctly
self.assertIsFile(
os.path.join(self.dir_path, "test_1-1-1-1-1-1-0-0.log")
)
self.assertIsFile(
os.path.join(self.dir_path, "test_2-0-0-0-0-0-0-0.log")
)
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("node0", "node1"),
("node0", "node2"),
("node1", "a"),
("node1", "node4"),
("node2", "i"),
("node2", "j"),
("node4", "b"),
("node4", "node8"),
("node8", "c"),
("node8", "node10"),
("node10", "d"),
("node10", "node12"),
("node12", "e"),
("node12", "node14"),
("node14", "f"),
("node14", "node16"),
("node16", "g"),
("node16", "h"),
]
)
        # materialize the combinations so they can be iterated more than once
        triplets = list(itertools.combinations(
            ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], 3
        ))
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
self.hybrid_pp_solver_large.solve(
self.large_tree,
logfile=self.logfile,
collapse_mutationless_edges=True,
)
tree = self.large_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.large_tree.get_newick()
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_full_hybrid_maxcut(self):
self.hybrid_pp_solver_maxcut.solve(
self.missing_tree, logfile=self.logfile
)
tree = self.missing_tree.get_tree_topology()
# make sure log files are created correctly
self.assertIsFile(os.path.join(self.dir_path, "test_1-0-1-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_1-1-0-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_2-0-0-0.log"))
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e", "f", "g", "h", "i"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("node0", "node1"),
("node0", "node2"),
("node1", "node3"),
("node1", "node4"),
("node3", "c"),
("node3", "node6"),
("node6", "a"),
("node6", "b"),
("node4", "d"),
("node4", "e"),
("node2", "f"),
("node2", "node5"),
("node5", "g"),
("node5", "node7"),
("node7", "h"),
("node7", "i"),
]
)
        # materialize the combinations so they can be iterated more than once
        triplets = list(itertools.combinations(
            ["a", "b", "c", "d", "e", "f", "g", "h", "i"], 3
        ))
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
self.hybrid_pp_solver_maxcut.solve(
self.missing_tree, logfile=self.logfile
)
tree = self.missing_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.missing_tree.get_newick()
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_full_hybrid_missing(self):
self.hybrid_pp_solver_missing.solve(
self.missing_tree,
logfile=self.logfile,
collapse_mutationless_edges=True,
)
tree = self.missing_tree.get_tree_topology()
# make sure log files are created correctly
self.assertIsFile(os.path.join(self.dir_path, "test_1-0-1-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_1-1-0-0.log"))
self.assertIsFile(os.path.join(self.dir_path, "test_2-0-0-0.log"))
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e", "f", "g", "h", "i"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("node0", "node1"),
("node0", "node2"),
("node1", "node3"),
("node1", "node4"),
("node3", "c"),
("node3", "node6"),
("node6", "a"),
("node6", "b"),
("node4", "d"),
("node4", "e"),
("node2", "f"),
("node2", "node5"),
("node5", "g"),
("node5", "h"),
("node5", "i"),
]
)
triplets = itertools.combinations(
["a", "b", "c", "d", "e", "f", "g", "h", "i"], 3
)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
def test_greedy_over_greedy_maxcut_missing(self):
self.hybrid_pp_solver_greedy_over_greedy_maxcut.solve(
self.missing_tree, collapse_mutationless_edges=True
)
tree = self.missing_tree.get_tree_topology()
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e", "f", "g", "h"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("node0", "node1"),
("node0", "node2"),
("node1", "node3"),
("node1", "node4"),
("node3", "c"),
("node3", "node6"),
("node6", "a"),
("node6", "b"),
("node4", "d"),
("node4", "e"),
("node2", "f"),
("node2", "node5"),
("node5", "g"),
("node5", "h"),
]
)
triplets = itertools.combinations(
["a", "b", "c", "d", "e", "f", "g", "h"], 3
)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.missing_tree.get_newick()
def tearDown(self):
for _file in os.listdir(self.dir_path):
if ".log" in _file:
os.remove(os.path.join(self.dir_path, _file))
if __name__ == "__main__":
unittest.main()
| 35.003026
| 83
| 0.540952
|
4a17ee2f357e39a2464d362216488f90c548647a
| 2,176
|
py
|
Python
|
tests/base_tmpl.py
|
gaogaotiantian/codesnap
|
15317ffcd44c4635fbf0ec35ec94b4368552b86c
|
[
"Apache-2.0"
] | 20
|
2020-08-06T02:52:18.000Z
|
2020-08-12T02:21:02.000Z
|
tests/base_tmpl.py
|
gaogaotiantian/codesnap
|
15317ffcd44c4635fbf0ec35ec94b4368552b86c
|
[
"Apache-2.0"
] | 1
|
2020-08-07T02:41:16.000Z
|
2020-08-10T11:03:11.000Z
|
tests/base_tmpl.py
|
gaogaotiantian/codesnap
|
15317ffcd44c4635fbf0ec35ec94b4368552b86c
|
[
"Apache-2.0"
] | 2
|
2020-08-06T23:38:33.000Z
|
2020-08-09T08:21:42.000Z
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import gc
import io
import logging
import os
import sys
import time
from unittest import TestCase
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s"
)
class BaseTmpl(TestCase):
def setUp(self):
logging.info("=" * 60)
logging.info(f"{self.id()} start")
self.stdout = io.StringIO()
self.stdout_orig, sys.stdout = sys.stdout, self.stdout
def tearDown(self):
sys.stdout = self.stdout_orig
logging.info(f"{self.id()} finish")
gc.collect()
def dbgPrint(self, *args, **kwargs):
print(*args, file=self.stdout_orig, **kwargs)
def assertEventNumber(self, data, expected_entries):
entries = [entry for entry in data["traceEvents"] if entry["ph"] != "M"]
entries_count = len(entries)
self.assertEqual(entries_count, expected_entries,
f"Event number incorrect, {entries_count}(expected {expected_entries}) - {entries}")
def assertFileExists(self, path, timeout=None, msg=None):
err_msg = f"file {path} does not exist!"
if msg is not None:
err_msg = f"file {path} does not exist! {msg}"
if timeout is None:
if not os.path.exists(path):
raise AssertionError(err_msg)
else:
start = time.time()
while True:
if os.path.exists(path):
return
elif time.time() - start > timeout:
raise AssertionError(err_msg)
else:
time.sleep(0.5)
def assertFileNotExist(self, path):
if os.path.exists(path):
raise AssertionError(f"file {path} does exist!")
def assertTrueTimeout(self, func, timeout):
start = time.time()
while True:
try:
func()
break
            except AssertionError:
                if time.time() - start > timeout:
                    raise
                time.sleep(0.1)
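# Minimal usage sketch (hypothetical test module), assuming this file is
# importable from the test package:
#
#     from .base_tmpl import BaseTmpl
#
#     class TestExample(BaseTmpl):
#         def test_report_written(self):
#             self.assertFileExists("report.json", timeout=5)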
| 31.085714
| 109
| 0.579504
|
4a17ef24b5c95bd45cf9fd60ccadac7549a96e2a
| 10,665
|
py
|
Python
|
cli/src/commands/Apply.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
cli/src/commands/Apply.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
cli/src/commands/Apply.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from cli.src.ansible.AnsibleRunner import AnsibleRunner
from cli.src.helpers.build_io import (get_build_path, get_inventory_path,
get_manifest_path, load_inventory,
load_manifest, save_manifest)
from cli.src.helpers.cli_helpers import query_yes_no
from cli.src.helpers.data_loader import load_schema_obj, types
from cli.src.helpers.doc_list_helpers import (select_all, select_first,
select_single)
from cli.src.helpers.naming_helpers import get_os_name_normalized
from cli.src.helpers.yaml_helpers import safe_load_all
from cli.src.Log import Log
from cli.src.providers.provider_class_loader import provider_class_loader
from cli.src.schema.ConfigurationAppender import ConfigurationAppender
from cli.src.schema.DefaultMerger import DefaultMerger
from cli.src.schema.SchemaValidator import SchemaValidator
from cli.src.Step import Step
from cli.src.terraform.TerraformFileCopier import TerraformFileCopier
from cli.src.terraform.TerraformRunner import TerraformRunner
from cli.src.terraform.TerraformTemplateGenerator import \
TerraformTemplateGenerator
from cli.version import VERSION
class Apply(Step):
def __init__(self, input_data):
super().__init__(__name__)
self.file = input_data.file
self.skip_infrastructure = getattr(input_data, 'no_infra', False)
self.skip_config = getattr(input_data, 'skip_config', False)
self.ansible_options = {'forks': getattr(input_data, 'ansible_forks'),
'profile_tasks': getattr(input_data, 'profile_ansible_tasks', False)}
self.logger = Log(__name__)
self.cluster_model = None
self.input_docs = []
self.configuration_docs = []
self.infrastructure_docs = []
self.manifest_docs = []
self.ping_retries: int = input_data.ping_retries
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def process_input_docs(self):
# Load the user input YAML docs from the input file.
if os.path.isabs(self.file):
path_to_load = self.file
else:
path_to_load = os.path.join(os.getcwd(), self.file)
        with open(path_to_load, 'r') as user_file_stream:
            self.input_docs = safe_load_all(user_file_stream)
# Merge the input docs with defaults
with DefaultMerger(self.input_docs) as doc_merger:
self.input_docs = doc_merger.run()
# Get the cluster model.
self.cluster_model = select_single(self.input_docs, lambda x: x.kind == 'epiphany-cluster')
if self.cluster_model is None:
raise Exception('No cluster model defined in input YAML file')
# Validate cluster input document.
# Other documents might need more processing (SET_BY_AUTOMATION) so will be validated at a later stage.
with SchemaValidator(self.cluster_model.provider, [self.cluster_model]) as schema_validator:
schema_validator.run()
def process_infrastructure_docs(self):
# Build the infrastructure docs
with provider_class_loader(self.cluster_model.provider, 'InfrastructureBuilder')(
self.input_docs, self.manifest_docs) as infrastructure_builder:
self.infrastructure_docs = infrastructure_builder.run()
# Validate infrastructure documents
with SchemaValidator(self.cluster_model.provider, self.infrastructure_docs) as schema_validator:
schema_validator.run()
def process_configuration_docs(self):
# Append with components and configuration docs
with ConfigurationAppender(self.input_docs) as config_appender:
self.configuration_docs = config_appender.run()
# Validate configuration documents
with SchemaValidator(self.cluster_model.provider, self.configuration_docs) as schema_validator:
schema_validator.run()
def collect_infrastructure_config(self):
with provider_class_loader(self.cluster_model.provider, 'InfrastructureConfigCollector')(
[*self.configuration_docs, *self.infrastructure_docs]) as config_collector:
config_collector.run()
def load_manifest(self):
path_to_manifest = get_manifest_path(self.cluster_model.specification.name)
if os.path.isfile(path_to_manifest):
self.manifest_docs = load_manifest(get_build_path(self.cluster_model.specification.name))
def assert_incompatible_terraform(self):
cluster_model = select_first(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
if cluster_model:
old_major_version = int(cluster_model.version.split('.')[0])
new_major_version = int(VERSION.split('.')[0])
if old_major_version == 1 and new_major_version == 2:
if not query_yes_no("You are trying to re-apply a Epiphany 2.x configuration against an existing Epiphany 1.x cluster."
"The Terraform is not compatible between these versions and requires manual action described in the documentation."
"If you haven't done Terraform upgrade yet, it will break your cluster. Do you want to continue?"):
sys.exit(0)
def assert_no_master_downscale(self):
components = self.cluster_model.specification.components
# Skip downscale assertion for single machine clusters
if ('single_machine' in components) and (int(components['single_machine']['count']) > 0):
return
cluster_name = self.cluster_model.specification.name
inventory_path = get_inventory_path(cluster_name)
if os.path.isfile(inventory_path):
existing_inventory = load_inventory(inventory_path)
both_present = all([
'kubernetes_master' in existing_inventory.list_groups(),
'kubernetes_master' in components,
])
if both_present:
prev_master_count = len(existing_inventory.list_hosts(pattern='kubernetes_master'))
next_master_count = int(components['kubernetes_master']['count'])
if prev_master_count > next_master_count:
raise Exception("ControlPlane downscale is not supported yet. Please revert your 'kubernetes_master' count to previous value or increase it to scale up Kubernetes.")
def assert_no_postgres_nodes_number_change(self):
feature_mapping = select_first(self.input_docs, lambda x: x.kind == 'configuration/feature-mapping')
if feature_mapping:
with DefaultMerger([feature_mapping]) as doc_merger:
feature_mapping = doc_merger.run()
feature_mapping = feature_mapping[0]
else:
feature_mapping = load_schema_obj(types.DEFAULT, 'common', 'configuration/feature-mapping')
components = self.cluster_model.specification.components
inventory_path = get_inventory_path(self.cluster_model.specification.name)
if os.path.isfile(inventory_path):
next_postgres_node_count = 0
existing_inventory = load_inventory(inventory_path)
prev_postgres_node_count = len(existing_inventory.list_hosts(pattern='postgresql'))
postgres_available = [x for x in feature_mapping.specification.available_roles if x.name == 'postgresql']
if postgres_available[0].enabled:
for key, roles in feature_mapping.specification.roles_mapping.items():
                    if 'postgresql' in roles and key in components:
                        next_postgres_node_count += components[key].count
if prev_postgres_node_count > 0 and prev_postgres_node_count != next_postgres_node_count:
raise Exception("Postgresql scaling is not supported yet. Please revert your 'postgresql' node count to previous value.")
def assert_consistent_os_family(self):
# Before this issue https://github.com/epiphany-platform/epiphany/issues/195 gets resolved,
# we are forced to do assertion here.
virtual_machine_docs = select_all(
self.infrastructure_docs,
lambda x: x.kind == 'infrastructure/virtual-machine',
)
os_indicators = {
get_os_name_normalized(vm_doc)
for vm_doc in virtual_machine_docs
}
if len(os_indicators) > 1:
raise Exception("Detected mixed Linux distros in config, Epirepo will not work properly. Please inspect your config manifest. Forgot to define repository VM document?")
def apply(self):
self.process_input_docs()
self.assert_no_master_downscale()
self.assert_no_postgres_nodes_number_change()
self.load_manifest()
        # assertions need to be executed before save_manifest overrides the manifest
if not self.skip_infrastructure:
self.assert_incompatible_terraform()
self.process_infrastructure_docs()
save_manifest([*self.input_docs, *self.infrastructure_docs], self.cluster_model.specification.name)
self.assert_consistent_os_family()
if not (self.skip_infrastructure or self.cluster_model['provider'] == 'any'):
# Generate terraform templates
with TerraformTemplateGenerator(self.cluster_model, self.infrastructure_docs) as template_generator:
template_generator.run()
# Copy cloud-config.yml since it contains bash code which can't be templated easily (requires {% raw %}...{% endraw %})
with TerraformFileCopier(self.cluster_model, self.infrastructure_docs) as file_copier:
file_copier.run()
# Run Terraform to create infrastructure
with TerraformRunner(self.cluster_model, self.configuration_docs) as tf_runner:
tf_runner.build()
self.process_configuration_docs()
self.collect_infrastructure_config()
# Merge all the docs
docs = [*self.configuration_docs, *self.infrastructure_docs]
# Save docs to manifest file
save_manifest(docs, self.cluster_model.specification.name)
# Run Ansible to provision infrastructure
        if not self.skip_config:
with AnsibleRunner(self.cluster_model, docs, ansible_options=self.ansible_options,
ping_retries=self.ping_retries) as ansible_runner:
ansible_runner.apply()
return 0
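# Illustrative summary of the Apply.apply() pipeline above (comment only):
#   1. process_input_docs             - load, merge and validate the user YAML
#   2. downscale / scaling assertions - refuse unsupported topology changes
#   3. Terraform generate + apply     - skipped when skip_infrastructure is set
#                                       or the provider is 'any'
#   4. configuration docs + collector - append defaults, validate, collect
#   5. save_manifest                  - persist the merged document set
#   6. AnsibleRunner.apply            - provision, unless skip_config is set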
| 46.369565
| 185
| 0.687107
|
4a17ef7270de8d5f7dc9b0225554f68398c4323d
| 9,686
|
py
|
Python
|
metapool/tests/test_count.py
|
callaband/metagenomics_pooling_notebook
|
cda37552d604c7cbd45d3b7a4ae103cae26e95e1
|
[
"MIT"
] | null | null | null |
metapool/tests/test_count.py
|
callaband/metagenomics_pooling_notebook
|
cda37552d604c7cbd45d3b7a4ae103cae26e95e1
|
[
"MIT"
] | null | null | null |
metapool/tests/test_count.py
|
callaband/metagenomics_pooling_notebook
|
cda37552d604c7cbd45d3b7a4ae103cae26e95e1
|
[
"MIT"
] | null | null | null |
import json
import os
import tempfile
import shutil
import pandas as pd
from sample_sheet import Sample
from unittest import main, TestCase
from metapool import KLSampleSheet
from metapool.count import (_extract_name_and_lane, _parse_samtools_counts,
_parse_fastp_counts, bcl2fastq_counts,
fastp_counts, minimap2_counts, run_counts,
_parsefier)
class TestCount(TestCase):
def setUp(self):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.run_dir = os.path.join(data_dir, 'runs',
'200318_A00953_0082_AH5TWYDSXY')
self.ss = KLSampleSheet(os.path.join(self.run_dir, 'sample-sheet.csv'))
self.stats = pd.DataFrame(RUN_STATS)
# help make comparisons consistent
self.stats.sort_index(inplace=True)
self.stats.index.set_names(['Sample_ID', 'Lane'], inplace=True)
def test_extract_name_and_lane(self):
self.assertEqual(
_extract_name_and_lane('33333_G2750L_S2031_L001_I1_001.fastq.gz'),
('33333_G2750L', '1'))
self.assertEqual(
_extract_name_and_lane('33333_G2750L_S2031_L001_R1_001.fastq.gz'),
('33333_G2750L', '1'))
self.assertEqual(
_extract_name_and_lane('33333_G2750L_S2031_L001_R2_001.fastq.gz'),
('33333_G2750L', '1'))
self.assertEqual(
_extract_name_and_lane('33333_G2751R_S2072_L009_R1_001.fastq.gz'),
('33333_G2751R', '9'))
self.assertEqual(
_extract_name_and_lane('33333_G2751R_S2072_L010_R1_001.fastq.gz'),
('33333_G2751R', '10'))
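    # For reference (illustrative): Illumina demultiplexed FASTQs follow the
    # pattern <SampleName>_S<sample#>_L<lane>_<R1|R2|I1>_001.fastq.gz, so the
    # parser must anchor on the trailing _S..._L... group -- the "terrible
    # pattern" case below exercises a sample name that itself contains such a
    # group.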
def test_extract_name_and_lane_terrible_pattern(self):
        # this is unlikely to ever happen, but here we label a sample with the
        # same scheme that Illumina would use to identify different cells,
        # lanes, and orientations
self.assertEqual(
_extract_name_and_lane('S2031_L001_R1_S2031_L001_I1_001.fastq.gz'),
('S2031_L001_R1', '1'))
def test_parsefier_multiple_matches_raises(self):
with tempfile.TemporaryDirectory() as tmp:
run = os.path.join(tmp, 'funky-rerun-with-repeated-samples')
shutil.copytree(self.run_dir, run)
            # sample3 exists, but not with cell number S458, so this should
            # raise an error, because if this happens something else went wrong
fake = os.path.join(run, 'Trojecp_666', 'json',
'sample3_S458_L003_R1_001.json')
with open(fake, 'w') as f:
f.write(json.dumps({}))
msg = ('Multiple matches found for the same samples in the same '
'lane, only one match is expected: sample3 in lane 3')
with self.assertRaisesRegex(ValueError, msg):
_parsefier(run, self.ss, 'json', '.json', 'halloween',
lambda x: 1)
def test_parsefier_no_logs_warns(self):
self.ss.add_sample(Sample({
'Sample_ID': 'H20_Myers',
'Lane': '1',
'Sample_Name': 'H20_Myers',
'index': 'ACTTTGTTGGAA',
'index2': 'GGTTAATTGAGA',
'Sample_Project': 'Trojecp_666'
}))
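        # H20_Myers has no log files on disk, so it should be named in the warning and excluded from the parsed counts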
exp = pd.DataFrame(data=[[1], [1], [1], [1], [1], [1], [1]],
columns=['halloween'],
index=self.stats.index.copy())
with self.assertWarnsRegex(UserWarning, 'No halloween log found for '
'these samples: H20_Myers'):
obs = _parsefier(self.run_dir, self.ss, 'json', '.json',
'halloween', lambda x: 1)
pd.testing.assert_frame_equal(obs.sort_index(), exp)
def test_parse_fastp_malformed(self):
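        # progressively add the nested keys fastp writes; every incomplete log should still raise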
with tempfile.NamedTemporaryFile('w+') as tmp:
tmp.write(json.dumps({}))
tmp.seek(0)
with self.assertRaisesRegex(ValueError, 'The fastp log for '
f'{tmp.name} is'
' malformed'):
_parse_fastp_counts(tmp.name)
tmp.write(json.dumps({'summary': {}}))
tmp.seek(0)
with self.assertRaisesRegex(ValueError, 'The fastp log for '
f'{tmp.name} is'
' malformed'):
_parse_fastp_counts(tmp.name)
tmp.write(json.dumps({'summary': {'after_filtering': {}}}))
tmp.seek(0)
with self.assertRaisesRegex(ValueError, 'The fastp log for '
f'{tmp.name} is'
' malformed'):
_parse_fastp_counts(tmp.name)
def test_parse_fastp_counts(self):
obs = _parse_fastp_counts(
os.path.join(self.run_dir, 'Trojecp_666', 'json',
'sample3_S457_L003_R1_001.json'))
self.assertEqual(obs, 4692)
def test_parse_samtools_malformed(self):
with tempfile.NamedTemporaryFile('w+') as tmp:
tmp.write('[hey] we processed like 42 reads\n')
tmp.seek(0)
with self.assertRaisesRegex(ValueError, 'The samtools log for '
f'{tmp.name} is'
' malformed'):
_parse_samtools_counts(tmp.name)
def test_parse_samtools_counts(self):
obs = _parse_samtools_counts(
os.path.join(self.run_dir, 'Trojecp_666', 'samtools',
'sample4_S369_L003_R1_001.log'))
self.assertEqual(obs, 2777)
def test_bcl2fastq_no_stats_file(self):
bad_dir = os.path.join(os.path.abspath(self.run_dir), 'Trojecp_666')
with self.assertRaisesRegex(IOError,
rf"Cannot find stats file \({bad_dir}"
r"/Stats/Stats.json\) for this run"):
bcl2fastq_counts(bad_dir, self.ss)
def test_bcl2fastq_counts_malformed_results(self):
with tempfile.TemporaryDirectory() as tmp:
stats = os.path.join(tmp, 'Stats')
os.makedirs(stats)
with open(os.path.join(stats, 'Stats.json'), 'w') as f:
f.write(json.dumps({}))
with self.assertRaisesRegex(KeyError, 'bcl stats file is missing '
'ConversionResults '
'attribute'):
bcl2fastq_counts(tmp, self.ss)
def test_bcl2fastq_counts_malformed_lane(self):
with tempfile.TemporaryDirectory() as tmp:
stats = os.path.join(tmp, 'Stats')
os.makedirs(stats)
with open(os.path.join(stats, 'Stats.json'), 'w') as f:
f.write(json.dumps({'ConversionResults': [{}]}))
with self.assertRaisesRegex(KeyError, 'bcl stats file is missing '
'DemuxResults '
'attribute'):
bcl2fastq_counts(tmp, self.ss)
def test_bcl2fastq_counts_malformed_lane_number(self):
with tempfile.TemporaryDirectory() as tmp:
stats = os.path.join(tmp, 'Stats')
os.makedirs(stats)
with open(os.path.join(stats, 'Stats.json'), 'w') as f:
f.write(json.dumps(
{'ConversionResults': [{'DemuxResults': {}}]}))
with self.assertRaisesRegex(KeyError, 'bcl stats file is missing '
'LaneNumber attribute'):
bcl2fastq_counts(tmp, self.ss)
def test_bcl2fastq_counts(self):
obs = bcl2fastq_counts(self.run_dir, self.ss)
pd.testing.assert_frame_equal(obs.sort_index(),
self.stats[['bcl_counts']])
def test_fastp_counts(self):
obs = fastp_counts(self.run_dir, self.ss)
pd.testing.assert_frame_equal(obs.sort_index(),
self.stats[['fastp_counts']])
def test_minimap2_counts(self):
obs = minimap2_counts(self.run_dir, self.ss)
pd.testing.assert_frame_equal(obs.sort_index(),
self.stats[['minimap2_counts']])
def test_count_collector(self):
obs = run_counts(self.run_dir, self.ss)
pd.testing.assert_frame_equal(obs.sort_index(), self.stats)
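# Expected per-(Sample_ID, Lane) read counts for the test run; loaded into a DataFrame in setUp and used as ground truth above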
RUN_STATS = {
'bcl_counts': {('sample1', '1'): 10000, ('sample2', '1'): 100000,
('sample1', '3'): 100000, ('sample2', '3'): 2300000,
('sample3', '3'): 300000, ('sample4', '3'): 400000,
('sample5', '3'): 567000},
'fastp_counts': {('sample1', '1'): 10800, ('sample2', '1'): 61404,
('sample1', '3'): 335996, ('sample2', '3'): 18374,
('sample3', '3'): 4692, ('sample4', '3'): 960,
('sample5', '3'): 30846196},
'minimap2_counts': {('sample1', '1'): 111172.0, ('sample2', '1'): 277611.0,
('sample1', '3'): 1168275.0, ('sample2', '3'): 1277.0,
('sample3', '3'): 33162.0, ('sample4', '3'): 2777.0,
('sample5', '3'): 4337654.0}
}
if __name__ == '__main__':
main()
| 42.296943
| 79
| 0.528804
|