hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf758f00c1ea09446618caddf86b1f3c27d9fd9 | 1,627 | py | Python | ComposeIt/Parser.py | k3lsey/ComposeIt | da790690b496b7add64f17b56838ad48dd42cf73 | [
"MIT"
] | 2 | 2018-06-04T03:51:47.000Z | 2018-06-05T00:12:46.000Z | ComposeIt/Parser.py | kelsey19/ComposeIt | da790690b496b7add64f17b56838ad48dd42cf73 | [
"MIT"
] | null | null | null | ComposeIt/Parser.py | kelsey19/ComposeIt | da790690b496b7add64f17b56838ad48dd42cf73 | [
"MIT"
] | null | null | null | import sys
import yaml
from collections import OrderedDict
from ComposeIt.Option import options
COMPOSE_VERSION = '3'
class InspectParser(object):
    """Convert a list of `docker inspect` result dicts into a docker-compose
    YAML document written to stdout.

    Only the Config/HostConfig keys registered in `ComposeIt.Option.options`
    are translated; everything else is ignored.
    """
    def __init__(self, inspect_results):
        self.inspect_results = inspect_results
        # Teach PyYAML to emit OrderedDict as a plain mapping so key order
        # in the generated compose file matches insertion order.
        yaml.add_representer(OrderedDict, self.represent_ordereddict)
    @staticmethod
    def represent_ordereddict(dumper, data):
        """Represent an OrderedDict as a YAML map without sorting keys."""
        pairs = [
            (dumper.represent_data(k), dumper.represent_data(v))
            for k, v in data.items()
        ]
        return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', pairs)
    def perform_parse(self):
        """Build one compose service per inspected container and dump the
        resulting document to stdout."""
        services = OrderedDict()
        for container in self.inspect_results:
            service = OrderedDict()
            # Walk both sections the option table knows about.
            for section in ('Config', 'HostConfig'):
                for key, value in container[section].items():
                    option_name = section + '.' + key
                    if option_name in options:
                        options[option_name].process_option(value=value, yml=service)
            # Docker prefixes container names with '/'; strip it.
            services[container['Name'][1:]] = service
        document = OrderedDict()
        document['version'] = COMPOSE_VERSION
        document['services'] = services
        yaml.dump(document, sys.stdout, default_flow_style=False)
| 29.581818 | 84 | 0.629994 |
acf7592d07f4957ce6d8e97912656539e77f27e5 | 2,393 | py | Python | flink-python/setup.py | jainanuj07/flink | b92365d442aafec3a42522eb2c7873b7cecc5cef | [
"Apache-2.0"
] | null | null | null | flink-python/setup.py | jainanuj07/flink | b92365d442aafec3a42522eb2c7873b7cecc5cef | [
"Apache-2.0"
] | null | null | null | flink-python/setup.py | jainanuj07/flink | b92365d442aafec3a42522eb2c7873b7cecc5cef | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import sys
from setuptools import setup
# PyFlink supports Python 2.7+ only; fail fast with a clear message.
if sys.version_info < (2, 7):
    print("Python versions prior to 2.7 are not supported for PyFlink.",
          file=sys.stderr)
    sys.exit(-1)
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
    # Execute version.py to obtain __version__ without importing the (not
    # yet installed) pyflink package. Use a context manager so the file
    # handle is closed even if exec() raises (the original open().read()
    # leaked the handle).
    with io.open(version_file, encoding='utf-8') as vf:
        exec(vf.read())
except IOError:
    print("Failed to load PyFlink version file for packaging. " +
          "'%s' not found!" % version_file,
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # noqa  -- defined by the exec() above
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pyflink',
    version=VERSION,
    packages=['pyflink',
              'pyflink.table',
              'pyflink.util'],
    url='http://flink.apache.org',
    license='http://www.apache.org/licenses/LICENSE-2.0',
    author='Flink Developers',
    author_email='dev@flink.apache.org',
    install_requires=['py4j==0.10.8.1'],
    description='Apache Flink Python API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'Development Status :: 1 - Planning',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.7']
)
| 36.815385 | 84 | 0.648558 |
acf7592e037f55f632831aa1a72464b372e964f9 | 504 | py | Python | configs/hyper_c8/fcnsp4_r50sp_4k_hyper_c8.py | dkswxd/Swin-Transformer-Semantic-Segmentation | 6af19736e5492a01d8952d4ee86a6d59b21c2ae1 | [
"Apache-2.0"
] | 1 | 2021-09-29T06:17:25.000Z | 2021-09-29T06:17:25.000Z | configs/hyper_c8/fcnsp4_r50sp_4k_hyper_c8.py | DKJJ/Swin-Transformer-Semantic-Segmentation | c8707951ddabdc0189451bcbd25c145f1f6cc041 | [
"Apache-2.0"
] | null | null | null | configs/hyper_c8/fcnsp4_r50sp_4k_hyper_c8.py | DKJJ/Swin-Transformer-Semantic-Segmentation | c8707951ddabdc0189451bcbd25c145f1f6cc041 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/fcnsp_r50sp.py', '../_base_/datasets/hyper_c8.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_4k.py'
]
norm_cfg = dict(type='BN', track_running_stats=True, requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg,in_channels=3),
decode_head=dict(num_classes=2,norm_cfg=norm_cfg,num_sp=4,sp_s=8),
auxiliary_head=dict(num_classes=2,norm_cfg=norm_cfg,num_sp=2,sp_s=8))
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,) | 36 | 73 | 0.72619 |
acf75946415c44a6f79fb20b58e0af80558987cb | 1,729 | py | Python | main/init_net.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | main/init_net.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | main/init_net.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | import torch, numpy as np, torch.nn as nn, matplotlib.pyplot as plt
from models import Reservoir, CUDAvoir
from optimizers import Force
'''
Runs naive reservoir model 'steps' number of
timesteps. Returns entire state vector history
when 'record' is set to True.
'''
def run_naive_net(net, steps, record=False):
    """Advance `net` for `steps` timesteps by calling net.forward().

    When `record` is True, return the per-step states as an ndarray;
    otherwise the states are discarded and None is returned.
    """
    if not record:
        for _ in range(steps):
            net.forward()
        return
    history = [net.forward() for _ in range(steps)]
    return np.asarray(history)
'''
Runs pytorch-based reservoir network 'steps'
number of timesteps. Returns entire state and
output vector history when 'record' is set to True.
'''
def run_torch_net(net,steps,record=False):
    """Run a torch reservoir for `steps` timesteps while training its
    readout layer toward an amplitude-4 sine target with the Force
    optimizer.

    When `record` is True, returns (state_vectors, outputs); state
    vectors are regrouped into one length-net.n array per step.
    Otherwise returns None.
    """
    state_vecs = []
    outputs = []
    sin = torch.sin
    amp = 4  # target-signal amplitude
    func = lambda a : amp*sin(a)
    # FORCE/RLS optimizer fit to the sine target; dt=0.1, a=3 are passed
    # through to optimizers.Force -- see that class for their semantics.
    f = Force(targ=func,dt=0.1,a=3,n=net.n)
    for s in range(steps):
        y = net.forward()
        if record:
            state_vecs.append(y)
            outputs.append(net.o)
        else:
            pass
        # Update readout weights in place every step via recursive least
        # squares; rewrapping in nn.Parameter keeps it a module parameter.
        net.ol.weight = nn.Parameter(f.rls_update(net.ol.weight,net.v,net.o,s))
    if record:
        state_vecs = torch.cat(state_vecs).detach().numpy()
        # NOTE(review): len(state_vecs)/net.n is a float under Python 3;
        # np.array_split tolerates it, but integer division (//) may be
        # the intent -- confirm.
        state_vecs = np.array_split(state_vecs, len(state_vecs)/net.n)
        outputs = torch.cat(outputs).detach().numpy()
        return (np.asarray(state_vecs), outputs)
if __name__ == '__main__':
    # Demo: build a torch reservoir, FORCE-train its readout, plot output.
    n = 1600     # reservoir size
    p = 0.2      # connection probability
    sig = 0.3    # weight scale
    steps = 1000 # simulation length
    # Numpy-based reservoir equivalent (module defines run_naive_net):
    # res = Reservoir(n=n, p=p, sig=sig, bias=True)
    # svs = run_naive_net(res, steps, record=True)
    cudares = CUDAvoir(n=n, p=p, sig=sig, o=0.0, bias=True)
    # BUG FIX: the original called the undefined `run_cudares`; the
    # function defined in this module is `run_torch_net`.
    svs, ops = run_torch_net(cudares, steps, record=True)
    plt.plot(ops)
    plt.show()
    # plt.imshow(svs.T)
    # plt.show()
acf75987826a6fa19ce9c81e1387149218f2af92 | 3,274 | py | Python | tests/test_complexity_checker.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | tests/test_complexity_checker.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | tests/test_complexity_checker.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | import astroid
import pylint.testutils
from pylint_complexity import method_length_checker
class TestMethodLengthChecker(pylint.testutils.CheckerTestCase):
    """Tests for MethodLengthChecker: bodies with more than 10 statements
    should raise 'method-too-long'; 10 or fewer should not.

    The `#@` markers inside the snippets tell astroid.extract_node which
    node to return to the checker.
    """
    CHECKER_CLASS = method_length_checker.MethodLengthChecker
    def test_function_with_more_than_10_statements_is_too_long(self):
        self.assertTooLong("""
        def longFunction(): #@
            x = 1
            x = 2
            x = 3
            x = 4
            x = 5
            x = 6
            x = 7
            x = 8
            x = 9
            x = 10
            x = 11
        """)
    def test_function_with_10_statements_is_acceptable(self):
        self.assertNotTooLong("""
        def acceptableLengthFunction(self): #@
            x = 1
            x = 2
            x = 3
            x = 4
            x = 5
            x = 6
            x = 7
            x = 8
            x = 9
            x = 10
        """)
    def test_function_length_doesnt_count_comments(self):
        # 10 statements + 1 comment line must still be acceptable.
        self.assertNotTooLong("""
        def acceptableLengthFunctionWithComments(self): #@
            x = 1
            x = 2
            x = 3
            x = 4
            x = 5
            x = 6
            x = 7
            x = 8
            x = 9
            # comments aren't included in the method LOC
            x = 10
        """)
    def test_blank_lines_in_function_dont_count_as_statements(self):
        # NOTE(review): this snippet appears to have lost its blank lines
        # (as written it only checks a single-statement function) -- the
        # original presumably interleaved blank lines with statements.
        self.assertNotTooLong("""
        def longFunction(): #@
            x = 11
        """)
    def test_multiline_statements_count_as_one_line_of_code(self):
        # The backslash-continued assignment is one statement, so the body
        # has 10 statements and is acceptable.
        self.assertNotTooLong("""
        def acceptableFunctionWithSplitExpression(self):
            x = 1
            x = 2
            x = 3
            x = 4
            x = 5
            x = 6
            x = 7
            x = 8
            x = 9
            x = 10 + \
                11
        """)
    def test_method_with_11_statements_is_too_long(self):
        self.assertTooLong("""
        class SomeClass():
            def overlyLongMethod(self): #@
                x = 1
                x = 2
                x = 3
                x = 4
                x = 5
                x = 6
                x = 7
                x = 8
                x = 9
                x = 10
                x = 11
        """)
    def test_method_with_10_statements_isnt_too_long(self):
        self.assertNotTooLong("""
        class SomeClass():
            def overlyLongMethod(self): #@
                x = 1
                x = 2
                x = 3
                x = 4
                x = 5
                x = 6
                x = 7
                x = 8
                x = 9
                x = 10
        """)
    def assertNotTooLong(self, function: str):
        """Assert that checking *function* emits no pylint messages."""
        func_node = astroid.extract_node(function)
        with self.assertNoMessages():
            self.checker.visit_functiondef(func_node)
    def assertTooLong(self, function: str):
        """Assert that checking *function* emits 'method-too-long'."""
        func_node = astroid.extract_node(function)
        with self.assertAddsMessages(
            pylint.testutils.Message(
                msg_id='method-too-long',
                node=func_node
            )
        ):
            self.checker.visit_functiondef(func_node)
| 23.724638 | 69 | 0.432804 |
acf7598b1354a20394cb15ba6d132ba43b324dce | 2,789 | py | Python | local/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/domains/tests/utils.py | sahilsdei/django_ecommerce | edc2513e41aca178d1ccae14ebaa6c7b1d709e73 | [
"MIT"
] | null | null | null | local/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/domains/tests/utils.py | sahilsdei/django_ecommerce | edc2513e41aca178d1ccae14ebaa6c7b1d709e73 | [
"MIT"
] | null | null | null | local/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/domains/tests/utils.py | sahilsdei/django_ecommerce | edc2513e41aca178d1ccae14ebaa6c7b1d709e73 | [
"MIT"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import sys
import traceback
from regression.python_test_utils import test_utils as utils
def create_domain(server, db_name, schema_name, schema_id, domain_name):
    """
    This function is used to add the domain to existing schema
    :param server: server details
    :type server: dict
    :param db_name: database name
    :type db_name: str
    :param schema_name: schema name
    :type schema_name: str
    :param schema_id: schema id
    :type schema_id: int
    :param domain_name: domain name
    :type domain_name: str
    :return: (oid, typname) tuple of the created domain on success,
        None if any step raises (the exception is printed to stderr)
    """
    try:
        connection = utils.get_db_connection(db_name,
                                             server['username'],
                                             server['db_password'],
                                             server['host'],
                                             server['port'])
        pg_cursor = connection.cursor()
        # Identifiers are interpolated directly; acceptable only because
        # these are trusted test fixtures, not user input.
        query = 'CREATE DOMAIN ' + schema_name + '.' + domain_name + \
                ' AS character(10) DEFAULT 1'
        pg_cursor.execute(query)
        connection.commit()
        # Get 'oid' from newly created domain
        pg_cursor.execute("SELECT d.oid, d.typname FROM pg_type d WHERE"
                          " d.typname='%s' AND d.typnamespace='%s'" %
                          (domain_name, schema_id))
        domains = pg_cursor.fetchone()
        connection.close()
        return domains
    except Exception:
        # Best-effort test helper: report and fall through (returns None).
        traceback.print_exc(file=sys.stderr)
def verify_domain(server, db_name, schema_id, domain_name):
    """
    Look up the (oid, typname) of a domain inside the given schema.
    :param server: server details
    :type server: dict
    :param db_name: db name
    :type db_name: str
    :param schema_id: schema id
    :type schema_id: int
    :param domain_name: domain name
    :type domain_name: str
    :return: (oid, typname) tuple, or None when no matching domain exists
    """
    conn = utils.get_db_connection(db_name,
                                   server['username'],
                                   server['db_password'],
                                   server['host'],
                                   server['port'])
    cursor = conn.cursor()
    query = ("SELECT d.oid, d.typname FROM pg_type d WHERE"
             " d.typname='%s' AND d.typnamespace='%s'" %
             (domain_name, schema_id))
    cursor.execute(query)
    row = cursor.fetchone()
    conn.close()
    return row
| 34.8625 | 74 | 0.534242 |
acf75994803646bc9968cccf46636d47ff0c24fd | 1,170 | py | Python | buglocalizer/tfidf.py | datnvhust/bug_locator | 10c616622e462dd1488048de56804867e1582e54 | [
"MIT"
] | 2 | 2021-05-31T14:35:41.000Z | 2021-12-07T08:09:02.000Z | buglocalizer/tfidf.py | datnvhust/bug_locator | 10c616622e462dd1488048de56804867e1582e54 | [
"MIT"
] | null | null | null | buglocalizer/tfidf.py | datnvhust/bug_locator | 10c616622e462dd1488048de56804867e1582e54 | [
"MIT"
] | null | null | null | from math import log
class TFIDFVectorizer():
    """Compute TF-IDF scores over a corpus of bag-of-words documents.

    A document is a mapping ``{word: count}``; a corpus (``doc_list``)
    is a list of such mappings.
    """
    def __init__(self, k=1.5, b=0.75):
        # k and b are stored but currently unused: they are the usual
        # BM25 tuning parameters, kept for a future BM25 scoring mode.
        self.k = k
        self.b = b
    def tf(self, word, doc, doc_list):
        """Sublinear term frequency: log(count) + 1.

        ``doc_list`` is unused but kept for signature compatibility.
        Raises KeyError if *word* is absent from *doc* and ValueError
        (math domain error) for non-positive counts.
        """
        return log(doc[word]) + 1
    def idf(self, word, doc_list):
        """Inverse document frequency: log(N / document_frequency).

        Raises ValueError when *word* appears in no document. (The
        original implementation printed a debug line and then crashed
        with ZeroDivisionError in that case.)
        """
        n_docs = len(doc_list)
        doc_freq = sum(1 for doc in doc_list if word in doc)
        if doc_freq == 0:
            raise ValueError(
                "word %r does not occur in any document" % (word,))
        return log(n_docs / doc_freq)
    def tfidf(self, word, doc, doc_list):
        """TF-IDF score of *word* in *doc* relative to *doc_list*."""
        return self.tf(word, doc, doc_list) * self.idf(word, doc_list)
if __name__ == '__main__':
    # Tiny demo corpus: four bag-of-words documents.
    corpus = [
        {'mik1': 28, 'aa': 16, 'web': 14, 'be': 2, 'python': 1},
        {'mik2': 21, 'ab': 11, 'web': 14, 'chal': 5},
        {'mik3': 126, 'bc': 116, 'web': 74, 'lelo': 12, 'foot': 1},
        {'mik4': 8, 'cd': 3, 'arbit': 2, 'da': 1, 'fork': 1},
    ]
    # print(sum(corpus[0].values()))
    vectorizer = TFIDFVectorizer()
    # Print the tf-idf score of every term in every document.
    for document in corpus:
        for term in document:
            print(term, vectorizer.tfidf(term, document, corpus))
    import pandas as pd
    # data = pd.read_csv('AspectJ.csv', index_col=False)
    # doc_list = data['summary']
    # print(doc_list)
acf759a11cf7d929977012b26b3ab28d16569234 | 70,607 | py | Python | sklearn/linear_model/tests/test_sgd.py | danifernandes-hub/scikit-learn | 8d7935827d01d95239710c9c519bb1905f46b645 | [
"BSD-3-Clause"
] | null | null | null | sklearn/linear_model/tests/test_sgd.py | danifernandes-hub/scikit-learn | 8d7935827d01d95239710c9c519bb1905f46b645 | [
"BSD-3-Clause"
] | null | null | null | sklearn/linear_model/tests/test_sgd.py | danifernandes-hub/scikit-learn | 8d7935827d01d95239710c9c519bb1905f46b645 | [
"BSD-3-Clause"
] | null | null | null | import pickle
import joblib
import pytest
import numpy as np
import scipy.sparse as sp
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.svm import OneClassSVM
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
    # SGDClassifier that converts every input to CSR before delegating,
    # so the dense test-suite doubles as a sparse-input test-suite.
    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().fit(X, y, *args, **kw)
    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().partial_fit(X, y, *args, **kw)
    def decision_function(self, X):
        X = sp.csr_matrix(X)
        return super().decision_function(X)
    def predict_proba(self, X):
        X = sp.csr_matrix(X)
        return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
    # SGDRegressor that converts every input to CSR before delegating.
    # Consistency fix: use super() like the sibling _SparseSGDClassifier
    # instead of explicit linear_model.SGDRegressor.<method>(self, ...)
    # calls (equivalent here -- direct subclass, no multiple inheritance).
    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().fit(X, y, *args, **kw)
    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().partial_fit(X, y, *args, **kw)
    def decision_function(self, X, *args, **kw):
        # XXX untested as of v0.22
        X = sp.csr_matrix(X)
        return super().decision_function(X, *args, **kw)
class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM):
    # SGDOneClassSVM that converts every input to CSR before delegating.
    # Consistency fix: use super() like the sibling _SparseSGDClassifier
    # instead of explicit linear_model.SGDOneClassSVM.<method>(self, ...)
    # calls (equivalent here -- direct subclass, no multiple inheritance).
    def fit(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return super().fit(X, *args, **kw)
    def partial_fit(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return super().partial_fit(X, *args, **kw)
    def decision_function(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return super().decision_function(X, *args, **kw)
def SGDClassifier(**kwargs):
    """Dense SGDClassifier with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
    """Dense SGDRegressor with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
    """Dense SGDOneClassSVM with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
    """CSR-input SGDClassifier with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
    """CSR-input SGDRegressor with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
    """CSR-input SGDOneClassSVM with the test-suite defaults filled in."""
    _update_kwargs(kwargs)
    return _SparseSGDOneClassSVM(**kwargs)
# Test Data
# test sample 1: two linearly-separable clusters in 2D.
# true_result holds the expected predictions for the probe points T.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array(
    [
        [-1, 1],
        [-0.75, 0.5],
        [-1.5, 1.5],
        [1, 1],
        [0.75, 0.5],
        [1.5, 1.5],
        [-1, -1],
        [0, -0.5],
        [1, -1],
    ]
)
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3: disjoint one-hot feature groups per class.
X3 = np.array(
    [
        [1, 1, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0, 0],
    ]
)
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array(
    [
        [1, 0.9, 0.8, 0, 0, 0],
        [1, 0.84, 0.98, 0, 0, 0],
        [1, 0.96, 0.88, 0, 0, 0],
        [1, 0.91, 0.99, 0, 0, 0],
        [0, 0, 0, 0.89, 0.91, 1],
        [0, 0, 0, 0.79, 0.84, 1],
        [0, 0, 0, 0.91, 0.95, 1],
        [0, 0, 0, 0.93, 1, 1],
    ]
)
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# Shared iris dataset, loaded once at module import.
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case to classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
    """Reference averaged-SGD with squared loss and L2 penalty.

    Performs one pass over X with constant learning rate `eta` and
    returns (average_weights, average_intercept) -- the running mean of
    the per-step weights/intercept -- for comparison against the
    estimators' `average=True` mode.
    """
    if weight_init is None:
        weights = np.zeros(X.shape[1])
    else:
        weights = weight_init
    average_weights = np.zeros(X.shape[1])
    intercept = intercept_init
    average_intercept = 0.0
    decay = 1.0
    # sparse data has a fixed decay of .01
    if klass in (SparseSGDClassifier, SparseSGDRegressor):
        decay = 0.01
    for i, entry in enumerate(X):
        # Squared-loss gradient at the current sample.
        p = np.dot(entry, weights)
        p += intercept
        gradient = p - y[i]
        weights *= 1.0 - (eta * alpha)
        weights += -(eta * gradient * entry)
        intercept += -(eta * gradient) * decay
        # Incremental update of the running averages.
        average_weights *= i
        average_weights += weights
        average_weights /= i + 1.0
        average_intercept *= i
        average_intercept += intercept
        average_intercept /= i + 1.0
    return average_weights, average_intercept
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
@pytest.mark.parametrize("fit_method", ["fit", "partial_fit"])
@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"alpha": -0.1}, "alpha must be >= 0"),
        ({"penalty": "foobar", "l1_ratio": 0.85}, "Penalty foobar is not supported"),
        ({"loss": "foobar"}, "The loss foobar is not supported"),
        ({"l1_ratio": 1.1}, r"l1_ratio must be in \[0, 1\]"),
        ({"learning_rate": "<unknown>"}, "learning rate <unknown> is not supported"),
        ({"nu": -0.5}, r"nu must be in \(0, 1]"),
        ({"nu": 2}, r"nu must be in \(0, 1]"),
        ({"alpha": 0, "learning_rate": "optimal"}, "alpha must be > 0"),
        ({"eta0": 0, "learning_rate": "constant"}, "eta0 must be > 0"),
        ({"max_iter": -1}, "max_iter must be > zero"),
        ({"shuffle": "false"}, "shuffle must be either True or False"),
        ({"early_stopping": "false"}, "early_stopping must be either True or False"),
        (
            {"validation_fraction": -0.1},
            r"validation_fraction must be in range \(0, 1\)",
        ),
        ({"n_iter_no_change": 0}, "n_iter_no_change must be >= 1"),
    ],
    # Avoid long error messages in test names:
    # https://github.com/scikit-learn/scikit-learn/issues/21362
    ids=lambda x: x[:10].replace("]", "") if isinstance(x, str) else x,
)
def test_sgd_estimator_params_validation(klass, fit_method, params, err_msg):
    """Validate parameters in the different SGD estimators."""
    try:
        sgd_estimator = klass(**params)
    except TypeError as err:
        if "unexpected keyword argument" in str(err):
            # skip test if the parameter is not supported by the estimator
            return
        raise err
    with pytest.raises(ValueError, match=err_msg):
        # Classifiers need the full label set up front for partial_fit.
        if is_classifier(sgd_estimator) and fit_method == "partial_fit":
            fit_params = {"classes": np.unique(Y)}
        else:
            fit_params = {}
        getattr(sgd_estimator, fit_method)(X, Y, **fit_params)
def _test_warm_start(klass, X, Y, lr):
    """Explicit coef/intercept initialization and warm_start=True must
    yield identical models for learning-rate schedule `lr`."""
    # Test that explicit warm restart...
    clf = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr)
    clf.fit(X, Y)
    clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr)
    clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy())
    # ... and implicit warm restart are equivalent.
    clf3 = klass(
        alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr
    )
    clf3.fit(X, Y)
    assert clf3.t_ == clf.t_
    assert_array_almost_equal(clf3.coef_, clf.coef_)
    clf3.set_params(alpha=0.001)
    clf3.fit(X, Y)
    assert clf3.t_ == clf2.t_
    assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
    """Run the shared warm-start check for every learning-rate schedule."""
    _test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_input_format(klass):
    """A 2-column target array must be rejected with ValueError."""
    # Input format tests.
    clf = klass(alpha=0.01, shuffle=False)
    clf.fit(X, Y)
    Y_ = np.array(Y)[:, np.newaxis]
    Y_ = np.c_[Y_, Y_]
    with pytest.raises(ValueError):
        clf.fit(X, Y_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_clone(klass):
    """A clone with params changed via set_params must fit identically to
    a fresh estimator constructed with those params."""
    original = klass(alpha=0.01, penalty="l1")
    cloned = clone(original)
    cloned.set_params(penalty="l2")
    cloned.fit(X, Y)
    reference = klass(alpha=0.01, penalty="l2")
    reference.fit(X, Y)
    assert_array_equal(cloned.coef_, reference.coef_)
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
def test_plain_has_no_average_attr(klass):
    """Averaging bookkeeping attributes must exist iff average=True."""
    clf = klass(average=True, eta0=0.01)
    clf.fit(X, Y)
    assert hasattr(clf, "_average_coef")
    assert hasattr(clf, "_average_intercept")
    assert hasattr(clf, "_standard_intercept")
    assert hasattr(clf, "_standard_coef")
    clf = klass()
    clf.fit(X, Y)
    assert not hasattr(clf, "_average_coef")
    assert not hasattr(clf, "_average_intercept")
    assert not hasattr(clf, "_standard_intercept")
    assert not hasattr(clf, "_standard_coef")
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
def test_late_onset_averaging_not_reached(klass):
    """If the averaging onset (average=600 samples) is never reached, the
    model must be identical to a non-averaged one."""
    clf1 = klass(average=600)
    clf2 = klass()
    for _ in range(100):
        if is_classifier(clf1):
            clf1.partial_fit(X, Y, classes=np.unique(Y))
            clf2.partial_fit(X, Y, classes=np.unique(Y))
        else:
            clf1.partial_fit(X, Y)
            clf2.partial_fit(X, Y)
    assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
    # One-class SVMs expose offset_ instead of intercept_.
    if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]:
        assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
    elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
        assert_allclose(clf1.offset_, clf2.offset_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_late_onset_averaging_reached(klass):
    """Once averaging kicks in, the estimator must match the reference
    asgd() implementation warm-started from a one-epoch plain fit."""
    eta0 = 0.001
    alpha = 0.0001
    # Encode labels as {-1, +1} so squared loss behaves like regression.
    Y_encode = np.array(Y)
    Y_encode[Y_encode == 1] = -1.0
    Y_encode[Y_encode == 2] = 1.0
    clf1 = klass(
        average=7,
        learning_rate="constant",
        loss="squared_error",
        eta0=eta0,
        alpha=alpha,
        max_iter=2,
        shuffle=False,
    )
    clf2 = klass(
        average=0,
        learning_rate="constant",
        loss="squared_error",
        eta0=eta0,
        alpha=alpha,
        max_iter=1,
        shuffle=False,
    )
    clf1.fit(X, Y_encode)
    clf2.fit(X, Y_encode)
    # clf2's one plain epoch provides the warm-start point for asgd().
    average_weights, average_intercept = asgd(
        klass,
        X,
        Y_encode,
        eta0,
        alpha,
        weight_init=clf2.coef_.ravel(),
        intercept_init=clf2.intercept_,
    )
    assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16)
    assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_early_stopping(klass):
    """With tol set, fitting must stop before max_iter whether or not
    validation-based early stopping is enabled."""
    X = iris.data[iris.target > 0]
    Y = iris.target[iris.target > 0]
    for use_early_stopping in (True, False):
        max_iter = 1000
        model = klass(
            early_stopping=use_early_stopping, tol=1e-3, max_iter=max_iter
        ).fit(X, Y)
        assert model.n_iter_ < max_iter
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_adaptive_longer_than_constant(klass):
    """The 'adaptive' schedule should run more iterations than 'constant'
    with the same eta0/tol on iris before the stopping criterion fires."""
    clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100)
    clf1.fit(iris.data, iris.target)
    clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100)
    clf2.fit(iris.data, iris.target)
    assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_validation_set_not_used_for_training(klass):
    """Fitting with early_stopping=True on all of X must equal fitting
    without early stopping on just the training split -- proving the
    validation split is held out from training."""
    X, Y = iris.data, iris.target
    validation_fraction = 0.4
    seed = 42
    shuffle = False
    max_iter = 10
    clf1 = klass(
        early_stopping=True,
        random_state=np.random.RandomState(seed),
        validation_fraction=validation_fraction,
        learning_rate="constant",
        eta0=0.01,
        tol=None,
        max_iter=max_iter,
        shuffle=shuffle,
    )
    clf1.fit(X, Y)
    assert clf1.n_iter_ == max_iter
    clf2 = klass(
        early_stopping=False,
        random_state=np.random.RandomState(seed),
        learning_rate="constant",
        eta0=0.01,
        tol=None,
        max_iter=max_iter,
        shuffle=shuffle,
    )
    # Reproduce the same train/validation split the estimator makes
    # internally (stratified for classifiers).
    if is_classifier(clf2):
        cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed)
    else:
        cv = ShuffleSplit(test_size=validation_fraction, random_state=seed)
    idx_train, idx_val = next(cv.split(X, Y))
    idx_train = np.sort(idx_train)  # remove shuffling
    clf2.fit(X[idx_train], Y[idx_train])
    assert clf2.n_iter_ == max_iter
    assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_n_iter_no_change(klass):
    """n_iter_ must grow monotonically with the patience n_iter_no_change."""
    X, Y = iris.data, iris.target
    # test that n_iter_ increases monotonically with n_iter_no_change
    for early_stopping in [True, False]:
        n_iter_list = [
            klass(
                early_stopping=early_stopping,
                n_iter_no_change=n_iter_no_change,
                tol=1e-4,
                max_iter=1000,
            )
            .fit(X, Y)
            .n_iter_
            for n_iter_no_change in [2, 3, 10]
        ]
        assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_not_enough_sample_for_early_stopping(klass):
    """A validation_fraction leaving too few samples must raise ValueError."""
    # test an error is raised if the training or validation set is empty
    clf = klass(early_stopping=True, validation_fraction=0.99)
    with pytest.raises(ValueError):
        clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
    """Smoke test: every classification loss must learn sample 1."""
    # Check that SGD gives any results :-)
    for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"):
        clf = klass(
            penalty="l2",
            alpha=0.01,
            fit_intercept=True,
            loss=loss,
            max_iter=10,
            shuffle=True,
        )
        clf.fit(X, Y)
        # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
        assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_provide_coef(klass):
    """Check that the shape of `coef_init` is validated."""
    # X has 2 features; a length-3 coef_init must be rejected.
    with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
        klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize(
    "klass, fit_params",
    [
        (SGDClassifier, {"intercept_init": np.zeros((3,))}),
        (SparseSGDClassifier, {"intercept_init": np.zeros((3,))}),
        (SGDOneClassSVM, {"offset_init": np.zeros((3,))}),
        (SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}),
    ],
)
def test_set_intercept_offset(klass, fit_params):
    """Check that `intercept_init` or `offset_init` is validated."""
    # A length-3 init cannot match the binary problem's single intercept.
    sgd_estimator = klass()
    with pytest.raises(ValueError, match="does not match dataset"):
        sgd_estimator.fit(X, Y, **fit_params)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_early_stopping_with_partial_fit(klass):
    """Check that we raise an error for `early_stopping` used with
    `partial_fit`.
    """
    err_msg = "early_stopping should be False with partial_fit"
    with pytest.raises(ValueError, match=err_msg):
        klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize(
    "klass, fit_params",
    [
        (SGDClassifier, {"intercept_init": 0}),
        (SparseSGDClassifier, {"intercept_init": 0}),
        (SGDOneClassSVM, {"offset_init": 0}),
        (SparseSGDOneClassSVM, {"offset_init": 0}),
    ],
)
def test_set_intercept_offset_binary(klass, fit_params):
    """Check that we can pass a scaler with binary classification to
    `intercept_init` or `offset_init`."""
    # Must fit without raising.
    klass().fit(X5, Y5, **fit_params)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
    # Checks the SGDClassifier correctly computes the average weights
    # by comparing against the naive `asgd` reference (defined earlier in
    # this file) to 14 decimals, so the update order must match exactly.
    eta = 0.1
    alpha = 2.0
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    # simple linear function without noise
    y = np.dot(X, w)
    y = np.sign(y)
    clf.fit(X, y)
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    # binary problem: the estimator stores coef_ as a single row
    average_weights = average_weights.reshape(1, -1)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=14)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
    """Warm-starting from a previously fitted `intercept_` must not raise."""
    # feed the intercept learned on the same data back in as intercept_init
    fitted = klass().fit(X5, Y5)
    klass().fit(X5, Y5, intercept_init=fitted.intercept_)
    fitted = klass().fit(X, Y)
    klass().fit(X, Y, intercept_init=fitted.intercept_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
    """Fitting on a target with a single class must raise ValueError."""
    clf = klass(alpha=0.01, max_iter=20)
    single_class_target = np.ones(9)
    with pytest.raises(ValueError):
        clf.fit(X2, single_class_target)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # partial_fit with class_weight='balanced' not supported"""
    # NOTE(review): this regex must match the estimator's error message
    # verbatim — "you can us" presumably mirrors a typo in that message;
    # do not "fix" it here without checking the estimator source.
    regex = (
        r"class_weight 'balanced' is not supported for "
        r"partial_fit\. In order to use 'balanced' weights, "
        r"use compute_class_weight\('balanced', classes=classes, y=y\). "
        r"In place of y you can us a large enough sample "
        r"of the full training set target to properly "
        r"estimate the class frequency distributions\. "
        r"Pass the resulting weights as the class_weight "
        r"parameter\."
    )
    with pytest.raises(ValueError, match=regex):
        klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
    """Fit a three-class problem and sanity-check shapes and predictions."""
    clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
    n_classes, n_features = 3, 2
    assert clf.coef_.shape == (n_classes, n_features)
    assert clf.intercept_.shape == (n_classes,)
    assert clf.decision_function([[0, 0]]).shape == (1, n_classes)
    assert_array_equal(clf.predict(T2), true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
    eta = 0.001
    alpha = 0.01
    # Multi-class average test case: each one-vs-rest row of coef_ must
    # match the naive `asgd` reference run on a +/-1 recoding of the target.
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    np_Y2 = np.array(Y2)
    clf.fit(X2, np_Y2)
    classes = np.unique(np_Y2)
    for i, cl in enumerate(classes):
        # one-vs-rest target: +1 for the current class, -1 elsewhere
        y_i = np.ones(np_Y2.shape[0])
        y_i[np_Y2 != cl] = -1
        average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
        assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
        assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
    # Multi-class test case with explicit coef_init/intercept_init arrays.
    clf = klass(alpha=0.01, max_iter=20)
    clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3))
    assert clf.coef_.shape == (3, 2)
    # BUG FIX: the original `assert clf.intercept_.shape, (3,)` asserted the
    # truthiness of the shape tuple (always True) with `(3,)` as the assert
    # message — it could never fail. Compare the shapes instead.
    assert clf.intercept_.shape == (3,)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
    """The multi-class fit also works when run with two parallel jobs."""
    clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
    n_classes = 3
    assert clf.coef_.shape == (n_classes, 2)
    assert clf.intercept_.shape == (n_classes,)
    assert clf.decision_function([[0, 0]]).shape == (1, n_classes)
    assert_array_equal(clf.predict(T2), true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
    # Checks coef_init and intercept_init shape for multi-class
    # problems
    # (per the passing fits below, the valid shapes are (3, 2) and (3,))
    # Provided coef_ does not match dataset
    clf = klass()
    with pytest.raises(ValueError):
        clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
    # Provided coef_ does match dataset
    clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
    # Provided intercept_ does not match dataset
    clf = klass()
    with pytest.raises(ValueError):
        clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
    # Provided intercept_ does match dataset.
    clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
# TODO: Remove filterwarnings in v1.2.
@pytest.mark.filterwarnings("ignore:.*squared_loss.*:FutureWarning")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
    # Checks that SGDClassifier predict_proba and predict_log_proba methods
    # can either be accessed or raise an appropriate error message
    # otherwise. See
    # https://github.com/scikit-learn/scikit-learn/issues/10938 for more
    # details.
    for loss in linear_model.SGDClassifier.loss_functions:
        clf = SGDClassifier(loss=loss)
        # TODO(1.3): Remove "log"
        if loss in ("log_loss", "log", "modified_huber"):
            # probabilistic losses: both methods must be reachable
            assert hasattr(clf, "predict_proba")
            assert hasattr(clf, "predict_log_proba")
        else:
            # non-probabilistic losses: attribute access itself must raise
            # AttributeError with this exact message
            message = "probability estimates are not available for loss={!r}".format(
                loss
            )
            assert not hasattr(clf, "predict_proba")
            assert not hasattr(clf, "predict_log_proba")
            with pytest.raises(AttributeError, match=message):
                clf.predict_proba
            with pytest.raises(AttributeError, match=message):
                clf.predict_log_proba
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
    # Check SGD.predict_proba
    # Hinge loss does not allow for conditional prob estimate.
    # We cannot use the factory here, because it defines predict_proba
    # anyway.
    clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y)
    assert not hasattr(clf, "predict_proba")
    assert not hasattr(clf, "predict_log_proba")
    # log and modified_huber losses can output probability estimates
    # binary case
    for loss in ["log_loss", "modified_huber"]:
        clf = klass(loss=loss, alpha=0.01, max_iter=10)
        clf.fit(X, Y)
        # probabilities should agree with the side of the decision boundary
        p = clf.predict_proba([[3, 2]])
        assert p[0, 1] > 0.5
        p = clf.predict_proba([[-1, -1]])
        assert p[0, 1] < 0.5
        p = clf.predict_log_proba([[3, 2]])
        assert p[0, 1] > p[0, 0]
        p = clf.predict_log_proba([[-1, -1]])
        assert p[0, 1] < p[0, 0]
    # log loss multiclass probability estimates
    clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2)
    d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]])
    p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])
    # argmax of probabilities must match argmax of decision values
    assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
    assert_almost_equal(p[0].sum(), 1)
    assert np.all(p[0] >= 0)
    p = clf.predict_proba([[-1, -1]])
    d = clf.decision_function([[-1, -1]])
    assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
    # predict_log_proba must equal log of predict_proba
    lp = clf.predict_log_proba([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    assert_array_almost_equal(np.log(p), lp)
    lp = clf.predict_log_proba([[-1, -1]])
    p = clf.predict_proba([[-1, -1]])
    assert_array_almost_equal(np.log(p), lp)
    # Modified Huber multiclass probability estimates; requires a separate
    # test because the hard zero/one probabilities may destroy the
    # ordering present in decision_function output.
    clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
    clf.fit(X2, Y2)
    d = clf.decision_function([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    if klass != SparseSGDClassifier:
        assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
    else:  # XXX the sparse test gets a different X2 (?)
        assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
    # the following sample produces decision_function values < -1,
    # which would cause naive normalization to fail (see comment
    # in SGDClassifier.predict_proba)
    x = X.mean(axis=0)
    d = clf.decision_function([x])
    if np.all(d < -1):  # XXX not true in sparse test case (why?)
        p = clf.predict_proba([x])
        # degenerate case falls back to the uniform distribution
        assert_array_almost_equal(p[0], [1 / 3.0] * 3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
    # Test L1 regularization: a strong L1 penalty should zero out the
    # middle coefficients while still fitting the training data, and the
    # model must survive sparsify() and a pickle round-trip.
    n = len(X4)
    rng = np.random.RandomState(13)
    idx = np.arange(n)
    rng.shuffle(idx)
    X = X4[idx, :]
    Y = Y4[idx]
    clf = klass(
        penalty="l1",
        alpha=0.2,
        fit_intercept=False,
        max_iter=2000,
        tol=None,
        shuffle=False,
    )
    clf.fit(X, Y)
    # interior coefficients are driven exactly to zero by the L1 penalty
    assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # test sparsify with dense inputs
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # pickle and unpickle with sparse coef_
    clf = pickle.loads(pickle.dumps(clf))
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
    """Down-weighting one class must move the decision boundary."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    # unweighted fit: the probe point lands on the positive side
    unweighted = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None)
    unweighted.fit(X, y)
    assert_array_equal(unweighted.predict([[0.2, -1.0]]), np.array([1]))
    # a tiny weight for class 1 rotates the hyperplane clock-wise and the
    # prediction at the probe point flips
    weighted = klass(
        alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001}
    )
    weighted.fit(X, y)
    assert_array_equal(weighted.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
    """Explicit equal class weights approximate the unweighted fit."""
    X_plain = [[1, 0], [1, 0], [0, 1], [0, 1]]
    y_plain = [0, 0, 1, 1]
    clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
    clf.fit(X_plain, y_plain)
    X_half = [[1, 0], [0, 1]]
    y_half = [0, 1]
    clf_weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5})
    clf_weighted.fit(X_half, y_half)
    # should be similar up to some epsilon due to learning rate schedule
    assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
    """A class_weight key naming a non-existent class label must raise."""
    with pytest.raises(ValueError):
        klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5}).fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
    """A class_weight of the wrong type (a plain list) must raise."""
    with pytest.raises(ValueError):
        klass(alpha=0.1, max_iter=1000, class_weight=[0.5]).fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
    # Tests that class_weight and sample_weight are multiplicative:
    # fitting with both must equal fitting with their elementwise product
    # passed as sample_weight alone.
    class_weights = {1: 0.6, 2: 0.3}
    rng = np.random.RandomState(0)
    sample_weights = rng.random_sample(Y4.shape[0])
    multiplied_together = np.copy(sample_weights)
    multiplied_together[Y4 == 1] *= class_weights[1]
    multiplied_together[Y4 == 2] *= class_weights[2]
    clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
    clf2 = klass(alpha=0.1, max_iter=20)
    clf1.fit(X4, Y4, sample_weight=sample_weights)
    clf2.fit(X4, Y4, sample_weight=multiplied_together)
    assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    # Test class weights for imbalanced data"""
    # compute reference metrics on iris dataset that is quite balanced by
    # default
    X, y = iris.data, iris.target
    X = scale(X)
    idx = np.arange(X.shape[0])
    rng = np.random.RandomState(6)
    rng.shuffle(idx)
    X = X[idx]
    y = y[idx]
    clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y)
    f1 = metrics.f1_score(y, clf.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # make the same prediction using balanced class_weight
    clf_balanced = klass(
        alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False
    ).fit(X, y)
    f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # Make sure that in the balanced case it does not change anything
    # to use "balanced"
    assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build an very very imbalanced dataset out of iris data
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]
    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)
    # fit a model on the imbalanced data without class weight info:
    # performance on the balanced evaluation set should degrade
    clf = klass(max_iter=1000, class_weight=None, shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") < 0.96
    # fit a model with balanced class_weight enabled: performance recovers
    clf = klass(max_iter=1000, class_weight="balanced", shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") > 0.96
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
    """Per-sample weights shift the decision boundary like class weights do."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # nearly zero out the three positive samples: the hyperplane rotates
    # clock-wise and the prediction at the probe point flips
    clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_wrong_sample_weights(klass):
    """A sample_weight of the wrong length must raise ValueError."""
    if klass in [SGDClassifier, SparseSGDClassifier]:
        est = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
        est = klass(nu=0.1, max_iter=1000, fit_intercept=False)
    # seven weights, while the dataset has a different number of samples
    with pytest.raises(ValueError):
        est.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
    """The first partial_fit call must raise when `classes` is omitted."""
    clf = klass(alpha=0.01)
    with pytest.raises(ValueError):
        clf.partial_fit(X3, Y3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
    # Binary partial_fit: attribute shapes after the first call, buffer
    # reuse across calls, and final predictions.
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y)
    clf.partial_fit(X[:third], Y[:third], classes=classes)
    assert clf.coef_.shape == (1, X.shape[1])
    assert clf.intercept_.shape == (1,)
    assert clf.decision_function([[0, 0]]).shape == (1,)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X[third:], Y[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ haven't been re-allocated
    # BUG FIX: `assert id1, id2` only asserted that id1 is truthy (id2 was
    # the assertion *message*), so it could never fail; restore the
    # comparison (pre-pytest-conversion this was assert_equal(id1, id2)).
    assert id1 == id2
    y_pred = clf.predict(T)
    assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
    # Multi-class partial_fit: attribute shapes after the first call and
    # buffer reuse across calls.
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X2[third:], Y2[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ haven't been re-allocated
    # BUG FIX: `assert id1, id2` only asserted that id1 is truthy (id2 was
    # the assertion *message*), so it could never fail; restore the
    # comparison (pre-pytest-conversion this was assert_equal(id1, id2)).
    assert id1 == id2
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
    # Averaged multi-class partial_fit keeps consistent attribute shapes
    # across successive calls.
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01, average=X2.shape[0])
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    clf.partial_fit(X2[third:], Y2[third:])
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
    """partial_fit works after an initial fit (non-regression for #2496).

    `fit` used to produce a Fortran-ordered coef_ that a subsequent
    partial_fit could not handle.
    """
    estimator = klass()
    estimator.fit(X2, Y2)
    estimator.partial_fit(X2, Y2)  # no exception here
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
    # Two passes of partial_fit must match a single fit(max_iter=2) for
    # every learning-rate schedule, on both datasets.
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        clf = klass(alpha=0.01, eta0=0.01, max_iter=2, learning_rate=lr, shuffle=False)
        clf.fit(X_, Y_)
        y_pred = clf.decision_function(T_)
        t = clf.t_
        classes = np.unique(Y_)
        clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
        for i in range(2):
            clf.partial_fit(X_, Y_, classes=classes)
        y_pred2 = clf.decision_function(T_)
        # same number of seen samples and (almost) the same decisions
        assert clf.t_ == t
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
    # The regression-style losses must still yield a perfect training-set
    # accuracy on this easy classification problem.
    random_state = np.random.RandomState(1)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.1,
        loss="epsilon_insensitive",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.1,
        loss="squared_epsilon_insensitive",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(alpha=0.01, loss="huber", random_state=random_state)
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.01,
        loss="squared_error",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
    # Exercise the shared warm-start helper on the multi-class dataset.
    _test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
    # Test multiple calls of fit w/ different shaped inputs.
    clf = klass(alpha=0.01, shuffle=False)
    clf.fit(X, Y)
    assert hasattr(clf, "coef_")
    # Non-regression test: try fitting with a different label set.
    y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
    # second fit drops one feature and uses string labels; must not raise
    clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
    """Smoke test: fitting y = x gives equal weights for both features."""
    reg = klass(alpha=0.1, max_iter=2, fit_intercept=False)
    train = [[0, 0], [1, 1], [2, 2]]
    reg.fit(train, [0, 1, 2])
    assert reg.coef_[0] == reg.coef_[1]
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
    # Tests the average regressor matches the naive implementation
    # (`asgd`) to 16 decimals, so the exact update ordering matters.
    eta = 0.001
    alpha = 0.01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # simple linear function without noise
    y = np.dot(X, w)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.fit(X, y)
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
    # Tests whether the partial fit yields the same average as the fit
    # (two half-dataset partial_fit calls vs the asgd reference).
    eta = 0.001
    alpha = 0.01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # simple linear function without noise
    y = np.dot(X, w)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.partial_fit(X[: int(n_samples / 2)][:], y[: int(n_samples / 2)])
    clf.partial_fit(X[int(n_samples / 2) :][:], y[int(n_samples / 2) :])
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
    # Checks the average weights on data with 0s against the asgd
    # reference (X3/Y3 contain zero entries).
    eta = 0.001
    alpha = 0.01
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    n_samples = Y3.shape[0]
    clf.partial_fit(X3[: int(n_samples / 2)][:], Y3[: int(n_samples / 2)])
    clf.partial_fit(X3[int(n_samples / 2) :][:], Y3[int(n_samples / 2) :])
    average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
    """Squared-error SGD recovers a linear target, with and without noise."""
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

    # noise-free linear target: near perfect fit expected
    y = 0.5 * X.ravel()
    reg = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.99

    # the same target plus Gaussian noise: still a decent fit
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    reg = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
    reg.fit(X, y)
    assert reg.score(X, y) > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
    # Epsilon-insensitive SGD recovers a linear target, with and without
    # Gaussian noise.
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = klass(
        loss="epsilon_insensitive",
        epsilon=0.01,
        alpha=0.1,
        max_iter=20,
        fit_intercept=False,
    )
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.99
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = klass(
        loss="epsilon_insensitive",
        epsilon=0.01,
        alpha=0.1,
        max_iter=20,
        fit_intercept=False,
    )
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
    # Huber-loss SGD recovers a linear target, with and without noise.
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.99
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
    # Check that the SGD output is consistent with coordinate descent
    # (ElasticNet) across several alpha / l1_ratio combinations.
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    # ground_truth linear model that generate y from X and to which the
    # models should converge if the regularizer would be set to 0.0
    ground_truth_coef = rng.randn(n_features)
    y = np.dot(X, ground_truth_coef)
    # XXX: alpha = 0.1 seems to cause convergence problems
    for alpha in [0.01, 0.001]:
        for l1_ratio in [0.5, 0.8, 1.0]:
            cd = linear_model.ElasticNet(
                alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False
            )
            cd.fit(X, y)
            sgd = klass(
                penalty="elasticnet",
                max_iter=50,
                alpha=alpha,
                l1_ratio=l1_ratio,
                fit_intercept=False,
            )
            sgd.fit(X, y)
            err_msg = (
                "cd and sgd did not converge to comparable "
                "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)
            )
            assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg)
@ignore_warnings
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
    # Regressor partial_fit: attribute shapes after the first call and
    # buffer reuse across calls.
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    clf.partial_fit(X[:third], Y[:third])
    assert clf.coef_.shape == (X.shape[1],)
    assert clf.intercept_.shape == (1,)
    assert clf.predict([[0, 0]]).shape == (1,)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X[third:], Y[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ haven't been re-allocated
    # BUG FIX: `assert id1, id2` only asserted that id1 is truthy (id2 was
    # the assertion *message*), so it could never fail; restore the
    # comparison (pre-pytest-conversion this was assert_equal(id1, id2)).
    assert id1 == id2
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
    # Two passes of partial_fit must match a single fit(max_iter=2) for
    # every learning-rate schedule.
    clf = klass(alpha=0.01, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
    clf.fit(X, Y)
    y_pred = clf.predict(T)
    t = clf.t_
    clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
    for i in range(2):
        clf.partial_fit(X, Y)
    y_pred2 = clf.predict(T)
    # same number of seen samples and (almost) the same predictions
    assert clf.t_ == t
    assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
    """`set_params(epsilon=...)` must be reflected in the huber loss entry."""
    reg = klass(epsilon=0.9)
    reg.set_params(epsilon=0.1)
    assert reg.loss_functions["huber"][1] == 0.1
###############################################################################
# SGD One Class SVM Test Case
# a simple implementation of ASGD to use for testing SGDOneClassSVM
def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0):
    """Naive averaged-SGD reference implementation for the One-Class SVM tests.

    Returns the running average of the weight vector and the corresponding
    offset (``1 - average_intercept``) after a single pass over ``X``.
    """
    if coef_init is None:
        weights = np.zeros(X.shape[1])
    else:
        weights = coef_init
    avg_weights = np.zeros(X.shape[1])
    offset = offset_init
    intercept = 1 - offset
    avg_intercept = 0.0

    # sparse data has a fixed decay of .01
    decay = 0.01 if klass == SparseSGDOneClassSVM else 1.0

    for step, sample in enumerate(X):
        margin = np.dot(sample, weights)
        margin += intercept
        gradient = -1 if margin <= 1.0 else 0
        weights *= max(0, 1.0 - (eta * nu / 2))
        weights += -(eta * gradient * sample)
        intercept += -(eta * (nu + gradient)) * decay

        # incremental running averages of the iterates
        avg_weights *= step
        avg_weights += weights
        avg_weights /= step + 1.0
        avg_intercept *= step
        avg_intercept += intercept
        avg_intercept /= step + 1.0

    return avg_weights, 1 - avg_intercept
def _test_warm_start_oneclass(klass, X, lr):
    """Shared helper: explicit and implicit warm restarts must be equivalent.

    Called from test_warm_start_oneclass; not collected by pytest itself.
    """
    # BUG FIX: this helper carried a @pytest.mark.parametrize("klass", ...)
    # decorator. pytest never collects underscore-prefixed functions and the
    # helper is always called with an explicit `klass`, so the mark was dead
    # code and has been removed.
    # Test that explicit warm restart...
    clf = klass(nu=0.5, eta0=0.01, shuffle=False, learning_rate=lr)
    clf.fit(X)
    clf2 = klass(nu=0.1, eta0=0.01, shuffle=False, learning_rate=lr)
    clf2.fit(X, coef_init=clf.coef_.copy(), offset_init=clf.offset_.copy())
    # ... and implicit warm restart are equivalent.
    clf3 = klass(nu=0.5, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr)
    clf3.fit(X)
    assert clf3.t_ == clf.t_
    assert_allclose(clf3.coef_, clf.coef_)
    clf3.set_params(nu=0.1)
    clf3.fit(X)
    assert clf3.t_ == clf2.t_
    assert_allclose(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start_oneclass(klass, lr):
    # Exercise the shared warm-start helper for every learning-rate schedule.
    _test_warm_start_oneclass(klass, X, lr)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_clone_oneclass(klass):
    """`clone` + `set_params` must match a fresh estimator with those params."""
    cloned = clone(klass(nu=0.5))
    cloned.set_params(nu=0.1)
    cloned.fit(X)
    reference = klass(nu=0.1)
    reference.fit(X)
    assert_array_equal(cloned.coef_, reference.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_partial_fit_oneclass(klass):
    # One-class partial_fit: attribute shapes, in-place coef_ reuse, and
    # feature-count validation across successive calls.
    third = X.shape[0] // 3
    clf = klass(nu=0.1)
    clf.partial_fit(X[:third])
    assert clf.coef_.shape == (X.shape[1],)
    assert clf.offset_.shape == (1,)
    assert clf.predict([[0, 0]]).shape == (1,)
    previous_coefs = clf.coef_
    clf.partial_fit(X[third:])
    # check that coef_ haven't been re-allocated
    assert clf.coef_ is previous_coefs
    # raises ValueError if number of features does not match previous data
    with pytest.raises(ValueError):
        clf.partial_fit(X[:, 1])
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_oneclass(klass, lr):
    # Two passes of partial_fit must reproduce fit(max_iter=2) exactly for
    # every learning-rate schedule.
    clf = klass(nu=0.05, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
    clf.fit(X)
    y_scores = clf.decision_function(T)
    t = clf.t_
    coef = clf.coef_
    offset = clf.offset_
    clf = klass(nu=0.05, eta0=0.01, max_iter=1, learning_rate=lr, shuffle=False)
    for _ in range(2):
        clf.partial_fit(X)
    y_scores2 = clf.decision_function(T)
    assert clf.t_ == t
    assert_allclose(y_scores, y_scores2)
    assert_allclose(clf.coef_, coef)
    assert_allclose(clf.offset_, offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_reached_oneclass(klass):
    # Test average: averaging that kicks in mid-training (average=7) must
    # equal running the asgd_oneclass reference from the non-averaged state.
    eta0 = 0.001
    nu = 0.05
    # 2 passes over the training set but average only at second pass
    clf1 = klass(
        average=7, learning_rate="constant", eta0=eta0, nu=nu, max_iter=2, shuffle=False
    )
    # 1 pass over the training set with no averaging
    clf2 = klass(
        average=0, learning_rate="constant", eta0=eta0, nu=nu, max_iter=1, shuffle=False
    )
    clf1.fit(X)
    clf2.fit(X)
    # Start from clf2 solution, compute averaging using asgd function and
    # compare with clf1 solution
    average_coef, average_offset = asgd_oneclass(
        klass, X, eta0, nu, coef_init=clf2.coef_.ravel(), offset_init=clf2.offset_
    )
    assert_allclose(clf1.coef_.ravel(), average_coef.ravel())
    assert_allclose(clf1.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
    # Tests the average SGD One-Class SVM matches the naive implementation
    # (asgd_oneclass) on random Gaussian data.
    eta = 0.001
    nu = 0.05
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.fit(X)
    average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_partial_fit_oneclass(klass):
    # Tests whether the partial fit yields the same average as the fit
    # (two half-dataset partial_fit calls vs the asgd_oneclass reference).
    eta = 0.001
    nu = 0.05
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.partial_fit(X[: int(n_samples / 2)][:])
    clf.partial_fit(X[int(n_samples / 2) :][:])
    average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_average_sparse_oneclass(klass):
    # Checks the average coef on data with 0s (X3 contains zero entries)
    # against the asgd_oneclass reference.
    eta = 0.001
    nu = 0.01
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    n_samples = X3.shape[0]
    clf.partial_fit(X3[: int(n_samples / 2)])
    clf.partial_fit(X3[int(n_samples / 2) :])
    average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
def test_sgd_oneclass():
    # Test fit, decision_function, predict and score_samples on a toy
    # dataset
    # (the expected coef_/offset_/score values below are presumably
    # hand-computed for this exact configuration — verify before changing)
    X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
    X_test = np.array([[0.5, -2], [2, 2]])
    clf = SGDOneClassSVM(
        nu=0.5, eta0=1, learning_rate="constant", shuffle=False, max_iter=1
    )
    clf.fit(X_train)
    assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
    assert clf.offset_[0] == -0.5
    scores = clf.score_samples(X_test)
    assert_allclose(scores, np.array([-0.9375, 0.625]))
    # decision_function is score_samples shifted by the offset
    dec = clf.score_samples(X_test) - clf.offset_
    assert_allclose(clf.decision_function(X_test), dec)
    pred = clf.predict(X_test)
    assert_array_equal(pred, np.array([-1, 1]))
def test_ocsvm_vs_sgdocsvm():
    """SGDOneClassSVM on Nystroem-approximated features should closely
    approximate a kernelized OneClassSVM: >=99% identical predictions and
    strongly correlated decision values on held-out data."""
    # Checks SGDOneClass SVM gives a good approximation of kernelized
    # One-Class SVM
    nu = 0.05
    gamma = 2.0
    random_state = 42
    # Generate train and test data
    # NOTE(review): the asserted thresholds below presumably depend on this
    # exact sequence of rng calls -- keep the call order unchanged.
    rng = np.random.RandomState(random_state)
    X = 0.3 * rng.randn(500, 2)
    # Two Gaussian blobs centered at (2, 2) and (-2, -2).
    X_train = np.r_[X + 2, X - 2]
    X = 0.3 * rng.randn(100, 2)
    X_test = np.r_[X + 2, X - 2]
    # One-Class SVM
    clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu)
    clf.fit(X_train)
    y_pred_ocsvm = clf.predict(X_test)
    dec_ocsvm = clf.decision_function(X_test).reshape(1, -1)
    # SGDOneClassSVM using kernel approximation
    max_iter = 15
    transform = Nystroem(gamma=gamma, random_state=random_state)
    # tol=-np.inf disables the convergence check so all max_iter epochs run.
    clf_sgd = SGDOneClassSVM(
        nu=nu,
        shuffle=True,
        fit_intercept=True,
        max_iter=max_iter,
        random_state=random_state,
        tol=-np.inf,
    )
    pipe_sgd = make_pipeline(transform, clf_sgd)
    pipe_sgd.fit(X_train)
    y_pred_sgdocsvm = pipe_sgd.predict(X_test)
    dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1)
    # At least 99% of predictions agree between the two models.
    assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99
    # Decision values of the two models must be strongly correlated.
    corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1]
    assert corrcoef >= 0.9
def test_l1_ratio():
    """Elasticnet with l1_ratio at its extremes must match pure penalties:
    l1_ratio ~ 1 behaves like "l1", l1_ratio ~ 0 behaves like "l2"."""
    X, y = datasets.make_classification(
        n_samples=1000, n_features=100, n_informative=20, random_state=1234
    )

    def fit_sgd(**overrides):
        # All estimators share the same base hyper-parameters so only the
        # penalty configuration differs between comparisons.
        return SGDClassifier(
            alpha=0.001, max_iter=6, random_state=42, tol=None, **overrides
        ).fit(X, y)

    # elasticnet with l1_ratio near 1 should give the same result as pure l1
    est_en = fit_sgd(penalty="elasticnet", l1_ratio=0.9999999999)
    est_l1 = fit_sgd(penalty="l1")
    assert_array_almost_equal(est_en.coef_, est_l1.coef_)

    # elasticnet with l1_ratio near 0 should give the same result as pure l2
    est_en = fit_sgd(penalty="elasticnet", l1_ratio=0.0000000001)
    est_l2 = fit_sgd(penalty="l2")
    assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
    """Fitting on hugely unscaled features must raise a helpful ValueError,
    while the same data scaled with MinMaxScaler must fit cleanly.
    NOTE(review): the name typo ("overlow") is kept on purpose -- renaming
    would change the collected pytest test id.
    """
    # errstate(all="raise") turns FP warnings into errors so any silent
    # overflow/underflow inside the fits below fails loudly.
    with np.errstate(all="raise"):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10
        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert np.isfinite(X).all()
        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert np.isfinite(X_scaled).all()
        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])
        model = SGDClassifier(alpha=0.1, loss="squared_hinge", max_iter=500)
        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert np.isfinite(model.coef_).all()
        # model is numerically unstable on unscaled data
        msg_regxp = (
            r"Floating-point under-/overflow occurred at epoch #.*"
            " Scaling input data with StandardScaler or MinMaxScaler"
            " might help."
        )
        with pytest.raises(ValueError, match=msg_regxp):
            model.fit(X, y)
def test_numerical_stability_large_gradient():
    # Non regression test case for numerical stability on scaled problems
    # where the gradient can still explode with some losses
    model = SGDClassifier(
        loss="squared_hinge",
        max_iter=10,
        shuffle=True,
        penalty="elasticnet",
        l1_ratio=0.3,
        alpha=0.01,
        eta0=0.001,
        random_state=0,
        tol=None,
    )
    # Promote FP warnings to errors: the fit must complete without any
    # overflow/underflow and yield only finite coefficients.
    with np.errstate(all="raise"):
        model.fit(iris.data, iris.target)
    assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize("penalty", ["l2", "l1", "elasticnet"])
def test_large_regularization(penalty):
    """A huge regularization strength must drive every coefficient to zero
    without triggering floating-point errors (non-regression test)."""
    params = dict(
        alpha=1e5,
        learning_rate="constant",
        eta0=0.1,
        penalty=penalty,
        shuffle=False,
        tol=None,
        max_iter=6,
    )
    model = SGDClassifier(**params)
    # Promote FP warnings to errors so numerical issues fail the test.
    with np.errstate(all="raise"):
        model.fit(iris.data, iris.target)
    assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
    # Test that the tol parameter behaves as expected
    X = StandardScaler().fit_transform(iris.data)
    # Binary target: class 1 vs. the rest.
    y = iris.target == 1
    # With tol is None, the number of iteration should be equal to max_iter
    max_iter = 42
    model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
    model_0.fit(X, y)
    assert max_iter == model_0.n_iter_
    # If tol is not None, the number of iteration should be less than max_iter
    max_iter = 2000
    model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
    model_1.fit(X, y)
    assert max_iter > model_1.n_iter_
    assert model_1.n_iter_ > 5
    # A larger tol should yield a smaller number of iteration
    # (model_2's iteration count is compared against model_1's above).
    model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
    model_2.fit(X, y)
    assert model_1.n_iter_ > model_2.n_iter_
    assert model_2.n_iter_ > 3
    # Strict tolerance and small max_iter should trigger a warning
    model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
    warning_message = (
        "Maximum number of iteration reached before "
        "convergence. Consider increasing max_iter to "
        "improve the fit."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        model_3.fit(X, y)
    # All 3 epochs must have been consumed when the warning fires.
    assert model_3.n_iter_ == 3
def _test_loss_common(loss_function, cases):
# Test the different loss functions
# cases is a list of (p, y, expected)
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
def test_loss_hinge():
    # Test Hinge (hinge / perceptron)
    # hinge
    # Hinge(1.0): margin of 1, i.e. zero loss when y * p > 1.
    loss = sgd_fast.Hinge(1.0)
    cases = [
        # (p, y, expected_loss, expected_dloss)
        (1.1, 1.0, 0.0, 0.0),
        (-2.0, -1.0, 0.0, 0.0),
        (1.0, 1.0, 0.0, -1.0),
        (-1.0, -1.0, 0.0, 1.0),
        (0.5, 1.0, 0.5, -1.0),
        (2.0, -1.0, 3.0, 1.0),
        (-0.5, -1.0, 0.5, 1.0),
        (0.0, 1.0, 1, -1.0),
    ]
    _test_loss_common(loss, cases)
    # perceptron
    # Hinge(0.0): zero margin, which is the perceptron criterion.
    loss = sgd_fast.Hinge(0.0)
    cases = [
        # (p, y, expected_loss, expected_dloss)
        (1.0, 1.0, 0.0, 0.0),
        (-0.1, -1.0, 0.0, 0.0),
        (0.0, 1.0, 0.0, -1.0),
        (0.0, -1.0, 0.0, 1.0),
        (0.5, -1.0, 0.5, 1.0),
        (2.0, -1.0, 2.0, 1.0),
        (-0.5, 1.0, 0.5, -1.0),
        (-1.0, 1.0, 1.0, -1.0),
    ]
    _test_loss_common(loss, cases)
def test_gradient_squared_hinge():
    """Check SquaredHinge(1.0) loss and gradient on hand-computed cases."""
    # Each entry: (prediction, target, expected_loss, expected_dloss).
    squared_hinge_cases = [
        (1.0, 1.0, 0.0, 0.0),
        (-2.0, -1.0, 0.0, 0.0),
        (1.0, -1.0, 4.0, 4.0),
        (-1.0, 1.0, 4.0, -4.0),
        (0.5, 1.0, 0.25, -1.0),
        (0.5, -1.0, 2.25, 3.0),
    ]
    _test_loss_common(sgd_fast.SquaredHinge(1.0), squared_hinge_cases)
def test_loss_log():
    # Test Log (logistic loss)
    loss = sgd_fast.Log()
    cases = [
        # (p, y, expected_loss, expected_dloss)
        (1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
        (1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
        (-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
        (-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
        (0.0, 1.0, np.log(2), -0.5),
        (0.0, -1.0, np.log(2), 0.5),
        # For |y * p| > 18 the implementation switches to asymptotic forms;
        # here loss ~ -y * p = 17.9 and dloss ~ -y (checked below too).
        (17.9, -1.0, 17.9, 1.0),
        (-17.9, 1.0, 17.9, -1.0),
    ]
    _test_loss_common(loss, cases)
    # Deep in the tails (|y * p| > 18): gradient ~ -y * exp(-y * p) and
    # loss ~ exp(-y * p) on the confident side, ~ -y * p on the wrong side.
    assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
    assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
    assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
    assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
def test_loss_squared_loss():
    """Check SquaredLoss values and derivatives on hand-computed cases."""
    # Each entry: (prediction, target, expected_loss, expected_dloss);
    # loss = 0.5 * (p - y)**2 and dloss = p - y.
    squared_loss_cases = [
        (0.0, 0.0, 0.0, 0.0),
        (1.0, 1.0, 0.0, 0.0),
        (1.0, 0.0, 0.5, 1.0),
        (0.5, -1.0, 1.125, 1.5),
        (-2.5, 2.0, 10.125, -4.5),
    ]
    _test_loss_common(sgd_fast.SquaredLoss(), squared_loss_cases)
def test_loss_huber():
    """Check Huber(0.1) loss and gradient on hand-computed cases."""
    # Each entry: (prediction, target, expected_loss, expected_dloss).
    huber_cases = [
        (0.0, 0.0, 0.0, 0.0),
        (0.1, 0.0, 0.005, 0.1),
        (0.0, 0.1, 0.005, -0.1),
        (3.95, 4.0, 0.00125, -0.05),
        (5.0, 2.0, 0.295, 0.1),
        (-1.0, 5.0, 0.595, -0.1),
    ]
    _test_loss_common(sgd_fast.Huber(0.1), huber_cases)
def test_loss_modified_huber():
    # Test ModifiedHuber (the tuples below follow the usual convention).
    loss = sgd_fast.ModifiedHuber()
    cases = [
        # (p, y, expected_loss, expected_dloss)
        (1.0, 1.0, 0.0, 0.0),
        (-1.0, -1.0, 0.0, 0.0),
        (2.0, 1.0, 0.0, 0.0),
        (0.0, 1.0, 1.0, -2.0),
        (-1.0, 1.0, 4.0, -4.0),
        (0.5, -1.0, 2.25, 3.0),
        # Below y * p = -1 the loss becomes linear with constant gradient.
        (-2.0, 1.0, 8, -4.0),
        (-3.0, 1.0, 12, -4.0),
    ]
    _test_loss_common(loss, cases)
def test_loss_epsilon_insensitive():
    """Check EpsilonInsensitive(0.1): zero loss inside the epsilon tube,
    absolute loss with unit gradient outside it."""
    # Each entry: (prediction, target, expected_loss, expected_dloss).
    epsilon_cases = [
        (0.0, 0.0, 0.0, 0.0),
        (0.1, 0.0, 0.0, 0.0),
        (-2.05, -2.0, 0.0, 0.0),
        (3.05, 3.0, 0.0, 0.0),
        (2.2, 2.0, 0.1, 1.0),
        (2.0, -1.0, 2.9, 1.0),
        (2.0, 2.2, 0.1, -1.0),
        (-2.0, 1.0, 2.9, -1.0),
    ]
    _test_loss_common(sgd_fast.EpsilonInsensitive(0.1), epsilon_cases)
def test_loss_squared_epsilon_insensitive():
    """Check SquaredEpsilonInsensitive(0.1): zero loss inside the epsilon
    tube, squared excess outside it."""
    # Each entry: (prediction, target, expected_loss, expected_dloss).
    squared_epsilon_cases = [
        (0.0, 0.0, 0.0, 0.0),
        (0.1, 0.0, 0.0, 0.0),
        (-2.05, -2.0, 0.0, 0.0),
        (3.05, 3.0, 0.0, 0.0),
        (2.2, 2.0, 0.01, 0.2),
        (2.0, -1.0, 8.41, 5.8),
        (2.0, 2.2, 0.01, -0.2),
        (-2.0, 1.0, 8.41, -5.8),
    ]
    _test_loss_common(sgd_fast.SquaredEpsilonInsensitive(0.1), squared_epsilon_cases)
def test_multi_thread_multi_class_and_early_stopping():
    # This is a non-regression test for a bad interaction between
    # early stopping internal attribute and thread-based parallelism.
    clf = SGDClassifier(
        alpha=1e-3,
        tol=1e-3,
        max_iter=1000,
        early_stopping=True,
        n_iter_no_change=100,
        random_state=0,
        n_jobs=2,
    )
    clf.fit(iris.data, iris.target)
    # With early stopping, at least n_iter_no_change epochs must have run...
    assert clf.n_iter_ > clf.n_iter_no_change
    # ...but stopping should also have kicked in well before max_iter.
    assert clf.n_iter_ < clf.n_iter_no_change + 20
    assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
    """Non-regression test: early stopping internals must not break under
    process-based multi-core parallelism (randomized grid search)."""
    grid = {
        "alpha": np.logspace(-4, 4, 9),
        "n_iter_no_change": [5, 10, 50],
    }
    base_clf = SGDClassifier(
        tol=1e-2, max_iter=1000, early_stopping=True, random_state=0
    )
    search = RandomizedSearchCV(base_clf, grid, n_iter=3, n_jobs=2, random_state=0)
    search.fit(iris.data, iris.target)
    assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
    # This is a non-regression smoke test. In the multi-class case,
    # SGDClassifier.fit fits each class in a one-versus-all fashion using
    # joblib.Parallel. However, each OvA step updates the coef_ attribute of
    # the estimator in-place. Internally, SGDClassifier calls Parallel using
    # require='sharedmem'. This test makes sure SGDClassifier.fit works
    # consistently even when the user asks for a backend that does not provide
    # sharedmem semantics.
    # We further test a case where memmapping would have been used if
    # SGDClassifier.fit was called from a loky or multiprocessing backend. In
    # this specific case, in-place modification of clf.coef_ would have caused
    # a segmentation fault when trying to write in a readonly memory mapped
    # buffer.
    random_state = np.random.RandomState(42)
    # Create a classification problem with 2000 features and 20 classes. Using
    # loky or multiprocessing this makes the clf.coef_ exceed the threshold
    # above which memmaping is used in joblib and loky (1MB as of 2018/11/1).
    X = sp.random(500, 2000, density=0.02, format="csr", random_state=random_state)
    y = random_state.choice(20, 500)
    # Begin by fitting a SGD classifier sequentially
    clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1, random_state=42)
    clf_sequential.fit(X, y)
    # Fit a SGDClassifier using the specified backend, and make sure the
    # coefficients are equal to those obtained using a sequential fit
    clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4, random_state=42)
    with joblib.parallel_backend(backend=backend):
        clf_parallel.fit(X, y)
    assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
@pytest.mark.parametrize(
    "old_loss, new_loss, Estimator",
    [
        # TODO(1.2): Remove "squared_loss"
        ("squared_loss", "squared_error", linear_model.SGDClassifier),
        ("squared_loss", "squared_error", linear_model.SGDRegressor),
        # TODO(1.3): Remove "log"
        ("log", "log_loss", linear_model.SGDClassifier),
    ],
)
def test_loss_deprecated(old_loss, new_loss, Estimator):
    """Deprecated loss names must warn and behave like their replacements.
    ``X`` and ``Y`` here are module-level toy fixtures, not locals.
    """
    # Note: class BaseSGD calls self._validate_params() in __init__, therefore
    # even instantiation of class raises FutureWarning for deprecated losses.
    with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"):
        est1 = Estimator(loss=old_loss, random_state=0)
        est1.fit(X, Y)
    est2 = Estimator(loss=new_loss, random_state=0)
    est2.fit(X, Y)
    # Classifiers expose predict_proba for log loss; compare whichever output
    # is available so the same check works for both estimator types.
    if hasattr(est1, "predict_proba"):
        assert_allclose(est1.predict_proba(X), est2.predict_proba(X))
    else:
        assert_allclose(est1.predict(X), est2.predict(X))
@pytest.mark.parametrize(
    "Estimator", [linear_model.SGDClassifier, linear_model.SGDRegressor]
)
def test_sgd_random_state(Estimator, global_random_seed):
    # Train the same model on the same data without converging and check that we
    # get reproducible results by fixing the random seed.
    if Estimator == linear_model.SGDRegressor:
        X, y = datasets.make_regression(random_state=global_random_seed)
    else:
        X, y = datasets.make_classification(random_state=global_random_seed)
    # Fitting twice a model with the same hyper-parameters on the same training
    # set with the same seed leads to the same results deterministically.
    # max_iter=1 deliberately stops before convergence, hence the expected
    # ConvergenceWarning on every fit below.
    est = Estimator(random_state=global_random_seed, max_iter=1)
    with pytest.warns(ConvergenceWarning):
        coef_same_seed_a = est.fit(X, y).coef_
        assert est.n_iter_ == 1
    est = Estimator(random_state=global_random_seed, max_iter=1)
    with pytest.warns(ConvergenceWarning):
        coef_same_seed_b = est.fit(X, y).coef_
        assert est.n_iter_ == 1
    assert_allclose(coef_same_seed_a, coef_same_seed_b)
    # Fitting twice a model with the same hyper-parameters on the same training
    # set but with different random seed leads to different results after one
    # epoch because of the random shuffling of the dataset.
    est = Estimator(random_state=global_random_seed + 1, max_iter=1)
    with pytest.warns(ConvergenceWarning):
        coef_other_seed = est.fit(X, y).coef_
        assert est.n_iter_ == 1
    assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0
| 32.27011 | 88 | 0.639937 |
acf75a68ecf0a77b90c7876f0278022abb680cd9 | 9,594 | py | Python | ivy_tests/test_ivy/test_stateful/test_training/test_on_call_build.py | RitujaPawas/ivy | 595788507aca609e868cb3d17edd815463af28e4 | [
"Apache-2.0"
] | null | null | null | ivy_tests/test_ivy/test_stateful/test_training/test_on_call_build.py | RitujaPawas/ivy | 595788507aca609e868cb3d17edd815463af28e4 | [
"Apache-2.0"
] | null | null | null | ivy_tests/test_ivy/test_stateful/test_training/test_on_call_build.py | RitujaPawas/ivy | 595788507aca609e868cb3d17edd815463af28e4 | [
"Apache-2.0"
] | null | null | null | """Collection of tests for training neural network layers with "on call"
building.
"""
# global
from hypothesis import given, strategies as st
import numpy as np
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# Weight Conditioned Network #
# ---------------------------#
class FC(ivy.Module):
def __init__(self, output_size=1, num_layers=2, layer_dim=64, device=None, v=None):
self._output_size = output_size
self._num_layers = num_layers
self._layer_dim = layer_dim
super(FC, self).__init__(device=device, v=v, build_mode="on_call")
# noinspection PyUnusedLocal
def _build(self, x, *args, **kwargs):
input_size = x.shape[-1]
self._layers = [ivy.Linear(input_size, self._layer_dim, device=self._dev)]
self._layers += [
ivy.Linear(self._layer_dim, self._layer_dim, device=self._dev)
for _ in range(self._num_layers - 2)
]
self._layers.append(
ivy.Linear(self._layer_dim, self._output_size, device=self._dev)
)
def _forward(self, x):
for layer in self._layers:
x = ivy.leaky_relu(layer(x))
return x
class WeConLayerFC(ivy.Module):
def __init__(self, num_layers=2, layer_dim=64, device=None, v=None):
self._num_layers = num_layers
self._layer_dim = layer_dim
super(WeConLayerFC, self).__init__(device=device, v=v, build_mode="on_call")
# noinspection PyUnusedLocal
def _build(self, implicit_weights, *args, **kwargs):
implicit_shapes = implicit_weights.shapes
self._layers = list()
for i in range(self._num_layers):
if i == 0:
self._layers.append(
implicit_shapes.map(
lambda shp, kc: ivy.Linear(
int(np.prod(shp[1:])), self._layer_dim, device=self._dev
)
)
)
else:
self._layers.append(
implicit_shapes.map(
lambda shp, kc: ivy.Linear(
self._layer_dim, self._layer_dim, device=self._dev
)
)
)
def _forward(self, implicit_weights):
xs = implicit_weights
for layer in self._layers:
xs = ivy.Container.multi_map(
lambda args, _: ivy.leaky_relu(args[0](args[1])), [layer, xs]
)
return xs
class WeConFC(ivy.Module):
def __init__(self, device=None, v=None):
self._layer_specific_fc = WeConLayerFC(device=device)
self._fc = FC(device=device)
super(WeConFC, self).__init__(device=device, v=v)
# noinspection PyUnusedLocal
def _build(self, *args, **kwargs):
self._layer_specific_fc.build()
self._fc.build()
return self._layer_specific_fc.built and self._fc.built
def _forward(self, implicit_weights):
batch_shape = [i for i in implicit_weights.shape if i]
total_batch_size = np.prod(batch_shape)
reshaped_weights = implicit_weights.reshape(
pre_shape=[total_batch_size], post_shape=[-1]
)
xs = self._layer_specific_fc(reshaped_weights)
x = ivy.concat([v for k, v in xs.to_iterator()], -1)
ret_flat = self._fc(x)
return ivy.reshape(ret_flat, batch_shape + [-1])
# WeConFC
# @given(
# batch_shape=st.sampled_from([[1, 2], [1, 3], [1, 4]]),
# dtype=st.sampled_from(ivy_np.valid_float_dtypes),
# )
# def test_weight_conditioned_network_training(batch_shape, dtype, device, call):
#
# # smoke test
# if call is helpers.np_call:
# # NumPy does not support gradients
# return
# x = ivy.Container(
# {
# "layer0": {
# "w": ivy.random_uniform(shape=batch_shape + [64, 3], device=device),
# "b": ivy.random_uniform(shape=batch_shape + [64], device=device),
# },
# "layer1": {
# "w": ivy.random_uniform(shape=batch_shape + [1, 64], device=device),
# "b": ivy.random_uniform(shape=batch_shape + [1], device=device),
# },
# }
# )
# we_con_net = WeConFC(device=device)
#
# def loss_fn(v_=None):
# out = we_con_net(x, v=v_)
# return ivy.mean(out)
#
# # train
# loss_tm1 = 1e12
# loss = None
# grads = None
# loss_fn() # build on_call layers
# for i in range(10):
# loss, grads = ivy.execute_with_gradients(loss_fn, we_con_net.v)
# we_con_net.v = ivy.gradient_descent_update(we_con_net.v, grads, 1e-3)
# assert loss < loss_tm1
# loss_tm1 = loss
#
# # type test
# assert ivy.is_array(loss)
# assert isinstance(grads, ivy.Container)
# # cardinality test
# if call is helpers.mx_call:
# # mxnet slicing cannot reduce dimension to zero
# assert loss.shape == (1,)
# else:
# assert loss.shape == ()
# # value test
# assert (abs(grads).max() > 0).all_true()
# HyperNetwork #
# -------------#
class HyperNet(ivy.Module):
def __init__(
self, num_layers=3, layer_dim=64, latent_size=256, device=None, v=None
):
self._num_layers = num_layers
self._layer_dim = layer_dim
self._latent_size = latent_size
super(HyperNet, self).__init__(device=device, v=v, build_mode="on_call")
def _create_variables(self, device):
return {
"latent": ivy.variable(
ivy.random_uniform(shape=(self._latent_size,), device=device)
)
}
# noinspection PyUnusedLocal
def _build(self, hypo_shapes, *args, **kwargs):
self._layers = list()
for i in range(self._num_layers):
if i == 0:
self._layers.append(
ivy.Linear(self._latent_size, self._layer_dim, device=self._dev)
)
if i < self._num_layers - 1:
self._layers.append(
ivy.Linear(self._layer_dim, self._layer_dim, device=self._dev)
)
else:
self._layers.append(
hypo_shapes.map(
lambda shp, kc: ivy.Linear(
self._layer_dim, int(np.prod(shp)), device=self._dev
)
)
)
def _forward(self, hypo_shapes):
x = self.v.latent
for layer in self._layers[:-1]:
x = ivy.leaky_relu(layer(x))
weights_flat = self._layers[-1].map(lambda lyr, _: ivy.leaky_relu(lyr(x)))
return weights_flat.reshape_like(hypo_shapes)
class HypoNet(ivy.Module):
def __init__(
self,
input_size=1,
output_size=1,
num_layers=2,
layer_dim=64,
device=None,
v=None,
):
self._input_size = input_size
self._output_size = output_size
self._num_layers = num_layers
self._layer_dim = layer_dim
super(HypoNet, self).__init__(device=device, v=v, store_vars=False)
# noinspection PyUnusedLocal
def _build(self, *args, **kwargs):
self._layers = [ivy.Linear(self._input_size, self._layer_dim, device=self._dev)]
self._layers += [
ivy.Linear(self._layer_dim, self._layer_dim, device=self._dev)
for _ in range(self._num_layers - 2)
]
self._layers.append(
ivy.Linear(self._layer_dim, self._output_size, device=self._dev)
)
def _forward(self, x):
for layer in self._layers:
x = ivy.leaky_relu(layer(x))
return x
class HyperHypoNet(ivy.Module):
def __init__(self, device=None, v=None):
self._hypernet = HyperNet(device=device)
self._hyponet = HypoNet(device=device)
super(HyperHypoNet, self).__init__(device=device, v=v)
# noinspection PyUnusedLocal
def _build(self, *args, **kwargs):
self._hypernet.build()
hypo_v = self._hyponet.build()
self._hypo_shapes = hypo_v.shapes
return self._hypernet.built and self._hyponet.built
def _forward(self, hyponet_input):
return self._hyponet(hyponet_input, v=self._hypernet(self._hypo_shapes))
# HyperHypoNet
@given(
batch_shape=st.sampled_from([[1, 2], [1, 3], [1, 4]]),
dtype=st.sampled_from(ivy_np.valid_float_dtypes),
)
def test_hyper_hypo_network_training(batch_shape, dtype, device, call):
# smoke test
if call is helpers.np_call:
# NumPy does not support gradients
return
x = ivy.random_uniform(shape=batch_shape + [1], device=device)
hyper_hypo_net = HyperHypoNet(device=device)
def loss_fn(v_=None):
out = hyper_hypo_net(x, v=v_)
return ivy.mean(out)
# train
loss_tm1 = 1e12
loss = None
grads = None
loss_fn() # build on_call layers
for i in range(10):
loss, grads = ivy.execute_with_gradients(loss_fn, hyper_hypo_net.v)
hyper_hypo_net.v = ivy.gradient_descent_update(hyper_hypo_net.v, grads, 1e-3)
assert loss < loss_tm1
loss_tm1 = loss
# type test
assert ivy.is_array(loss)
assert isinstance(grads, ivy.Container)
# cardinality test
if call is helpers.mx_call:
# mxnet slicing cannot reduce dimension to zero
assert loss.shape == (1,)
else:
assert loss.shape == ()
# value test
assert (abs(grads).max() > 0).all_true()
| 32.30303 | 88 | 0.587972 |
acf75ac44cb76b5163260a44aad882a215eed981 | 5,132 | py | Python | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/299_cv_filter.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | 1 | 2020-02-28T12:03:39.000Z | 2020-02-28T12:03:39.000Z | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/299_cv_filter.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/299_cv_filter.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 09:22:55 2018
@author: Kazuki
"""
from glob import glob
from os import system
import pandas as pd
import sys
sys.path.append('/home/kazuki_onodera/Python')
import lgbmextension as ex
import lightgbm as lgb
import multiprocessing
import utils
utils.start(__file__)
#==============================================================================
SEED = 71
base_folder = ['../data/001_train']
target_folders = glob('../data/2*_train')
folders = base_folder+target_folders
X = pd.concat([
utils.read_pickles(f) for f in (folders)
], axis=1)
y = utils.read_pickles('../data/label').TARGET
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': -1,
'num_leaves': 255,
'max_bin': 255,
'colsample_bytree': 0.1,
'subsample': 0.5,
'nthread': int(multiprocessing.cpu_count()/2),
# 'nthread': multiprocessing.cpu_count(),
'bagging_freq': 1,
'seed': SEED
}
categorical_feature = ['NAME_CONTRACT_TYPE',
'CODE_GENDER',
'FLAG_OWN_CAR',
'FLAG_OWN_REALTY',
'NAME_TYPE_SUITE',
'NAME_INCOME_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_FAMILY_STATUS',
'NAME_HOUSING_TYPE',
'OCCUPATION_TYPE',
'WEEKDAY_APPR_PROCESS_START',
'ORGANIZATION_TYPE',
'FONDKAPREMONT_MODE',
'HOUSETYPE_MODE',
'WALLSMATERIAL_MODE',
'EMERGENCYSTATE_MODE']
categorical_feature += ['prev_DL1-0NAME_CONTRACT_TYPE', 'prev_DL1-0WEEKDAY_APPR_PROCESS_START', 'prev_DL1-0NAME_CASH_LOAN_PURPOSE', 'prev_DL1-0NAME_CONTRACT_STATUS', 'prev_DL1-0NAME_PAYMENT_TYPE', 'prev_DL1-0CODE_REJECT_REASON', 'prev_DL1-0NAME_TYPE_SUITE', 'prev_DL1-0NAME_CLIENT_TYPE', 'prev_DL1-0NAME_GOODS_CATEGORY', 'prev_DL1-0NAME_PORTFOLIO', 'prev_DL1-0NAME_PRODUCT_TYPE', 'prev_DL1-0CHANNEL_TYPE', 'prev_DL1-0NAME_SELLER_INDUSTRY', 'prev_DL1-0NAME_YIELD_GROUP', 'prev_DL1-0PRODUCT_COMBINATION', 'prev_DL1-1NAME_CONTRACT_TYPE', 'prev_DL1-1WEEKDAY_APPR_PROCESS_START', 'prev_DL1-1NAME_CASH_LOAN_PURPOSE', 'prev_DL1-1NAME_CONTRACT_STATUS', 'prev_DL1-1NAME_PAYMENT_TYPE', 'prev_DL1-1CODE_REJECT_REASON', 'prev_DL1-1NAME_TYPE_SUITE', 'prev_DL1-1NAME_CLIENT_TYPE', 'prev_DL1-1NAME_GOODS_CATEGORY', 'prev_DL1-1NAME_PORTFOLIO', 'prev_DL1-1NAME_PRODUCT_TYPE', 'prev_DL1-1CHANNEL_TYPE', 'prev_DL1-1NAME_SELLER_INDUSTRY', 'prev_DL1-1NAME_YIELD_GROUP', 'prev_DL1-1PRODUCT_COMBINATION', 'prev_DL1-2NAME_CONTRACT_TYPE', 'prev_DL1-2WEEKDAY_APPR_PROCESS_START', 'prev_DL1-2NAME_CASH_LOAN_PURPOSE', 'prev_DL1-2NAME_CONTRACT_STATUS', 'prev_DL1-2NAME_PAYMENT_TYPE', 'prev_DL1-2CODE_REJECT_REASON', 'prev_DL1-2NAME_TYPE_SUITE', 'prev_DL1-2NAME_CLIENT_TYPE', 'prev_DL1-2NAME_GOODS_CATEGORY', 'prev_DL1-2NAME_PORTFOLIO', 'prev_DL1-2NAME_PRODUCT_TYPE', 'prev_DL1-2CHANNEL_TYPE', 'prev_DL1-2NAME_SELLER_INDUSTRY', 'prev_DL1-2NAME_YIELD_GROUP', 'prev_DL1-2PRODUCT_COMBINATION']
dtrain = lgb.Dataset(X, y, categorical_feature=list( set(X.columns)&set(categorical_feature)) )
#dtrain.construct()
ret = lgb.cv(param, dtrain, 9999, nfold=5,
early_stopping_rounds=50, verbose_eval=10,
# categorical_feature=list( set(X.columns)&set(categorical_feature)),
seed=SEED)
print(f"CV auc-mean {ret['auc-mean'][-1]}")
dtrain = lgb.Dataset(X, y, categorical_feature=list( set(X.columns)&set(categorical_feature)) )
model = lgb.train(param, dtrain, len(ret['auc-mean']))
#model = lgb.train(param, dtrain, 300, valid_sets=[dtrain], valid_names=['train'])
imp = ex.getImp(model)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
"""
imp = pd.read_csv('LOG/imp_111-1_cv_filter.py.csv')
"""
# =============================================================================
#
# =============================================================================
imp = imp.set_index('index')
feature_all = imp[imp['split'] != 0].index.tolist()
import gc
def read_pickle(folder, usecols):
df = pd.read_pickle(folder+'/000.p')
col = list( set(usecols) & set(df.columns))
if len(col)>0:
print(folder)
df = utils.read_pickles(folder, col)
utils.to_pickles(df, folder+'_filtered', utils.SPLIT_SIZE)
del df; gc.collect()
folder = folder.replace('_train', '_test')
df = utils.read_pickles(folder, col)
utils.to_pickles(df, folder+'_filtered', utils.SPLIT_SIZE)
else:
print(f'{folder} doesnt have valid features')
pass
[read_pickle(f, feature_all) for f in target_folders]
#==============================================================================
utils.end(__file__)
| 38.586466 | 1,460 | 0.624513 |
acf75af39c3735f1dfbcb61643d324daa6fdae8f | 16,620 | py | Python | src/server.py | takeshitakenji/wikiserv | 8327eadd186cdc998dc9885259cae3a9d3a2d63d | [
"Apache-2.0"
] | 1 | 2016-04-07T13:03:24.000Z | 2016-04-07T13:03:24.000Z | src/server.py | takeshitakenji/wikiserv | 8327eadd186cdc998dc9885259cae3a9d3a2d63d | [
"Apache-2.0"
] | null | null | null | src/server.py | takeshitakenji/wikiserv | 8327eadd186cdc998dc9885259cae3a9d3a2d63d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
if sys.version_info < (3, 3):
raise RuntimeError('At least Python 3.3 is required')
import tornado.ioloop
import tornado.web
import logging, binascii, cgi, shelve, pickle, shutil
import config, cache, processors, filestuff, search, worker, common
from dateutil.parser import parse as date_parse
from threading import Semaphore
from pytz import utc
from dateutil.tz import tzlocal
from email.utils import format_datetime
from shutil import copyfileobj
from collections import namedtuple
import itertools, functools
from os.path import relpath, join as path_join, isdir
from os import mkdir
from codecs import getreader, getwriter
LOGGER = logging.getLogger('wikiserv')
class VarHost(object):
__slots__ = 'runtime_vars',
def __init__(self, path):
self.runtime_vars = shelve.open(path, 'c', protocol = pickle.HIGHEST_PROTOCOL)
common.fix_perms(path)
def __del__(self):
self.close()
def close(self):
if self.runtime_vars is not None:
self.runtime_vars.close()
self.runtime_vars = None
def getvar(self, key):
try:
return self.runtime_vars[key]
except KeyError:
return None
def setvar(self, key, value):
self.runtime_vars[key] = value
class Server(VarHost):
__slots__ = 'configuration', 'caches', 'processors', 'send_etags', 'search', 'preview_lines', 'workers', 'runtime_vars',
instance = None
ilock = Semaphore()
localzone = tzlocal()
CACHE_TYPES = {
'document' : 'process',
'preview' : 'doc_head',
}
@classmethod
def get_instance(cls):
with cls.ilock:
if cls.instance is None:
raise RuntimeError
return cls.instance
@classmethod
def set_instance(cls, configuration):
with cls.ilock:
cls.instance = cls(configuration)
@classmethod
def close_instance(cls):
with cls.ilock:
cls.instance.close()
cls.instance = None
@staticmethod
def get_cache(configuration, process, subdir = None):
ctype = cache.DispatcherCache if configuration.dispatcher_thread else cache.Cache
return ctype(
(path_join(configuration.cache_dir, subdir) if subdir is not None else configuration.cache_dir),
configuration.source_dir,
configuration.checksum_function,
process,
configuration.max_age,
configuration.max_entries,
configuration.auto_scrub
)
@classmethod
def process_funcs(cls, obj):
return {ctype : getattr(obj, method) for ctype, method in cls.CACHE_TYPES.items()}
@classmethod
def get_caches(cls, configuration, process_funcs, skip = frozenset()):
if not isdir(configuration.cache_dir):
mkdir(configuration.cache_dir)
common.fix_dir_perms(configuration.cache_dir)
pfsrc = None
if hasattr(process_funcs, '__getitem__'):
pfsrc = lambda ctype: process_funcs[ctype]
else:
pfsrc = lambda ctype: process_funcs
return {ctype : cls.get_cache(configuration, pfsrc(ctype), ctype) for ctype in cls.CACHE_TYPES.keys() if not ctype in skip}
def __init__(self, configuration):
self.caches = {}
self.workers = None
self.search = None
self.preview_lines = configuration.preview_lines
self.processors = configuration.processors
self.send_etags = configuration.send_etags
VarHost.__init__(self, configuration.runtime_vars)
skip = []
if not self.preview_lines:
skip.append('preview')
else:
preview_root = path_join(configuration.cache_dir, 'preview')
if self.preview_lines != self.getvar('PREVIEW_LINES'):
try:
shutil.rmtree(preview_root)
except OSError:
pass
self.setvar('PREVIEW_LINES', self.preview_lines)
self.caches.update(self.get_caches(configuration, self.process_funcs(self), skip))
if configuration.use_search_cache:
self.search = search.Search(self, path_join(configuration.cache_dir, 'search'), \
configuration.search_max_age, configuration.search_max_entries, configuration.search_auto_scrub)
else:
self.search = search.Search(self)
self.workers = worker.WorkerPool(configuration.worker_threads, autostart = True)
def __del__(self):
self.close()
def __getitem__(self, key):
return self.cache[key]
@property
def root(self):
return self.cache.source_root
@property
def cache(self):
return self.caches['document']
@property
def preview(self):
return self.caches.get('preview', None)
def get_preview(self, path):
    """Return the cached preview text for *path*, or None.

    None is returned when previews are disabled or the cached preview
    cannot be read.
    """
    LOGGER.debug('get_preview path=%s' % path)
    if not self.preview_lines:
        return None
    with self.preview[path] as preview:
        try:
            # The cache entry starts with a processor header naming the
            # text encoding; decode the remainder with it.
            header = processors.Processor.read_header(preview)
            reader = getreader(header.encoding)(preview)
            return reader.read()
        except IOError:
            return None
@property
def default_processor(self):
    """Fallback processor, registered under the None extension key."""
    return self.processors[None]
def process(self, inf, outf, cached):
    """Run *inf* through the processor matching its filename extension.

    Tries every registered extension; only when none matches does it
    fall back to the default processor.

    BUGFIX: the original had the fallback in an ``else`` branch inside
    the loop, so the first non-matching extension returned the default
    processor and any later registered extensions were never consulted.
    """
    fname = inf.name
    for extension, processor in self.processors.items():
        if extension is None:
            # None is the default-processor slot, not a real extension.
            continue
        if fname.endswith(extension):
            return processor(inf, outf, cached)
    return self.default_processor(inf, outf, cached)
def doc_head(self, inf, outf, cached):
    """Write the first preview_lines of *inf* (plus header) to *outf*.

    Raises NotImplementedError when the encoding cannot be detected.
    """
    LOGGER.debug('doc_head inf=%s outf=%s' % (inf, outf))
    # Sniff the encoding from the first 2 KiB only.
    buff = inf.read(2048)
    header = processors.AutoBaseProcessor.auto_header(buff)
    if header.encoding is None:
        raise NotImplementedError
    inf.seek(0)
    reader = getreader(header.encoding)(inf)
    processors.Processor.write_header(outf, header)
    writer = getwriter(header.encoding)(outf)
    for line in itertools.islice(reader, self.preview_lines):
        writer.write(line)
def close(self):
    """Shut down caches, worker pool, search index and runtime vars.

    Cache shutdown is best-effort: a failure closing one cache is
    logged and the remaining caches are still closed.
    """
    for name, cache in self.caches.items():
        try:
            cache.close()
        # BUGFIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt during shutdown.
        except Exception:
            LOGGER.exception('Closing cache [%s]=%s' % (name, cache))
    self.caches.clear()
    if self.workers is not None:
        self.workers.finish()
        self.workers.join()
        self.workers = None
    if self.search is not None:
        self.search.close()
        self.search = None
    VarHost.close(self)
Element = namedtuple('Element', ['tag', 'attrib', 'text'])
def xhtml_head(stream, title, *head):
    """Write the XHTML 1.1 document prologue and <head> to *stream*.

    *head* is a sequence of Element tuples rendered inside <head>.
    NOTE(review): cgi.escape was removed from the stdlib in Python 3.8;
    this module should migrate to html.escape — confirm before changing
    output (html.escape also escapes quotes by default).
    """
    print('<?xml version="1.0" encoding="UTF-8" ?>', file = stream)
    print('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">', file = stream)
    print('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">', file = stream)
    print('<head>', file = stream)
    print(' <title>%s</title>' % cgi.escape(title), file = stream)
    for element in head:
        selement = ['<%s' % element.tag]
        if element.attrib:
            selement += [
                ' ',
                # BUGFIX: format string was '%s="s%"', which raises
                # ValueError ("s%" is not a valid conversion).
                ' '.join(('%s="%s"' % (key, cgi.escape(value, True)) \
                    for key, value in element.attrib.items())),
            ]
        if element.text:
            selement.append(' >%s</%s>' % (element.text, element.tag))
        else:
            selement.append(' />')
        print(''.join(selement), file = stream)
    print('</head>\n<body>', file = stream)
def xhtml_foot(stream):
    """Write the shared page footer and close the XHTML document."""
    footer_lines = (
        '<p id="foot"><a href="/">Index</a> <a href=".search">Search</a></p>',
        '</body>\n</html>',
    )
    for line in footer_lines:
        print(line, file=stream)
class IndexHandler(tornado.web.RequestHandler):
    """Serves the wiki front page: paginated index and search results."""

    # Number of entries per page.
    COUNT = 100

    def check_fill_headers(self, start, filter_func = None):
        """Set listing headers; honour If-Modified-Since.

        Returns (files, less, more); *files* is False when a 304 was
        sent, an empty list when there are no matches.
        """
        LOGGER.debug('Getting headers for request')
        prev_mtime = None
        # BUGFIX: removed a dead duplicate `server = Server.get_instance()`
        # that was assigned here and immediately shadowed below.
        try:
            prev_mtime = date_parse(self.request.headers['If-Modified-Since'])
            if prev_mtime.tzinfo is None:
                # Assume UTC for naive client timestamps.
                prev_mtime = prev_mtime.replace(tzinfo = utc)
            LOGGER.debug('Found If-Modified-Since=%s' % prev_mtime)
        except KeyError:
            pass
        self.set_header('Content-Type', 'application/xhtml+xml; charset=UTF-8')
        server = Server.get_instance()
        files, less, more = server.search.find_by_path(start, start + self.COUNT, filter_func)
        if not files:
            return [], (start > 0), more
        newest = max(files, key = lambda x: x.modified)
        self.set_header('Last-Modified', format_datetime(newest.modified))
        # Filtered (search) pages are per-query, so only plain listings cache.
        self.set_header('Cache-Control', ('no-cache' if filter_func else 'Public'))
        if prev_mtime is not None and newest.modified.replace(microsecond = 0) <= prev_mtime:
            LOGGER.debug('Returning 304 from modification time')
            self.set_status(304)
            return False, less, more
        return files, less, more

    # (query argument, filter factory) pairs recognised by the index.
    FILTERS = [
        ('filter', search.PathFilter),
        ('search', search.ContentFilter),
    ]

    def get_filter_func(self):
        """Build a filter callable from the query arguments, or None."""
        filters = []
        for arg, func in self.FILTERS:
            try:
                filters.append(func(self.get_argument(arg, None)))
            except ValueError:
                # Factory rejects a missing/invalid argument: skip it.
                continue
        LOGGER.debug('get_filter_func => %s' % filters)
        if not filters:
            return None
        elif len(filters) == 1:
            return filters[0]
        else:
            return search.CompoundFilter(filters)

    def _start_argument(self):
        """Parse the non-negative 'start' query argument (default 0)."""
        try:
            start = int(self.get_argument('start', 0))
        except ValueError:
            return 0
        return max(start, 0)

    def head(self):
        start = self._start_argument()
        filter_func = self.get_filter_func()
        LOGGER.debug('HEAD INDEX start=%d filter_func=%s' % (start, filter_func))
        self.check_fill_headers(start, filter_func)

    def get(self):
        start = self._start_argument()
        filter_func = self.get_filter_func()
        # BUGFIX: this previously logged 'HEAD INDEX' inside GET.
        LOGGER.debug('GET INDEX start=%d filter_func=%s' % (start, filter_func))
        files, less, more = self.check_fill_headers(start, filter_func)
        if files is False:
            # 304 already sent.
            return
        LOGGER.debug('Yielding %d files (more=%s, less=%s)' % (len(files), less, more))
        xhtml_head(self, 'Search' if filter_func else 'Index')
        if filter_func:
            print('<h1>Search</h1>', file = self)
            print('<p>Terms: %s</p>' % cgi.escape(str(filter_func)), file = self)
        else:
            print('<h1>Wiki Index</h1>', file = self)
        print('<ul>', file = self)
        server = Server.get_instance()
        for f in files:
            self.write('\t<li><a href="/%s">%s</a> @ %s (%f kB)' % (cgi.escape(f.name, True), cgi.escape(f.name), f.modified.astimezone(server.localzone).strftime('%c (%Z)'), (float(f.size) / 1024)))
            if filter_func and server:
                # Inline a content preview for search hits.
                preview = server.get_preview(f.name)
                if preview:
                    print(' <pre style="display: block;">%s</pre>' % cgi.escape(preview), file = self)
            print('\t</li>', file = self)
        print('</ul>', file = self)
        print('<p>', file = self)
        if less:
            print('\t<a href="/?start=%d">Previous Page</a>' % max(start - self.COUNT, 0), file = self)
        if less and more:
            print('\t | ', file = self)
        if more:
            # BUGFIX: the href attribute was missing its closing quote
            # ('start=%d>Next Page'), producing broken markup.
            print('\t<a href="/?start=%d">Next Page</a>' % (start + self.COUNT), file = self)
        print('</p>', file = self)
        xhtml_foot(self)
class WikiHandler(tornado.web.RequestHandler):
    """Serves individual wiki pages straight from the document cache."""

    def compute_etag(self):
        # Disable Tornado's output-based etag; etags come from the cache
        # entry's stored checksum in check_fill_headers instead.
        return None

    def check_fill_headers(self, entry, header = None):
        """Set content headers for *entry*; honour conditional requests.

        Returns False (after sending 304) when the client copy is
        current, True when a body should follow.
        """
        LOGGER.debug('Getting headers for request')
        prev_mtime = None
        server = Server.get_instance()
        try:
            prev_mtime = date_parse(self.request.headers['If-Modified-Since'])
            if prev_mtime.tzinfo is None:
                # Assume UTC for naive client timestamps.
                prev_mtime = prev_mtime.replace(tzinfo = utc)
            LOGGER.debug('Found If-Modified-Since=%s' % prev_mtime)
        except KeyError:
            pass
        if header is None:
            header = entry.header
        if server.send_etags:
            checksum = header.checksum
            if checksum:
                self.set_header('Etag', '"%s"' % binascii.hexlify(checksum).decode('ascii'))
        self.set_header('Last-Modified', format_datetime(header.timestamp))
        self.set_header('Cache-Control', 'Public')
        # The cache entry's own embedded header names the mime/encoding.
        content_header = processors.Processor.read_header(entry)
        if content_header.encoding:
            self.set_header('Content-Type', '%s; charset=%s' % (content_header.mime, content_header.encoding))
        else:
            self.set_header('Content-Type', content_header.mime)
        if prev_mtime is not None and header.timestamp.replace(microsecond = 0) <= prev_mtime:
            LOGGER.debug('Returning 304 from modification time')
            self.set_status(304)
            return False
        elif server.send_etags and self.check_etag_header():
            LOGGER.debug('Returning 304 from etags')
            self.set_status(304)
            return False
        return True

    def head(self, path):
        """Headers only; still drives processing for uncached entries."""
        LOGGER.debug('HEAD %s' % path)
        try:
            server = Server.get_instance()
            wrap = server.cache[path]
            with wrap as entry:
                if isinstance(entry, cache.AutoProcess):
                    # NoCache: entry must be processed on the fly by a worker.
                    reader = worker.RWAdapter(entry)
                    server.workers.schedule(reader)
                    try:
                        with reader:
                            self.check_fill_headers(reader, entry.header)
                    finally:
                        reader.wait()
                else:
                    self.check_fill_headers(entry)
        except KeyError:
            raise tornado.web.HTTPError(404)

    def get(self, path):
        """Stream the page body unless a conditional request short-circuits."""
        LOGGER.debug('GET %s' % path)
        try:
            server = Server.get_instance()
            wrap = server.cache[path]
            with wrap as entry:
                if isinstance(entry, cache.AutoProcess):
                    # NoCache: process on the fly and stream the result.
                    reader = worker.RWAdapter(entry)
                    server.workers.schedule(reader)
                    try:
                        with reader:
                            if not self.check_fill_headers(reader, entry.header):
                                return
                            copyfileobj(reader, self)
                    finally:
                        reader.wait()
                else:
                    if not self.check_fill_headers(entry):
                        return
                    LOGGER.debug('Returning data')
                    copyfileobj(entry, self)
        except KeyError:
            raise tornado.web.HTTPError(404)
class SearchHandler(tornado.web.RequestHandler):
    """Serves the static search form page at /.search."""

    # Static form body; submits back to the index with filter/search args.
    CONTENT = \
"""<form action="/" method="GET">
<fieldset>
<legend>Search Terms</legend>
<div>
<label for="filter">Title</label>
<input name="filter" id="filter" />
</div>
<div>
<label for="search">Terms</label>
<input name="search" id="search" />
</div>
</fieldset>
<input type="submit" />
</form>"""

    def check_fill_headers(self):
        """Set headers; 304 based on this source file's own mtime.

        The page is static, so its freshness tracks the module file.
        Returns False after sending a 304, True otherwise.
        """
        self.set_header('Cache-Control', 'Public')
        self.set_header('Content-Type', 'application/xhtml+xml; charset=UTF-8')
        prev_mtime = None
        try:
            prev_mtime = date_parse(self.request.headers['If-Modified-Since'])
            if prev_mtime.tzinfo is None:
                # Assume UTC for naive client timestamps.
                prev_mtime = prev_mtime.replace(tzinfo = utc)
            LOGGER.debug('Found If-Modified-Since=%s' % prev_mtime)
        except KeyError:
            pass
        with filestuff.File(__file__) as info:
            mtime = info.modified
        self.set_header('Last-Modified', format_datetime(mtime))
        if prev_mtime is not None and mtime.replace(microsecond = 0) <= prev_mtime:
            LOGGER.debug('Returning 304 from modification time')
            self.set_status(304)
            return False
        elif self.check_etag_header():
            LOGGER.debug('Returning 304 from etags')
            self.set_status(304)
            return False
        return True

    def head(self):
        self.check_fill_headers()

    def get(self):
        if not self.check_fill_headers():
            return
        xhtml_head(self, 'Search')
        print(self.CONTENT, file = self)
        xhtml_foot(self)
class SkipHandler(tornado.web.RequestHandler):
    """Unconditionally answers 404 (used for robots.txt / favicon.ico)."""

    def head(self):
        raise tornado.web.HTTPError(404)

    def get(self):
        raise tornado.web.HTTPError(404)
# URL routing table; most specific patterns first, wiki catch-all last.
application = tornado.web.Application([
    (r'^/$', IndexHandler),                  # front page / listing
    (r'^/\.search$', SearchHandler),         # search form
    (r'^/.*\brobots\.txt$', SkipHandler),    # ignore crawler probes
    (r'^/.*\bfavicon\.ico$', SkipHandler),   # ignore favicon requests
    (r'^/(.+)$', WikiHandler),               # everything else is a wiki page
])
if __name__ == '__main__':
    from argparse import ArgumentParser

    def positive_int(s):
        """argparse type: accept strictly positive integers only."""
        s = int(s)
        if s < 1:
            raise ValueError(s)
        return s

    parser = ArgumentParser(usage = '%(prog)s [ options ] -c config.xml ')
    parser.add_argument('--config', '-c', required = True, metavar = 'CONFIG.XML', dest = 'configuration', help = 'XML configuration file')
    parser.add_argument('--scrub', dest = 'scrub_only', action = 'store_true', default = False, help = 'Instead of running the server, just do a cache scrub')
    parser.add_argument('--bind-address', dest = 'bind_address', metavar = 'ADDRESS', help = 'Bind to ADDRESS instead of the address specified in configuration')
    parser.add_argument('--bind-port', dest = 'bind_port', metavar = 'ADDRESS', type = positive_int, help = 'Bind to ADDRESS instead of the port specified in configuration')
    args = parser.parse_args()
    cfg = None
    with open(args.configuration, 'rb') as f:
        cfg = config.Configuration(f, setlog = True)
    # Command-line bind options override the configuration file.
    if args.bind_address is not None:
        cfg.bind_address = args.bind_address
    if args.bind_port is not None:
        cfg.bind_port = args.bind_port
    if not args.scrub_only:
        # Normal operation: run the Tornado event loop until interrupted.
        Server.set_instance(cfg)
        try:
            application.listen(cfg.bind_port, cfg.bind_address)
            tornado.ioloop.IOLoop.instance().start()
        finally:
            Server.close_instance()
    else:
        # Scrub mode: open and close every cache so their scrub policies
        # run, without ever serving pages.
        def fake_process(inf, outf, cached):
            raise RuntimeError('Cannot serve pages in scrub mode')

        class FakeServer(VarHost):
            """Minimal VarHost stand-in exposing only the source root."""
            __slots__ = 'root',
            def __init__(self, configuration):
                LOGGER.debug('Using fake server with runtime_vars=%s and root=%s' % (configuration.runtime_vars, configuration.source_dir))
                VarHost.__init__(self, configuration.runtime_vars)
                self.root = configuration.source_dir
            def __del__(self):
                self.close()

        # Disable background auto-scrub; the open/close cycle scrubs here.
        cfg.auto_scrub = False
        for cache in Server.get_caches(cfg, fake_process).values():
            cache.close()
        if cfg.use_search_cache:
            server = FakeServer(cfg)
            try:
                search.Search(server, path_join(cfg.cache_dir, 'search'), \
                    cfg.search_max_age, cfg.search_max_entries, cfg.search_auto_scrub).close()
            finally:
                server.close()
| 32.084942 | 190 | 0.702046 |
acf75b37e02fbc7808a8b71dcfb1de06623c5dbe | 7,311 | py | Python | benchmarks/bench_sample_without_replacement.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 50,961 | 2015-01-01T06:06:31.000Z | 2022-03-31T23:40:12.000Z | benchmarks/bench_sample_without_replacement.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 17,065 | 2015-01-01T02:01:58.000Z | 2022-03-31T23:48:34.000Z | benchmarks/bench_sample_without_replacement.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 26,886 | 2015-01-01T00:59:27.000Z | 2022-03-31T18:03:23.000Z | """
Benchmarks for sampling without replacement of integer.
"""
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Convert the timedelta *delta* to fractional seconds.

    *t_start* is unused but kept for signature compatibility with
    existing callers.

    BUGFIX: the previous ``delta.seconds + delta.microseconds / 1e6``
    silently dropped ``delta.days``; total_seconds() includes it.
    """
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Return the wall-clock seconds taken by one sampling call."""
    gc.collect()  # reduce garbage-collector noise in the measurement
    started = datetime.now()
    sampling(n_population, n_samples)
    return compute_time(started, datetime.now() - started)
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option(
"--n-times",
dest="n_times",
default=5,
type=int,
help="Benchmark results are average over n_times experiments",
)
op.add_option(
"--n-population",
dest="n_population",
default=100000,
type=int,
help="Size of the population to sample from.",
)
op.add_option(
"--n-step",
dest="n_steps",
default=5,
type=int,
help="Number of step interval between 0 and n_population.",
)
default_algorithms = (
"custom-tracking-selection,custom-auto,"
"custom-reservoir-sampling,custom-pool,"
"python-core-sample,numpy-permutation"
)
op.add_option(
"--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help=(
"Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default"
),
)
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(",")
for key in selected_algorithm:
if key not in default_algorithms.split(","):
raise ValueError(
'Unknown sampling algorithm "%s" not in (%s).'
% (key, default_algorithms)
)
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm[
"python-core-sample"
] = lambda n_population, n_sample: random.sample(range(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm[
"custom-auto"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="auto", random_state=random_state
)
###########################################################################
# Set custom tracking based method
sampling_algorithm[
"custom-tracking-selection"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="tracking_selection", random_state=random_state
)
###########################################################################
# Set custom reservoir based method
sampling_algorithm[
"custom-reservoir-sampling"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="reservoir_sampling", random_state=random_state
)
###########################################################################
# Set custom reservoir based method
sampling_algorithm[
"custom-pool"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="pool", random_state=random_state
)
###########################################################################
# Numpy permutation based
sampling_algorithm[
"numpy-permutation"
] = lambda n_population, n_sample: np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = {
key: value
for key, value in sampling_algorithm.items()
if key in selected_algorithm
}
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population, num=opts.n_steps).astype(
int
)
ratio = n_samples / opts.n_population
print("Benchmarks")
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in range(opts.n_steps):
for it in range(opts.n_times):
time[name][step, it] = bench_sample(
sampling_algorithm[name], opts.n_population, n_samples[step]
)
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print(
"%s \t | %s "
% (
"Arguments".ljust(16),
"Value".center(12),
)
)
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure("scikit-learn sample w/o replacement benchmark results")
plt.title("n_population = %s, n_times = %s" % (opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel("ratio of n_sample / n_population")
ax.set_ylabel("Time (s)")
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| 32.207048 | 87 | 0.525783 |
acf75c332b0d88329176825112406c10bf15170d | 128 | py | Python | src/conftest.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 151 | 2020-04-21T09:58:57.000Z | 2021-09-12T09:01:21.000Z | src/conftest.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 163 | 2020-05-29T20:52:00.000Z | 2021-09-11T12:44:56.000Z | src/conftest.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | 39 | 2020-04-21T12:28:16.000Z | 2021-09-12T15:33:47.000Z | pytest_plugins = [
'app.factory',
'app.fixtures',
'users.fixtures',
'orders.factory',
'diplomas.factory',
]
| 16 | 23 | 0.59375 |
acf75c88fe173082be164d3efc67ea8a2b02b6fd | 211 | py | Python | app/products/admin.py | chavez897/inventory-app | 9321551532ec65131ff1b2de7e1916ec0a6b3bbc | [
"MIT"
] | null | null | null | app/products/admin.py | chavez897/inventory-app | 9321551532ec65131ff1b2de7e1916ec0a6b3bbc | [
"MIT"
] | null | null | null | app/products/admin.py | chavez897/inventory-app | 9321551532ec65131ff1b2de7e1916ec0a6b3bbc | [
"MIT"
] | null | null | null | from django.contrib import admin
from products.models import Products
@admin.register(Products)
class ProductsModelAdmin(admin.ModelAdmin):
    """Django admin changelist configuration for the Products model."""

    # Columns shown in the changelist.
    # BUGFIX: dataset-extraction residue ("| 23.44 | 43 | ...") fused onto
    # the last line made this module unparsable; removed.
    list_display = ("id", "name",)
    # Columns that link to the change form.
    list_display_links = ("id",)
acf75d195a7f9454dff3256ac3c4f362cd91d9cd | 529 | py | Python | core/network/Swin_T/__init__.py | ViTAE-Transformer/ViTAE-Transformer-Matting | 5cd1574cd46009a4e9660cabdc008718e20bc381 | [
"MIT"
] | 8 | 2022-03-31T05:58:45.000Z | 2022-03-31T13:24:18.000Z | core/network/Swin_T/__init__.py | ViTAE-Transformer/ViTAE-Transformer-Matting | 5cd1574cd46009a4e9660cabdc008718e20bc381 | [
"MIT"
] | null | null | null | core/network/Swin_T/__init__.py | ViTAE-Transformer/ViTAE-Transformer-Matting | 5cd1574cd46009a4e9660cabdc008718e20bc381 | [
"MIT"
] | null | null | null | from .swin_stem_pooling5_transformer import swin_stem_pooling5_encoder
from .swin_stem_pooling5_transformer import SwinStemPooling5TransformerMatting
from .decoder import SwinStemPooling5TransformerDecoderV1
__all__ = ['p3mnet_swin_t']
def p3mnet_swin_t(pretrained=True, img_size=512, **kwargs):
encoder = swin_stem_pooling5_encoder(pretrained=pretrained, img_size=img_size, **kwargs)
decoder = SwinStemPooling5TransformerDecoderV1()
model = SwinStemPooling5TransformerMatting(encoder, decoder)
return model
| 35.266667 | 92 | 0.835539 |
acf75dd865613f2a616a442667dc51886a25f526 | 850 | py | Python | docs/source/conf.py | yuin/rays | 62ce174fc46577d93fb6ee595baf8d91d77e89bd | [
"MIT"
] | null | null | null | docs/source/conf.py | yuin/rays | 62ce174fc46577d93fb6ee595baf8d91d77e89bd | [
"MIT"
] | null | null | null | docs/source/conf.py | yuin/rays | 62ce174fc46577d93fb6ee595baf8d91d77e89bd | [
"MIT"
] | 1 | 2019-04-17T08:20:59.000Z | 2019-04-17T08:20:59.000Z | # -*- coding: utf-8 -*-
import sys, os
from datetime import date

# Make the project root importable so setup.py's `spec` can be reused.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from setup import spec

# Project metadata derived from setup.py.
release = spec["version"]
project = spec["name"]
author = spec["author"]
copyright = '2009-%s, %s' % (str(date.today().year), author)

# Extension
extensions = ['sphinx.ext.autodoc',
    'sphinx.ext.intersphinx']
intersphinx_mapping = {'http://docs.python.org/': None}
# Include both the class docstring and __init__ docstring in autodoc output.
autoclass_content = "both"

# Source
master_doc = 'index'
templates_path = ['_templates']
source_suffix = '.rst'
exclude_trees = []
pygments_style = 'sphinx'

# html build settings
html_theme = 'default'

# htmlhelp settings
htmlhelp_basename = '%sdoc' % project

# latex build settings
latex_documents = [
    ('index', '%s.tex' % project, u'%s Documentation' % project,
     author, 'manual'),
]
acf75dec5ebf6d82cec956c855fd51681223196c | 5,052 | py | Python | app.py | MartinoMensio/polito_aule_bot | 2f020f44e3c9c7cf9a398f898846b3cdec7429ef | [
"MIT"
] | 4 | 2018-01-07T19:02:19.000Z | 2019-10-25T14:38:12.000Z | app.py | MartinoMensio/polito_aule_bot | 2f020f44e3c9c7cf9a398f898846b3cdec7429ef | [
"MIT"
] | 3 | 2017-06-18T13:37:55.000Z | 2017-06-21T10:37:28.000Z | app.py | MartinoMensio/polito_aule_bot | 2f020f44e3c9c7cf9a398f898846b3cdec7429ef | [
"MIT"
] | null | null | null | import os
import re
import time
import telepot
from telepot.loop import MessageLoop
from pprint import pprint
from flask import Flask, request
from dotenv import load_dotenv, find_dotenv
import wit_client
import polito_client
try:
from Queue import Queue
except ImportError:
from queue import Queue
# load environment from file if exists
load_dotenv(find_dotenv())
app = Flask(__name__)
TOKEN = os.environ['TELEGRAM_TOKEN'] # put your token in heroku app as environment variable
SECRET = '/bot' + TOKEN
URL = os.environ['HEROKU_URL'] # paste the url of your application
UPDATE_QUEUE = Queue()
BOT = telepot.Bot(TOKEN)
examples = '_Quali aule libere ci sono ora?_\n_Domani alle 10 dove posso andare a studiare?_\n' + \
'\nPuoi specificare un riferimento temporale e un riferimento spaziale:\n-temporale: abbastanza generico, dovrei capirti comunque\n-spaziale: puoi dirmi di mostrarti solo alcuni risultati in una certa area del poli (ad esempio "cittadella politecnica" o altri valori che puoi vedere in un risultato non filtrato)\n' + \
'\n_Fra due ore quali aule sono libere al lingotto?_\n' + \
'Scrivi la domanda in modo naturale'
def handle(msg):
    """Dispatch one incoming Telegram message.

    Commands (/start, /help) get canned replies; free text goes through
    the wit.ai extractor and, for the 'search_rooms' intent, queries the
    free-rooms service and replies per area.
    """
    content_type, chat_type, chat_id = telepot.glance(msg)
    print(content_type, chat_type, chat_id)
    if content_type == 'text':
        if msg['text'].startswith('/'):
            # this is a command
            if msg['text'] == '/start':
                bot.sendMessage(
                    chat_id, 'Benvenuto! chiedimi informazioni sulle aule libere')
            elif msg['text'] == '/help':
                bot.sendMessage(
                    chat_id, 'Eccoti alcuni esempi di utilizzo:\n' + examples, parse_mode="Markdown")
            # NOTE(review): unknown commands are silently ignored — confirm
            # whether a fallback reply is intended.
        else:
            # Natural-language message: extract intent + entities via wit.ai.
            intent, entities = ent_extractor.parse(msg['text'])
            # pprint(intent)
            # pprint(entities)
            if intent:
                if intent['value'] == 'greetings':
                    bot.sendMessage(chat_id, 'ciao!')
                elif intent['value'] == 'info':
                    bot.sendMessage(
                        chat_id, 'chiedimi informazioni sulle aule libere. Esempi:\n' + examples, parse_mode="Markdown")
                elif intent['value'] == 'search_rooms':
                    datetime = entities.get('datetime')
                    area = entities.get('area')
                    loading_string = 'Sto cercando aule '
                    if datetime:
                        if not datetime.get('value'):
                            # if this is an interval, use its start bound
                            datetime = datetime['from']
                        # wit.ai datetimes look like 'YYYY-MM-DDTHH:MM:SS+TZ'.
                        date_param = datetime['value'].split('T')[0]
                        time_param = datetime['value'].split('T')[1].split('+')[0]
                        loading_string += ' il giorno ' + date_param + ' ora ' + time_param
                    else:
                        date_param = None
                        time_param = None
                    if area:
                        loading_string += ' in ' + area['value']
                    bot.sendMessage(chat_id, loading_string)
                    result = data_provider.getRooms(
                        {'date': date_param, 'time': time_param})
                    # pprint(result)
                    all_rooms = result['aule_libere']
                    for curr_area, rooms in all_rooms.items():
                        # key is an area, value a list of rooms
                        if area:
                            # Only keep areas whose name contains every
                            # token of the requested area string.
                            delimiters = [' ', '_', ',', ', ', '-', '\n']
                            regex_pattern = '|'.join(
                                map(re.escape, delimiters))
                            search_filters = re.split(
                                regex_pattern, area['value'])
                            if not all(filter in curr_area.lower() for filter in search_filters):
                                continue
                        res = ''
                        for room in rooms:
                            res += ' ' + room['nome_aula']
                        bot.sendMessage(chat_id, curr_area + ': ' + res)
                else:
                    bot.sendMessage(chat_id, 'intento ' + intent['value'])
            else:
                bot.sendMessage(chat_id, 'non ti capisco')
    else:
        bot.sendMessage(chat_id, 'non supporto ancora questo tipo di messaggi')
# Runtime singletons used by handle() above.
# NOTE(review): `bot` duplicates the module-level BOT created from the same
# token — consider reusing BOT; confirm before changing.
bot = telepot.Bot(os.environ['TELEGRAM_TOKEN'])
ent_extractor = wit_client.Extractor(os.environ['WIT_TOKEN'])
data_provider = polito_client.Client(os.environ['POLITO_TOKEN'])
BOT.message_loop({'chat': handle}, source=UPDATE_QUEUE) # take updates from queue
@app.route(SECRET, methods=['GET', 'POST'])
def pass_update():
    """Webhook endpoint: enqueue the raw Telegram update for the bot loop."""
    UPDATE_QUEUE.put(request.data) # pass update to bot
    return 'OK'
# set the telegram webhook (idempotent: only re-register when it changed)
webhook_url = URL + SECRET
# https://github.com/nickoala/telepot/issues/165#issuecomment-256056446
if webhook_url != BOT.getWebhookInfo()['url']:
    BOT.setWebhook(webhook_url)
| 38.272727 | 323 | 0.552652 |
acf75e2a519e5c791726114d2d69e61c89448cd1 | 8,719 | py | Python | mmdet/models/roi_heads/bbox_heads/final_bbox_head.py | CityU-AIM-Group/HTD | 0be9fd844118c275abc6053b3cbd5ffb589e62ee | [
"MIT"
] | 5 | 2022-02-18T16:26:29.000Z | 2022-03-07T07:25:20.000Z | build/lib/mmdet/models/roi_heads/bbox_heads/final_bbox_head.py | CityU-AIM-Group/HTD | 0be9fd844118c275abc6053b3cbd5ffb589e62ee | [
"MIT"
] | 1 | 2022-02-24T12:51:19.000Z | 2022-02-28T06:31:15.000Z | mmdet/models/roi_heads/bbox_heads/final_bbox_head.py | CityU-AIM-Group/HTD | 0be9fd844118c275abc6053b3cbd5ffb589e62ee | [
"MIT"
] | null | null | null | import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init, xavier_init
import torch
from mmdet.models.builder import HEADS
from .bbox_head import BBoxHead
import torch.nn.functional as F
from mmdet.models.backbones.resnet import Bottleneck
import matplotlib.pyplot as plt
import numpy as np
# import ipdb
from mmdet.models.losses import accuracy
from mmdet.core.bbox.iou_calculators.iou2d_calculator import bbox_overlaps
from mmdet.core import multi_apply
from mmcv.runner import auto_fp16, force_fp32
from torch.optim import Adam
def see(data):
    """Debug helper: print the max, mean and min of a tensor."""
    for label, reduce_fn in (('max: ', torch.max), ('mean: ', torch.mean)):
        print(label, reduce_fn(data))
    # trailing blank line separates successive dumps
    print('min: ', torch.min(data), '\n')
@HEADS.register_module()
class finalBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=2,
num_reg_convs=4,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=36),
*args,
**kwargs):
kwargs.setdefault('with_avg_pool', True)
super(finalBBoxHead, self).__init__(*args, **kwargs)
self.conv_kernel_size = 3
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_out_channels = 1024
self.fc_out_channels = 1024
self.relu = nn.ReLU(inplace=True)
self.gcn_in = 1024
self.gcn_out = 1024
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
self.fc_reg = nn.Linear(self.conv_out_channels , 4)
self.convs = []
self.middle_channel = 16 * 36
for i in range(self.num_reg_convs):
# in_channels = (
# self.in_channels if i == 0 else self.conv_out_channels)
# stride = 2 if i == 0 else 1
# in_channels = self.in_channels
stride = 1
padding = (self.conv_kernel_size - 1) // 2
if i == 0:
self.convs.append(
ConvModule(
self.in_channels,
self.middle_channel,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=False))
elif i == self.num_reg_convs-1:
self.convs.append(
ConvModule(
self.middle_channel,
1024,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg = None,
bias=False))
else:
self.convs.append(
ConvModule(
self.middle_channel,
self.middle_channel,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=False))
self.convs = nn.Sequential(*self.convs)
self.fcs = []
for i in range(self.num_cls_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
self.fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
self.fcs.append(self.relu)
self.fcs = nn.Sequential(*self.fcs)
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
# self.project = nn.Linear(1025, 256)
self.graph_lvl0_cls = nn.Linear(self.gcn_in, self.gcn_out)
self.graph_lvl1_cls = nn.Linear(self.gcn_in, self.gcn_out)
self.graph_lvl2_cls = nn.Linear(self.gcn_in, self.gcn_out)
self.graph_lvl3_cls = nn.Linear(self.gcn_in, self.gcn_out)
self.graph_layer_cls = [self.graph_lvl0_cls, self.graph_lvl1_cls, self.graph_lvl2_cls, self.graph_lvl3_cls]
def map_roi_levels(self, rois, num_levels):
finest_scale = 56
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def init_weights(self):
super(finalBBoxHead, self).init_weights()
normal_init(self.fc_cls, std=0.01)
normal_init(self.fc_reg, std=0.001)
for m in self.fcs.modules():
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
for m in self.graph_layer_cls:
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
"""Fuse global context feats with roi feats."""
assert roi_feats.size(0) == rois.size(0)
img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
fused_feats = torch.zeros_like(roi_feats)
for img_id in img_inds:
inds = (rois[:, 0] == img_id.item())
fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
return fused_feats
    def forward(self, x_cls, x_reg, feat, rois, fc_cls_0, pos_rois=None, global_feat=None):
        """Run classification and regression heads with graph-based refinement.

        Args:
            x_cls: RoI features for classification.
            x_reg: RoI features for regression.
            feat: list of pyramid feature maps; only its length (number of
                levels) is used here.
            rois: (n, 5) RoIs, columns (batch_idx, x1, y1, x2, y2).
            fc_cls_0: classifier of a previous stage; its weights+bias form
                the class "prototype" matrix used for semantic similarity.
            pos_rois: positive RoIs matching x_reg — assumed aligned with
                x_reg rows; TODO confirm against caller.
            global_feat: optional per-image global-context features.

        Returns:
            (cls_score, bbox_pred); either may be None depending on
            self.with_cls / self.with_reg.
        """
        # Class prototypes: one row per class = [weight | bias], detached so
        # gradients do not flow back into the previous-stage classifier.
        prototype = torch.cat((fc_cls_0.weight,fc_cls_0.bias.unsqueeze(1)), 1).detach()
        # Batch size inferred from the largest image index in the RoIs.
        bs = int(torch.max(rois[...,0]))+ 1
        if global_feat is not None:
            # Fuse global context into both branches before the FC/conv stacks.
            x_cls_glb = self._fuse_glbctx(x_cls, global_feat, rois)
            x_reg = self._fuse_glbctx(x_reg, global_feat, pos_rois)
            x_cls_glb = self.fcs(x_cls_glb.flatten(1))
        # reg head: conv stack -> global average pool -> flatten.
        x_reg = self.convs(x_reg)
        x_reg = self.avg_pool(x_reg)
        x_reg = x_reg.view(x_reg.size(0), -1)
        # cls head
        x_cls = x_cls.flatten(1)
        x_cls = self.fcs(x_cls)
        # Semantic embedding per RoI: soft class probabilities projected onto
        # the prototype rows.
        sam = torch.mm(fc_cls_0(x_cls).softmax(-1),prototype)
        target_lvls = self.map_roi_levels(rois, len(feat))
        refined_feature_cls = x_cls.new_zeros(x_cls.size(0), self.gcn_out)
        # t is a zero-valued "touch" term so unused graph layers still appear
        # in the autograd graph (keeps DDP happy); tp is its dummy input.
        t = 0; tp = x_cls.new_zeros(1,1024)
        for b in range(bs):
            bs_indx = rois[...,0]==b
            lvl_indx = [target_lvls == 0,target_lvls == 1, target_lvls == 2, target_lvls == 3]
            for i in range(len(feat)):
                # RoIs of image b assigned to pyramid level i.
                bs_lvl_indx = torch.logical_and(lvl_indx[i], bs_indx)
                if bs_lvl_indx.any():
                    #classification
                    sam_ = sam[bs_lvl_indx,:]
                    rois_ = rois[bs_lvl_indx, 1:5]
                    # Local adjacency: overlapping boxes (IoU > 0) plus self
                    # loops, then symmetric normalization D^-1/2 A D^-1/2.
                    h_local_mask = bbox_overlaps(rois_, rois_).fill_diagonal_(1.)
                    h_local_mask[h_local_mask > 0] = 1.
                    D = torch.diag(torch.sum(h_local_mask, dim=-1).pow(-0.5))
                    A_local = torch.mm(torch.mm(D, h_local_mask), D)
                    # Global adjacency: semantic similarity restricted to
                    # NON-overlapping pairs, softmax-normalized per row.
                    h_global_mask = (1. - h_local_mask)
                    roi_feat = x_cls[bs_lvl_indx, :]
                    roi_feat_mixed = torch.mm(A_local, roi_feat)
                    sim = torch.mm(sam_, sam_.t())
                    A_global = (h_global_mask * sim).softmax(-1)
                    new_cls = self.relu(self.graph_layer_cls[i](torch.matmul(A_global, roi_feat_mixed)))
                    refined_feature_cls[bs_lvl_indx] = new_cls
                else:
                    # No RoIs on this level: multiply by 0 so parameters of
                    # graph_layer_cls[i] still receive (zero) gradients.
                    t += 0 * torch.sum(self.graph_layer_cls[i](tp))
        # feat_cls_new = torch.cat((x_cls, refined_feature_cls), dim=1) + t
        # Residual fusion of refined features with the (optionally
        # global-context-fused) classification features.
        if global_feat is not None:
            feat_cls_new = x_cls_glb + refined_feature_cls
        else:
            feat_cls_new = x_cls + refined_feature_cls
        cls_score = self.fc_cls(feat_cls_new) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred
| 41.717703 | 115 | 0.568184 |
acf75e634799abe1f52531dff4a80bdb1d1a4e64 | 36,348 | py | Python | tests/test_client.py | yamasite/pymilvus | 784565ae96716753947146f97a0570c0da8422ac | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | yamasite/pymilvus | 784565ae96716753947146f97a0570c0da8422ac | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | yamasite/pymilvus | 784565ae96716753947146f97a0570c0da8422ac | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
import time
import random
import pytest
import sys
import ujson
sys.path.append('.')
from faker import Faker
from milvus import IndexType, MetricType, Prepare, Milvus, Status, ParamError, NotConnectError
from milvus.client.abstract import CollectionSchema, TopKQueryResult
from milvus.client.check import check_pass_param
from milvus.client.hooks import BaseSearchHook
from factorys import (
collection_schema_factory,
records_factory,
fake
)
from milvus.grpc_gen import milvus_pb2
logging.getLogger('faker').setLevel(logging.ERROR)  # silence faker's verbose logging during tests
LOGGER = logging.getLogger(__name__)
faker = Faker(locale='en_US')
# Shared test sizes: vector dimension, bulk-insert row count, query count.
dim = 128
nb = 2000
nq = 10
class TestConnection:
def test_true_connect(self, gip):
cnn = Milvus()
cnn.connect(*gip)
assert cnn.status.OK
assert cnn.connected()
# Repeating connect
# _ = cnn.connect(*gip)
# status = cnn.connect()
# assert status == Status.CONNECT_FAILED
# @pytest.mark.skip
@pytest.mark.parametrize("url", ['tcp://145.98.234.181:9998', 'tcp://199.67.0.1:2'])
def test_false_connect(self, url):
cnn = Milvus()
with pytest.raises(NotConnectError):
cnn.connect(uri=url, timeout=1)
# def test_connected(self, gcon):
# assert gcon.connected()
# def test_non_connected(self):
# cnn = Milvus()
# # import pdb;pdb.set_trace()
# assert not cnn.connected()
def test_uri(self, ghandler, gip):
cnn = Milvus(handler=ghandler)
uri = 'tcp://{}:{}'.format(gip[0], gip[1])
cnn.connect(uri=uri)
assert cnn.status.OK()
@pytest.mark.parametrize("url",
['http://127.0.0.1:45678',
'tcp://127.0.a.1:9999',
'tcp://127.0.0.1:aaa'])
def test_uri_error(self, url):
with pytest.raises(Exception):
cnn = Milvus()
cnn.connect(uri=url)
@pytest.mark.parametrize("h", ['12234', 'aaa', '194.16834.200.200', '134.77.89.34'])
@pytest.mark.parametrize("p", ['...', 'a', '1', '800000'])
def test_host_port_error(self, h, p):
with pytest.raises(Exception):
Milvus().connect(host=h, port=p)
# def test_disconnected(self, gip):
# cnn = Milvus()
# cnn.connect(*gip)
#
# assert cnn.disconnect().OK()
# assert not cnn.connected()
#
# cnn.connect(*gip)
# assert cnn.connected()
# def test_disconnected_error(self):
# cnn = Milvus()
# with pytest.raises(NotConnectError):
# cnn.disconnect()
@pytest.mark.skip
def test_not_connect(self):
client = Milvus()
with pytest.raises(NotConnectError):
client.create_collection({})
with pytest.raises(NotConnectError):
client.has_collection("a")
with pytest.raises(NotConnectError):
client.describe_collection("a")
with pytest.raises(NotConnectError):
client.drop_collection("a")
with pytest.raises(NotConnectError):
client.create_index("a")
with pytest.raises(NotConnectError):
client.insert("a", [], None)
with pytest.raises(NotConnectError):
client.count_collection("a")
with pytest.raises(NotConnectError):
client.show_collections()
with pytest.raises(NotConnectError):
client.search("a", 1, 2, [], None)
with pytest.raises(NotConnectError):
client.search_in_files("a", [], [], 2, 1, None)
with pytest.raises(NotConnectError):
client._cmd("")
with pytest.raises(NotConnectError):
client.preload_collection("a")
with pytest.raises(NotConnectError):
client.describe_index("a")
with pytest.raises(NotConnectError):
client.drop_index("")
class TestCollection:
def test_create_collection(self, gcon):
param = collection_schema_factory()
param['collection_name'] = None
with pytest.raises(ParamError):
gcon.create_collection(param)
param = collection_schema_factory()
res = gcon.create_collection(param)
assert res.OK()
assert gcon.has_collection(param['collection_name'])
param = collection_schema_factory()
param['dimension'] = 'string'
with pytest.raises(ParamError):
res = gcon.create_collection(param)
param = '09998876565'
with pytest.raises(ParamError):
gcon.create_collection(param)
param = collection_schema_factory()
param['collection_name'] = 1234456
with pytest.raises(ParamError):
gcon.create_collection(param)
def test_create_collection_exception(self, gcon):
param = {
'collection_name': 'test_151314',
'dimension': 128,
'index_file_size': 999999
}
status = gcon.create_collection(param)
assert not status.OK()
def test_drop_collection(self, gcon, gcollection):
res = gcon.drop_collection(gcollection)
assert res.OK()
def test_false_drop_collection(self, gcon):
collection_name = 'fake_collection_name'
res = gcon.drop_collection(collection_name)
assert not res.OK()
def test_repeat_create_collection(self, gcon):
param = collection_schema_factory()
gcon.create_collection(param)
res = gcon.create_collection(param)
LOGGER.error(res)
assert not res.OK()
@pytest.mark.skip
def test_has_collection(self, gcon, gcollection):
collection_name = fake.collection_name()
status, result = gcon.has_collection(collection_name)
assert status.OK(), status.message
assert not result
result = gcon.has_collection(gcollection)
assert result
with pytest.raises(Exception):
gcon.has_collection(1111)
def test_has_collection_invalid_name(self, gcon, gcollection):
collection_name = "1234455"
status, result = gcon.has_collection(collection_name)
assert not status.OK()
class TestRecordCount:
    """Row-count API tests (require a live Milvus server via fixtures)."""
    def test_count_collection(self, gcon, gvector):
        # gvector fixture is a collection pre-populated with vectors,
        # so a positive count is expected.
        status, num = gcon.count_collection(gvector)
        assert status.OK()
        assert num > 0
class TestVector:
def test_insert(self, gcon, gcollection):
param = {
'collection_name': gcollection,
'records': records_factory(dim, nq)
}
res, ids = gcon.insert(**param)
assert res.OK()
assert isinstance(ids, list)
assert len(ids) == nq
@pytest.mark.skip
def test_insert_with_numpy(self, gcon, gcollection):
vectors = np.random.rand(nq, dim).astype(np.float32)
param = {
'collection_name': gcollection,
'records': vectors
}
res, ids = gcon.insert(**param)
assert res.OK()
assert isinstance(ids, list)
assert len(ids) == nq
def test_insert_with_ids(self, gcon, gcollection):
param = {
'collection_name': gcollection,
'records': records_factory(dim, nq),
'ids': [i + 1 for i in range(nq)]
}
res, ids = gcon.insert(**param)
assert res.OK()
assert isinstance(ids, list)
assert len(ids) == nq
def test_insert_with_wrong_ids(self, gcon, gcollection):
param = {
'collection_name': gcollection,
'records': records_factory(dim, nq),
'ids': [i + 1 for i in range(nq - 3)]
}
with pytest.raises(ParamError):
gcon.insert(**param)
def test_insert_with_no_right_dimension(self, gcon, gcollection):
param = {
'collection_name': gcollection,
'records': records_factory(dim + 1, nq)
}
res, ids = gcon.insert(**param)
assert not res.OK()
def test_insert_records_empty_list(self, gcon, gcollection):
param = {'collection_name': gcollection, 'records': [[]]}
with pytest.raises(Exception):
gcon.insert(**param)
def test_false_insert(self, gcon):
param = {
'collection_name': fake.collection_name(),
'records': records_factory(dim, nq)
}
res, ids = gcon.insert(**param)
assert not res.OK()
def test_insert_wrong_collection_name(self, gcon):
collection_name = "&*^%&&dvfdgd(()"
vectors = records_factory(dim, nq)
status, _ = gcon.insert(collection_name, vectors)
assert not status.OK()
# def test_add_vectors_wrong_insert_param(self, gcon, gvector):
# vectors = records_factory(dim, nq)
#
# with pytest.raises(ParamError):
# gcon.insert(gvector, vectors, insert_param="w353453")
class TestSearch:
def test_search_normal(self, gcon, gvector):
topk = random.randint(1, 10)
query_records = records_factory(dim, nq)
search_param = {
"nprobe": 10
}
param = {
'collection_name': gvector,
'query_records': query_records,
'top_k': topk,
'params': search_param
}
res, results = gcon.search(**param)
assert res.OK()
assert len(results) == nq
assert len(results[0]) == topk
assert results.shape[0] == nq
assert results.shape[1] == topk
def test_search_wrong_dim(self, gcon, gvector):
topk = random.randint(1, 10)
query_records = records_factory(dim + 1, nq)
search_param = {
"nprobe": 10
}
param = {
'collection_name': gvector,
'query_records': query_records,
'top_k': topk,
'params': search_param
}
res, results = gcon.search(**param)
assert not res.OK()
def test_search_wrong_collection_name(self, gcon, gvector):
topk = random.randint(1, 10)
query_records = records_factory(dim, nq)
search_param = {
"nprobe": 10
}
param = {
'collection_name': gvector + 'wrong',
'query_records': query_records,
'top_k': topk,
'params': search_param
}
res, _ = gcon.search(**param)
assert not res.OK()
def test_false_vector(self, gcon):
search_param = {
"nprobe": 10
}
param = {
'collection_name': fake.collection_name(),
'query_records': records_factory(dim, nq),
'top_k': 'string',
'params': search_param
}
with pytest.raises(ParamError):
gcon.search(**param)
param = {
'collection_name': fake.collection_name(),
'query_records': records_factory(dim, nq),
'top_k': 'string',
'params': search_param
}
with pytest.raises(ParamError):
gcon.search(**param)
def test_search_in_files(self, gcon, gvector):
search_param = {
"nprobe": 10
}
param = {
'collection_name': gvector,
'query_records': records_factory(dim, nq),
'file_ids': [],
'top_k': random.randint(1, 10),
'params': search_param
}
for id_ in range(5000):
param['file_ids'].clear()
param['file_ids'].append(str(id_))
sta, result = gcon.search_in_files(**param)
if sta.OK():
return
print("search in file failed")
assert False
def test_search_in_files_wrong_file_ids(self, gcon, gvector):
search_param = {
"nprobe": 10
}
param = {
'collection_name': gvector,
'query_records': records_factory(dim, nq),
'file_ids': ['3388833'],
'top_k': random.randint(1, 10),
'params': search_param
}
sta, results = gcon.search_in_files(**param)
assert not sta.OK()
class TestCollectionMeta:
    """Collection metadata APIs: describe / show / count / version / status."""
    def test_describe_collection(self, gcon, gcollection):
        status, collection_schema = gcon.describe_collection(gcollection)
        assert status.OK()
        assert isinstance(collection_schema, CollectionSchema)
    def test_false_decribe_collection(self, gcon):
        # Describing a random, non-existent collection must fail and
        # return a falsy schema.
        collection_name = fake.collection_name()
        res, collection_schema = gcon.describe_collection(collection_name)
        assert not res.OK()
        assert not collection_schema
    def test_show_collections(self, gcon, gcollection):
        res, collections = gcon.show_collections()
        assert res.OK()
        # NOTE(review): assumes gcollection is the ONLY collection on the
        # server — brittle if tests run against a shared instance; confirm.
        assert len(collections) == 1
    def test_count_collection(self, gcon, gvector, gcollection):
        res, count = gcon.count_collection(gvector)
        assert res.OK()
        # NOTE(review): 10000 is the row count the gvector fixture inserts —
        # keep in sync with the fixture definition.
        assert count == 10000
    def test_false_count_collection(self, gcon):
        res, count = gcon.count_collection('fake_collection')
        assert not res.OK()
    def test_client_version(self, gcon):
        res = gcon.client_version()
        assert isinstance(res, str)
    def test_server_status(self, gcon):
        status, res = gcon.server_status()
        assert status.OK()
class TestPrepare:
    """Unit test for the gRPC request-builder helpers (no server needed)."""
    def test_collection_schema(self):
        # Prepare.table_schema(name, dimension, index_file_size, metric, params)
        # must produce a protobuf TableSchema message.
        res = Prepare.table_schema(fake.collection_name(), random.randint(0, 999), 1024, MetricType.L2, {})
        assert isinstance(res, milvus_pb2.TableSchema)
class TestCreateCollection:
def test_create_collection_normal(self, gcon):
param = collection_schema_factory()
status = gcon.create_collection(param)
assert status.OK()
def test_create_collection_default(self, gcon):
param = {
'collection_name': 'zilliz_test',
'dimension': 128
}
status = gcon.create_collection(param)
assert status.OK()
gcon.drop_collection('zilliz_test')
def test_create_collection_name_wrong(self, gcon):
param = collection_schema_factory()
param['collection_name'] = '.....'
status = gcon.create_collection(param)
LOGGER.error(status)
assert not status.OK()
class TestDescribeCollection:
def test_describe_collection_normal(self, gcon):
param = collection_schema_factory()
gcon.create_collection(param)
status, collection = gcon.describe_collection(param['collection_name'])
assert status.OK()
assert collection.collection_name == param['collection_name']
status, collection = gcon.describe_collection('collection_not_exists')
assert not status.OK()
class TestShowCollections:
    """Listing collections (requires a live server via the gcon fixture)."""
    def test_show_collections_normal(self, gcon):
        status, collections = gcon.show_collections()
        # NOTE(review): error-level log is used only to surface the list in
        # test output; consider LOGGER.debug instead.
        LOGGER.error(collections)
        assert status.OK()
class TestDropCollection:
def test_drop_collection_normal(self, gcon):
param = collection_schema_factory()
s = gcon.create_collection(param)
assert s.OK()
_, collections = gcon.show_collections()
assert param['collection_name'] in collections
status = gcon.drop_collection(param['collection_name'])
_, collections = gcon.show_collections()
assert param['collection_name'] not in collections
def test_drop_collection(self, gcon, gcollection):
status = gcon.drop_collection(gcollection)
assert status.OK()
class TestHasCollection:
    """has_collection must report True right after a successful create."""
    def test_has_collection(self, gcon):
        param = collection_schema_factory()
        s = gcon.create_collection(param)
        assert s.OK()
        status, flag = gcon.has_collection(param['collection_name'])
        assert status.OK() and flag
class TestAddVectors:
def test_insert_normal(self, gcon, gcollection):
vectors = records_factory(dim, nq)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
assert len(ids) == nq
status = gcon.flush([gcollection])
assert status.OK(), status.message
status, count = gcon.count_collection(gcollection)
assert status.OK()
assert count == nq
gcon.preload_collection(gcollection)
def test_insert_numpy_array(self, gcon ,gcollection):
vectors = np.random.rand(10000, 128)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK(), status.message
def test_insert_ids(self, gcon, gcollection):
vectors = records_factory(dim, nb)
ids = [i for i in range(nb)]
status, vectors_ids = gcon.insert(gcollection, vectors, ids)
assert status.OK()
assert len(ids) == len(vectors_ids)
status = gcon.flush([gcollection])
assert status.OK(), status.message
status, count = gcon.count_collection(gcollection)
assert status.OK()
assert count == nb
class TestIndex:
@pytest.mark.skip
def test_available_index(self, gcon, gcollection):
for name, member in IndexType.__members__.items():
if member.value == 0:
continue
_index = {
'nlist': 4096
}
status = gcon.create_index(gcollection, member, _index)
assert status.OK(), "Index {} create failed: {}".format(member, status.message)
gcon.drop_index(gcollection)
def test_describe_index(self, gcon, gcollection):
vectors = records_factory(dim, nb)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
assert len(ids) == nb
status = gcon.flush([gcollection])
assert status.OK(), status.message
_index = {
'nlist': 4096
}
status = gcon.create_index(gcollection, IndexType.IVF_FLAT, _index)
assert status.OK(), status.message
status, index_schema = gcon.describe_index(gcollection)
assert status.OK()
print("\n{}\n".format(index_schema))
def test_describe_index_wrong_collection_name(self, gcon):
collection_name = "%&%&"
status, _ = gcon.describe_index(collection_name)
assert not status.OK()
def test_drop_index(self, gcon, gcollection):
vectors = records_factory(dim, nb)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
assert len(ids) == nb
status = gcon.flush([gcollection])
assert status.OK(), status.message
status, count = gcon.count_collection(gcollection)
assert status.OK()
assert count == nb
_index = {
'nlist': 16384
}
status = gcon.create_index(gcollection, IndexType.IVFLAT, _index)
assert status.OK()
status = gcon.drop_index(gcollection)
assert status.OK()
@pytest.mark.skip(reason="crud branch")
class TestSearchByID:
def test_search_by_id_normal(self, gcon, gcollection):
vectors = records_factory(dim, nq)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
status = gcon.flush([gcollection])
assert status.OK(), status.message
status, result = gcon.search_by_id(gcollection, 2, 10, ids[0])
assert status.OK()
print(result)
assert 1 == len(result)
assert 2 == len(result[0])
assert ids[0] == result[0][0].id
def test_search_by_id_with_partitions(self, gcon, gcollection):
tag = "search_by_id_partitions_tag"
status = gcon.create_partition(gcollection, tag)
assert status.OK()
vectors = records_factory(dim, nq)
status, ids = gcon.insert(gcollection, vectors, partition_tag=tag)
assert status.OK()
time.sleep(2)
status, result = gcon.search_by_id(gcollection, 2, 10, ids[0], partition_tag_array=[tag])
assert status.OK()
assert 1 == len(result)
assert 2 == len(result[0])
assert ids[0] == result[0][0].id
def test_search_by_id_with_wrong_param(self, gcon, gcollection):
with pytest.raises(ParamError):
gcon.search_by_id(gcollection, 'x', 1, 1)
with pytest.raises(ParamError):
gcon.search_by_id(gcollection, 1, '1', 1)
with pytest.raises(ParamError):
gcon.search_by_id(gcollection, 1, 1, 'aaa')
status, _ = gcon.search_by_id(gcollection, -1, 1, 1)
assert not status.OK()
status, _ = gcon.search_by_id(gcollection, 1, -1, 1)
assert not status.OK()
@pytest.mark.skip(reason="except empty result, return result with -1 id instead")
def test_search_by_id_with_exceed_id(self, gcon, gcollection):
vectors = records_factory(dim, nq)
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
status, result = gcon.search_by_id(gcollection, 2, 10, ids[0] + 100)
assert status.OK()
print(result)
assert 0 == len(result)
class TestBuildIndex:
def test_build_index(self, gcon, gvector):
_D = 128
time.sleep(5)
_index = {
'nlist': 4096
}
print("Create index ... ")
status = gcon.create_index(gvector, IndexType.IVF_FLAT, _index)
assert status.OK()
def test_create_index_wrong_index(self, gcon, gvector):
_index = "23523423"
with pytest.raises(ParamError):
gcon.create_index(gvector, _index)
def test_create_index_wrong_collection_name(self, gcon, gvector):
_index = {
'nlist': 4096
}
status = gcon.create_index("*^&*^dtedge", IndexType.IVF_FLAT, _index, timeout=None)
assert not status.OK()
class TestCmd:
    """Version/status commands, including the low-level _cmd passthrough."""
    # Server versions these tests accept — must be updated for new releases.
    versions = ("0.5.3", "0.6.0", "0.7.0")
    def test_client_version(self, gcon):
        try:
            import milvus
            assert gcon.client_version() == milvus.__version__
        except ImportError:
            assert False, "Import error"
    def test_server_version(self, gcon):
        _, version = gcon.server_version()
        assert version in self.versions
    def test_server_status(self, gcon):
        _, status = gcon.server_status()
        assert status in ("OK", "ok")
    def test_cmd(self, gcon):
        # _cmd("version") / _cmd("status") are the raw commands backing the
        # two public helpers above; expect identical payloads.
        _, info = gcon._cmd("version")
        assert info in self.versions
        _, info = gcon._cmd("status")
        assert info in ("OK", "ok")
class TestChecking:
    """Parameter validation helper check_pass_param (pure client-side)."""
    @pytest.mark.parametrize(
        "key_, value_",
        [("ids", [1, 2]), ("nprobe", 12), ("nlist", 4096), ("cmd", 'OK')]
    )
    def test_param_check_normal(self, key_, value_):
        """Valid values must not raise."""
        try:
            check_pass_param(**{key_: value_})
        except Exception:
            assert False
    @pytest.mark.parametrize(
        "key_, value_",
        [("ids", []), ("nprobe", "aaa"), ("nlist", "aaa"), ("cmd", 123)]
    )
    def test_param_check_error(self, key_, value_):
        """Empty lists and wrongly-typed values must raise ParamError."""
        with pytest.raises(ParamError):
            check_pass_param(**{key_: value_})
class TestQueryResult:
query_vectors = [[random.random() for _ in range(128)] for _ in range(200)]
def _get_response(self, gcon, gvector, topk, nprobe, nq):
search_param = {
"nprobe": nprobe
}
return gcon.search(gvector, topk, self.query_vectors[:nq], params=search_param)
def test_search_result(self, gcon, gvector):
try:
status, results = self._get_response(gcon, gvector, 2, 1, 1)
assert status.OK()
# test get_item
shape = results.shape
# test TopKQueryResult slice
rows = results[:1]
# test RowQueryResult
row = results[shape[0] - 1]
# test RowQueryResult slice
items = row[:1]
# test iter
for topk_result in results:
for item in topk_result:
print(item)
# test len
len(results)
# test print
print(results)
# test result for nq = 10, topk = 10
status, results = self._get_response(gcon, gvector, 10, 10, 10)
print(results)
except Exception:
assert False
def test_search_in_files_result(self, gcon, gvector):
try:
search_param = {
"nprobe": 1
}
for index in range(1000):
status, results = \
gcon.search_in_files(collection_name=gvector, top_k=1,
file_ids=[str(index)], query_records=self.query_vectors, params=search_param)
if status.OK():
break
# test get_item
shape = results.shape
item = results[shape[0] - 1][shape[1] - 1]
# test iter
for topk_result in results:
for item in topk_result:
print(item)
# test len
len(results)
# test print
print(results)
except Exception:
assert False
def test_empty_result(self, gcon, gcollection):
status, results = self._get_response(gcon, gcollection, 3, 3, 3)
shape = results.shape
for topk_result in results:
for item in topk_result:
print(item)
class TestPartition:
def test_create_partition_in_empty_collection(self, gcon, gcollection):
status = gcon.create_partition(collection_name=gcollection, partition_tag="1")
assert status.OK()
vectors = [[random.random() for _ in range(128)] for _ in range(100)]
status, _ = gcon.insert(gcollection, vectors, partition_tag="1")
assert status.OK()
def test_create_partition_after_insert(self, gcon, gvector):
status = gcon.create_partition(collection_name=gvector, partition_tag="1")
assert status.OK()
def test_insert_with_wrong_partition(self, gcon, gcollection):
status = gcon.create_partition(collection_name=gcollection, partition_tag="1")
assert status.OK()
vectors = [[random.random() for _ in range(128)] for _ in range(100)]
status, _ = gcon.insert(gcollection, vectors, partition_tag="2")
assert not status.OK()
def test_search_with_partition_first(self, gcon, gcollection):
status = gcon.create_partition(collection_name=gcollection, partition_tag="2")
assert status.OK()
status, partitions = gcon.show_partitions(gcollection)
assert status.OK()
vectors = [[random.random() for _ in range(128)] for _ in range(100)]
status, ids = gcon.insert(gcollection, vectors, partition_tag="2")
assert status.OK()
assert len(ids) == 100
gcon.flush([gcollection])
query_vectors = vectors[:1]
# search in global scope
search_param = {
"nprobe": 1
}
status, results = gcon.search(gcollection, 1, query_vectors, params=search_param)
assert status.OK()
assert results.shape == (1, 1)
# search in specific tags
status, results = gcon.search(gcollection, 1, query_vectors, partition_tags=["2"], params=search_param)
assert status.OK()
assert results.shape == (1, 1)
# search in non-existing tags
status, results = gcon.search(
gcollection, 1,
query_vectors,
partition_tags=["ee4tergdgdgedgdgergete5465erwtwtwtwtfdf"],
params=search_param)
assert status.OK()
print(results)
assert results.shape == (0, 0)
# @pytest.mark.skip
def test_search_with_partition_insert_first(self, gcon, gcollection):
vectors = [[random.random() for _ in range(128)] for _ in range(100)]
status, ids = gcon.insert(gcollection, vectors)
assert status.OK()
assert len(ids) == 100
# waiting for data prepared
time.sleep(5)
partition_tag = "partition_tag_" + faker.word()
status = gcon.create_partition(collection_name=gcollection, partition_tag=partition_tag)
assert status.OK()
status, partitions = gcon.show_partitions(gcollection)
assert status.OK()
query_vectors = [[random.random() for _ in range(128)] for _ in range(1)]
# search in global scope
search_param = {
"nprobe": 1
}
status, results = gcon.search(gcollection, 1, query_vectors, params=search_param)
assert status.OK()
assert results.shape == (1, 1)
# search in specific tags
status, results = gcon.search(gcollection, 1, query_vectors, partition_tags=[partition_tag], params=search_param)
assert status.OK()
print(results)
assert results.shape == (0, 0)
# search in wrong tags
status, results = gcon.search(gcollection, 1, query_vectors, partition_tags=[faker.word() + "wrong"], params=search_param)
assert status.OK(), status.message
print(results)
assert results.shape == (0, 0)
def test_drop_partition(self, gcon, gcollection):
status = gcon.create_partition(gcollection, "1")
assert status.OK()
vectors = [[random.random() for _ in range(128)] for _ in range(100)]
status, _ = gcon.insert(gcollection, vectors, partition_tag="1")
assert status.OK()
status = gcon.drop_partition(gcollection, "1")
assert status.OK(), status.message
class TestSegment:
def test_collection_info(self, gcon, gvector):
status, info = gcon.collection_info(gvector)
assert status.OK(), status.message
assert info.count == 10000
assert isinstance(info.partitions_stat, list)
par0 = info.partitions_stat[0]
assert par0.tag == "_default"
assert isinstance(par0.segments_stat, list)
print(info)
def test_collection_info_wrong_name(self, gcon):
status, _ = gcon.collection_info("124124122****")
assert not status.OK()
def test_get_segment_ids(self, gcon, gvector):
status, info = gcon.collection_info(gvector)
assert status.OK()
seg0 = info.partitions_stat[0].segments_stat[0]
status, ids = gcon.get_vector_ids(gvector, seg0.segment_name)
assert status.OK(), status.message
print(ids[:5])
def test_get_segment_invalid_ids(self, gcon):
with pytest.raises(ParamError):
gcon.get_vector_ids(123, "")
with pytest.raises(ParamError):
gcon.get_vector_ids("111", [])
def test_get_segment_non_existent_collection_segment(self, gcon, gcollection):
status, _ = gcon.get_vector_ids("ijojojononsfsfswgsw", "aaa")
assert not status.OK()
status, _ = gcon.get_vector_ids(gcollection, "aaaaaa")
assert not status.OK()
class TestGetVectorByID:
    """Fetching a stored vector by its explicit id."""
    def test_get_vector_by_id(self, gcon, gcollection):
        vectors = records_factory(128, 1000)
        ids = [i for i in range(1000)]
        status, ids_out = gcon.insert(collection_name=gcollection, records=vectors, ids=ids)
        assert status.OK(), status.message
        # flush so the inserted vectors are persisted and queryable.
        gcon.flush([gcollection])
        status, vec = gcon.get_vector_by_id(gcollection, ids_out[0])
        assert status.OK()
class TestDeleteByID:
    """delete_by_id behavior for valid ids, bad params, and out-of-range ids."""
    def test_delete_by_id_normal(self, gcon, gcollection):
        vectors = records_factory(dim, nq)
        status, ids = gcon.insert(gcollection, vectors)
        assert status.OK()
        # fixed sleep stands in for an explicit flush — presumably waiting
        # for the insert to become visible; TODO replace with gcon.flush.
        time.sleep(2)
        status = gcon.delete_by_id(gcollection, ids[0:10])
        assert status.OK()
    def test_delete_by_id_wrong_param(self, gcon, gcollection):
        # A non-list id argument must be rejected client-side.
        with pytest.raises(ParamError):
            gcon.delete_by_id(gcollection, "aaa")
    @pytest.mark.skip
    def test_delete_by_id_succeed_id(self, gcon, gcollection):
        vectors = records_factory(dim, nq)
        status, ids = gcon.insert(gcollection, vectors)
        assert status.OK()
        time.sleep(2)
        # id beyond any inserted id — expected (when enabled) to fail.
        ids_exceed = [ids[-1] + 10]
        status = gcon.delete_by_id(gcollection, ids_exceed)
        assert not status.OK()
class TestFlush:
def test_flush(self, gcon):
collection_param = {
"collection_name": '',
"dimension": dim
}
collection_list = ["test_flush_1", "test_flush_2", "test_flush_3"]
vectors = records_factory(dim, nq)
for collection in collection_list:
collection_param["collection_name"] = collection
gcon.create_collection(collection_param)
gcon.insert(collection, vectors)
status = gcon.flush(collection_list)
assert status.OK()
for collection in collection_list:
gcon.drop_collection(collection)
def test_flush_with_none(self, gcon, gcollection):
collection_param = {
"collection_name": '',
"dimension": dim
}
collection_list = ["test_flush_1", "test_flush_2", "test_flush_3"]
vectors = records_factory(dim, nq)
for collection in collection_list:
collection_param["collection_name"] = collection
gcon.create_collection(collection_param)
gcon.insert(collection, vectors)
status = gcon.flush()
assert status.OK(), status.message
for collection in collection_list:
gcon.drop_collection(collection)
class TestCompact:
    """compact() behavior on populated, deleted-from, empty, and bad collections."""
    def test_compact_normal(self, gcon, gcollection):
        """Compact succeeds on a collection that only received inserts."""
        vectors = [[random.random() for _ in range(128)] for _ in range(10000)]
        # Consistency fix: use insert() like the rest of this module; the
        # original called the deprecated add_vectors alias.
        status, ids = gcon.insert(collection_name=gcollection, records=vectors)
        assert status.OK()
        status = gcon.compact(gcollection)
        assert status.OK(), status.message
    def test_compact_after_delete(self, gcon, gcollection):
        """Compact succeeds after part of the data was deleted."""
        vectors = [[random.random() for _ in range(128)] for _ in range(10000)]
        status, ids = gcon.insert(collection_name=gcollection, records=vectors)
        assert status.OK(), status.message
        status = gcon.flush([gcollection])
        assert status.OK(), status.message
        status = gcon.delete_by_id(gcollection, ids[100:1000])
        # Bug fix: `assert status` always passed (Status objects are truthy);
        # the intent is to check the delete succeeded.
        assert status.OK(), status.message
        status = gcon.compact(gcollection)
        assert status.OK(), status.message
    def test_compact_with_empty_collection(self, gcon, gcollection):
        """Compacting an empty collection is a no-op but must succeed."""
        status = gcon.compact(gcollection)
        assert status.OK(), status.message
    def test_compact_with_non_exist_name(self, gcon):
        """Compacting an unknown collection returns a failed Status."""
        status = gcon.compact(collection_name="die333")
        assert not status.OK()
    def test_compact_with_invalid_name(self, gcon):
        """A non-string name is rejected client-side with ParamError."""
        with pytest.raises(ParamError):
            gcon.compact(collection_name=124)
class TestCollectionInfo:
def test_collection_info_normal(self, gcon, gcollection):
for _ in range(10):
records = records_factory(128, 10000)
status, _ = gcon.insert(gcollection, records)
assert status.OK()
gcon.flush([gcollection])
status, _ = gcon.collection_info(gcollection, timeout=None)
assert status.OK()
def test_collection_info_with_partitions(self, gcon, gcollection):
for i in range(5):
partition_tag = "tag_{}".format(i)
status = gcon.create_partition(gcollection, partition_tag)
assert status.OK(), status.message
for j in range(3):
records = records_factory(128, 10000)
status, _ = gcon.insert(gcollection, records, partition_tag=partition_tag)
assert status.OK(), status.message
status = gcon.flush([gcollection])
assert status.OK(), status.message
status, _ = gcon.collection_info(gcollection, timeout=None)
assert status.OK(), status.message
def test_collection_info_with_empty_collection(self, gcon, gcollection):
status, _ = gcon.collection_info(gcollection)
assert status.OK(), status.message
def test_collection_info_with_non_exist_collection(self, gcon):
status, _ = gcon.collection_info("Xiaxiede")
assert not status.OK(), status.message
| 30.264779 | 130 | 0.610405 |
acf75e8c019d47072125aaef8057daef09335b00 | 3,159 | py | Python | Scripts/l2ff/dynamic_resources_exp2.py | radical-experiments/campaign_manager | 337660cf07a97933b9b516d6612353bd3f6592a8 | [
"MIT"
] | null | null | null | Scripts/l2ff/dynamic_resources_exp2.py | radical-experiments/campaign_manager | 337660cf07a97933b9b516d6612353bd3f6592a8 | [
"MIT"
] | null | null | null | Scripts/l2ff/dynamic_resources_exp2.py | radical-experiments/campaign_manager | 337660cf07a97933b9b516d6612353bd3f6592a8 | [
"MIT"
] | null | null | null | from radical.cm.planner import L2FFPlanner
from random import gauss, uniform
import pandas as pd
import numpy as np
import sys
from time import time
def campaign_creator(num_workflows):
    """Build a synthetic campaign of ``num_workflows`` identical workflows.

    Each workflow is a dict with a ``None`` description, a 1-based ``id``
    and a fixed operation count of 75000.

    Returns:
        Tuple of (list of workflow dicts, parallel list of operation counts).
    """
    campaign = [
        {'description': None, 'id': idx + 1, 'num_oper': 75000}
        for idx in range(num_workflows)
    ]
    num_oper = [workflow['num_oper'] for workflow in campaign]
    return campaign, num_oper
def resdf_to_dict(res_df, size):
    """Create ``size`` homogeneous resource dicts with ids 1..size.

    Every resource gets unit performance. ``res_df`` is accepted for
    interface compatibility but is not read.
    """
    return [{'id': idx + 1, 'performance': 1.0} for idx in range(size)]
def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False):
    """Calculate makespan of a plan under (optionally) noisy runtimes.

    Args:
        curr_plan: Iterable of placements ``(workflow, resource, _, expected_finish)``
            where ``workflow`` has ``num_oper`` and ``resource`` has ``id`` (1-based)
            and ``performance``.
        num_resources: Total number of resources referenced by the plan.
        workflow_inaccur: Relative bound on the per-workflow operation-count error.
        positive: If True, draw the error only from ``[0, workflow_inaccur]``.
        dynamic_res: If True, perturb each resource's performance with Gaussian
            noise (relative std-dev 0.0644).

    Returns:
        Tuple ``(makespan, reactive_makespan, expected_makespan)``.
    """
    reactive_resource_usage = [0] * num_resources
    resource_usage = [0] * num_resources
    expected = [0] * num_resources
    for placement in curr_plan:
        workflow = placement[0]
        resource = placement[1]
        resource_id = resource['id']
        expected_finish = placement[3]
        if dynamic_res:
            perf = gauss(resource['performance'], resource['performance'] * 0.0644)
        else:
            # BUG FIX: this branch previously assigned to `pref` (typo), leaving
            # `perf` undefined and raising NameError whenever dynamic_res=False.
            perf = resource['performance']
        if positive:
            inaccur = uniform(0, workflow_inaccur)
        else:
            inaccur = uniform(-workflow_inaccur, workflow_inaccur)
        exec_time = (workflow['num_oper'] * (1 + inaccur)) / perf
        reactive_resource_usage[resource_id - 1] += exec_time
        # A trial cannot be considered done before its planned finish time.
        resource_usage[resource_id - 1] = max(resource_usage[resource_id - 1] + exec_time, expected_finish)
        expected[resource_id - 1] = expected_finish
    # Dead locals `under` and `tmp_idx` from the original were removed: they
    # were written but never read.
    return max(resource_usage), max(reactive_resource_usage), max(expected)
if __name__ == "__main__":
    # Number of planning repetitions per resource count (first CLI argument).
    repetitions = int(sys.argv[1])
    # NOTE(review): loaded but never used below — confirm whether the dynamic
    # resource traces were meant to drive get_makespan.
    dyn_resources = np.load('../../Data/homogeneous_resources_dyn.npy')
    total_resources = pd.read_csv('../../Data/heterogeneous_resources.csv')
    num_resources = [256]
    results = pd.DataFrame(columns=['size', 'planner', 'plan', 'makespan',
                                    'reactive', 'expected', 'mpn_snt',
                                    'rect_snt', 'time'])
    campaign, num_oper = campaign_creator(num_workflows=1024)
    for res_num in num_resources:
        print('Number of resources: %d' % res_num)
        resources = resdf_to_dict(res_df=total_resources, size=res_num)
        for _ in range(repetitions):
            planner = L2FFPlanner(campaign=campaign, resources=resources,
                                  num_oper=num_oper, sid='test1')
            tic = time()
            plan = planner.plan()
            toc = time()
            makespan, reactive, expected = get_makespan(plan, res_num, 0, dynamic_res=True)
            # BUG FIX: the 'time' column previously stored the `time` function
            # object itself instead of the elapsed planning time `toc - tic`.
            results.loc[len(results)] = [res_num, 'L2FF', plan, makespan,
                                         reactive, expected,
                                         makespan - expected,
                                         reactive - expected, toc - tic]
            del planner
    results.to_csv('../../Data/l2ff/DynHomoResources_StHomoCampaignsL2FF2.csv', index=False)
acf75f01a379bfda3d4193f014a175f7c5632d76 | 1,161 | py | Python | aztk/version.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | aztk/version.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | aztk/version.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = '0.7.0b9'
| 44.653846 | 77 | 0.774332 |
acf75f7cad669812fd756b3f165a43a461b350d7 | 487 | py | Python | rldb/db/paper__dqn2013/algo__hneat_pixel/__init__.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 45 | 2019-05-13T17:39:33.000Z | 2022-03-07T23:44:13.000Z | rldb/db/paper__dqn2013/algo__hneat_pixel/__init__.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 2 | 2019-03-29T01:41:59.000Z | 2019-07-02T02:48:31.000Z | rldb/db/paper__dqn2013/algo__hneat_pixel/__init__.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 2 | 2020-04-07T20:57:30.000Z | 2020-07-08T12:55:15.000Z | """
HyperNEAT Pixel scores from DQN2013 paper.
7 entries
------------------------------------------------------------------------
7 unique entries
"""
from .entries import entries

# Algorithm metadata merged into every entry of this module.
algo = {
    # ALGORITHM
    "algo-title": "HyperNEAT Pixel",
    "algo-nickname": "HNeat Pixel",
    "algo-source-title": "A Neuroevolution Approach to General Atari Game Playing",
}

# Populate entries: each raw entry is augmented with the algorithm fields
# (entry keys win over algo keys only if duplicated, which they are not here).
entries = [{**entry, **algo} for entry in entries]

# Sanity check: this paper contributes exactly 7 entries.
assert len(entries) == 7
acf7600ad421b7477632dd47b85ac8ee61e01e2b | 18,213 | py | Python | ax/utils/testing/backend_simulator.py | trsvchn/Ax | 0b430641c6b33920757dd09ae4318ea487fb4136 | [
"MIT"
] | 1,803 | 2019-05-01T16:04:15.000Z | 2022-03-31T16:01:29.000Z | ax/utils/testing/backend_simulator.py | trsvchn/Ax | 0b430641c6b33920757dd09ae4318ea487fb4136 | [
"MIT"
] | 810 | 2019-05-01T07:17:47.000Z | 2022-03-31T23:58:46.000Z | ax/utils/testing/backend_simulator.py | trsvchn/Ax | 0b430641c6b33920757dd09ae4318ea487fb4136 | [
"MIT"
] | 220 | 2019-05-01T05:37:22.000Z | 2022-03-29T04:30:45.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
import time
from dataclasses import dataclass
from typing import Dict, List, Optional
from ax.core.base_trial import TrialStatus
from ax.utils.common.logger import get_logger
logger = get_logger(__name__)
@dataclass
class SimTrial:
    """Container for the simulation tasks.

    Attributes:
        trial_index: The index of the trial (should match Ax trial index).
        sim_runtime: The runtime of the trial (sampled at creation).
        sim_start_time: When the trial started running (or exits queued state).
        sim_queued_time: When the trial was initially queued.
        sim_completed_time: When the trial was marked as completed. Currently,
            this is used by an early-stopper via ``stop_trial``.
    """

    # The (Ax) trial index
    trial_index: int
    # The simulation runtime in seconds (already scaled by time_scaling)
    sim_runtime: float
    # the start time in seconds (None while the trial is still queued)
    sim_start_time: Optional[float] = None
    # the queued time in seconds
    sim_queued_time: Optional[float] = None
    # the completed time (used for early stopping)
    sim_completed_time: Optional[float] = None
@dataclass
class SimStatus:
    """Container for status of the simulation.

    Attributes:
        queued: List of indices of queued trials.
        running: List of indices of running trials.
        failed: List of indices of failed trials.
        time_remaining: List of sim time remaining for running trials.
        completed: List of indices of completed trials.
    """

    queued: List[int]
    running: List[int]
    failed: List[int]
    time_remaining: List[float]
    completed: List[int]
@dataclass
class BackendSimulatorOptions:
    """Settings for the BackendSimulator.

    Args:
        max_concurrency: The maximum number of trials that can be run
            in parallel.
        time_scaling: The factor to scale down the runtime of the tasks by.
            If ``runtime`` is the actual runtime of a trial, the simulation
            time will be ``runtime / time_scaling``.
        failure_rate: The rate at which the trials are failing. For now, trials
            fail independently with a coin flip based on that rate.
        internal_clock: The initial state of the internal clock. If `None`,
            the simulator uses ``time.time()`` as the clock.
        use_update_as_start_time: Whether the start time of a new trial should be logged
            as the current time (at time of update) or end time of previous trial.
            This makes sense when using the internal clock and the BackendSimulator
            is simulated forward by an external process (such as Scheduler).
    """

    max_concurrency: int = 1
    time_scaling: float = 1.0
    failure_rate: float = 0.0
    internal_clock: Optional[float] = None
    use_update_as_start_time: bool = False
@dataclass
class BackendSimulatorState:
    """State of the BackendSimulator.

    Args:
        options: The BackendSimulatorOptions associated with this simulator.
        verbose_logging: Whether the simulator is using verbose logging.
        queued: Currently queued trials (as ``SimTrial.__dict__`` snapshots).
        running: Currently running trials (as ``SimTrial.__dict__`` snapshots).
        failed: Currently failed trials (as ``SimTrial.__dict__`` snapshots).
        completed: Currently completed trials (as ``SimTrial.__dict__`` snapshots).
    """

    options: BackendSimulatorOptions
    verbose_logging: bool
    queued: List[Dict[str, Optional[float]]]
    running: List[Dict[str, Optional[float]]]
    failed: List[Dict[str, Optional[float]]]
    completed: List[Dict[str, Optional[float]]]
class BackendSimulator:
    """Simulator for a backend deployment with concurrent dispatch and a queue."""

    def __init__(
        self,
        options: Optional[BackendSimulatorOptions] = None,
        queued: Optional[List[SimTrial]] = None,
        running: Optional[List[SimTrial]] = None,
        failed: Optional[List[SimTrial]] = None,
        completed: Optional[List[SimTrial]] = None,
        verbose_logging: bool = True,
    ) -> None:
        """A simulator for a concurrent dispatch with a queue.

        Args:
            options: A ``BackendSimulatorOptions`` object with various settings
                of the backend simulator.
            queued: A list of SimTrial objects representing the queued trials
                (only used for testing particular initialization cases)
            running: A list of SimTrial objects representing the running trials
                (only used for testing particular initialization cases)
            failed: A list of SimTrial objects representing the failed trials
                (only used for testing particular initialization cases)
            completed: A list of SimTrial objects representing the completed trials
                (only used for testing particular initialization cases)
            verbose_logging: If False, sets the logging level to WARNING.
        """
        if not verbose_logging:
            logger.setLevel(logging.WARNING)  # pragma: no cover
        if options is None:
            options = BackendSimulatorOptions()
        self.max_concurrency = options.max_concurrency
        self.time_scaling = options.time_scaling
        self.failure_rate = options.failure_rate
        self.use_update_as_start_time = options.use_update_as_start_time
        self._queued: List[SimTrial] = queued or []
        self._running: List[SimTrial] = running or []
        self._failed: List[SimTrial] = failed or []
        self._completed: List[SimTrial] = completed or []
        self._internal_clock = options.internal_clock
        self._verbose_logging = verbose_logging
        # Snapshot of the initial state so reset() can restore it later.
        self._init_state = self.state()
        self._create_index_to_trial_map()

    @property
    def num_queued(self) -> int:
        """The number of queued trials (to run as soon as capacity is available)."""
        return len(self._queued)

    @property
    def num_running(self) -> int:
        """The number of currently running trials."""
        return len(self._running)

    @property
    def num_failed(self) -> int:
        """The number of failed trials."""
        return len(self._failed)

    @property
    def num_completed(self) -> int:
        """The number of completed trials."""
        return len(self._completed)

    @property
    def use_internal_clock(self) -> bool:
        """Whether or not we are using the internal clock."""
        return self._internal_clock is not None

    @property
    def time(self) -> float:
        """The current time (internal clock if set, otherwise wall-clock)."""
        return self._internal_clock if self.use_internal_clock else time.time()

    @property
    def all_trials(self) -> List[SimTrial]:
        """All trials on the simulator."""
        return self._queued + self._running + self._completed + self._failed

    def update(self) -> None:
        """Update the state of the simulator."""
        if self.use_internal_clock:
            # Internal time advances by one tick per update() call.
            self._internal_clock += 1
        self._update(self.time)
        state = self.state()
        logger.info(
            "\n-----------\n"
            f"Updated backend simulator state (time = {self.time}):\n"
            f"** Queued:\n{format(state.queued)}\n"
            f"** Running:\n{format(state.running)}\n"
            f"** Failed:\n{format(state.failed)}\n"
            f"** Completed:\n{format(state.completed)}\n"
            f"-----------\n"
        )

    def reset(self) -> None:
        """Reset the simulator to its initial state.

        NOTE(review): only max_concurrency, time_scaling, the clock, and the
        trial lists are restored — failure_rate and use_update_as_start_time
        are not; confirm this is intentional.
        """
        self.max_concurrency = self._init_state.options.max_concurrency
        self.time_scaling = self._init_state.options.time_scaling
        self._internal_clock = self._init_state.options.internal_clock
        self._queued = [SimTrial(**args) for args in self._init_state.queued]
        self._running = [SimTrial(**args) for args in self._init_state.running]
        self._failed = [SimTrial(**args) for args in self._init_state.failed]
        self._completed = [SimTrial(**args) for args in self._init_state.completed]
        self._create_index_to_trial_map()

    def state(self) -> BackendSimulatorState:
        """Return a ``BackendSimulatorState`` containing the state of the simulator."""
        options = BackendSimulatorOptions(
            max_concurrency=self.max_concurrency,
            time_scaling=self.time_scaling,
            failure_rate=self.failure_rate,
            internal_clock=self._internal_clock,
            use_update_as_start_time=self.use_update_as_start_time,
        )
        # Trials are snapshotted as plain dicts so the state is decoupled
        # from the live SimTrial objects.
        return BackendSimulatorState(
            options=options,
            verbose_logging=self._verbose_logging,
            queued=[q.__dict__.copy() for q in self._queued],
            running=[r.__dict__.copy() for r in self._running],
            failed=[r.__dict__.copy() for r in self._failed],
            completed=[c.__dict__.copy() for c in self._completed],
        )

    @classmethod
    def from_state(cls, state: BackendSimulatorState):
        """Construct a simulator from a state.

        Args:
            state: A ``BackendSimulatorState`` to set the simulator to.

        Returns:
            A ``BackendSimulator`` with the desired state.
        """
        trial_types = {
            "queued": state.queued,
            "running": state.running,
            "failed": state.failed,
            "completed": state.completed,
        }
        trial_kwargs = {
            key: [SimTrial(**kwargs) for kwargs in trial_types[key]]  # pyre-ignore [6]
            for key in ("queued", "running", "failed", "completed")
        }
        return cls(
            options=state.options, verbose_logging=state.verbose_logging, **trial_kwargs
        )

    def run_trial(self, trial_index: int, runtime: float) -> None:
        """Run a simulated trial.

        Args:
            trial_index: The index of the trial (usually the Ax trial index)
            runtime: The runtime of the simulation. Typically sampled from the
                runtime model of a simulation model.

        Internally, the runtime is scaled by the `time_scaling` factor, so that
        the simulation can run arbitrarily faster than the underlying evaluation.
        """
        # scale runtime to simulation
        sim_runtime = runtime / self.time_scaling
        # flip a coin to see if the trial fails (for now fail instantly)
        # TODO: Allow failure behavior based on a survival rate
        if self.failure_rate > 0:
            if random.random() < self.failure_rate:
                self._failed.append(
                    SimTrial(
                        trial_index=trial_index,
                        sim_runtime=sim_runtime,
                        sim_start_time=self.time,
                    )
                )
                return
        if self.num_running < self.max_concurrency:
            # note that though these are running for simulation purposes,
            # the trial status does not yet get updated (this is also how it
            # works in the real world, this requires updating the trial status manually)
            curr_time = self.time
            new_trial = SimTrial(
                trial_index=trial_index,
                sim_runtime=sim_runtime,
                sim_start_time=curr_time,
                sim_queued_time=curr_time,
            )
            self.new_trial(trial=new_trial, status=TrialStatus.RUNNING)
        else:
            # No capacity: queue the trial; its start time is set later by
            # _update() when a running trial finishes.
            new_trial = SimTrial(
                trial_index=trial_index,
                sim_runtime=sim_runtime,
                sim_queued_time=self.time,
            )
            self.new_trial(trial=new_trial, status=TrialStatus.STAGED)

    def new_trial(self, trial: SimTrial, status: TrialStatus) -> None:
        """Register a trial into the simulator.

        Args:
            trial: A new trial to add.
            status: The status of the new trial, either STAGED (add to ``self._queued``)
                or RUNNING (add to ``self._running``).
        """
        if status == TrialStatus.STAGED:
            self._queued.append(trial)
        elif status == TrialStatus.RUNNING:
            self._running.append(trial)
        else:
            raise ValueError("New trials must be either staged or running.")
        self._index_to_trial_map[trial.trial_index] = trial

    def stop_trial(self, trial_index: int) -> None:
        """Stop a simulated trial by setting the completed time to the current time.

        Args:
            trial_index: The index of the trial to stop.
        """
        trial_status = self.lookup_trial_index_status(trial_index)
        if trial_status is not TrialStatus.RUNNING:
            # Only running trials can be early-stopped; everything else is a no-op.
            logger.info(
                f"Trial {trial_index} is not currently running (has status "
                f"{trial_status}) and cannot be stopped."
            )
        else:
            trial = self._index_to_trial_map[trial_index]
            trial.sim_completed_time = self.time
            logger.info(
                f"Trial {trial_index} stopped at time {trial.sim_completed_time}."
            )

    def status(self) -> SimStatus:
        """Return the internal status of the simulator.

        Returns:
            A ``SimStatus`` object representing the current simulator state.
        """
        now = self.time
        return SimStatus(
            queued=[t.trial_index for t in self._queued],
            running=[t.trial_index for t in self._running],
            failed=[t.trial_index for t in self._failed],
            time_remaining=[
                # pyre-fixme[58]: `+` is not supported for operand types
                #  `Optional[float]` and `float`.
                t.sim_start_time + t.sim_runtime - now
                for t in self._running
            ],
            completed=[t.trial_index for t in self._completed],
        )

    def lookup_trial_index_status(self, trial_index: int) -> Optional[TrialStatus]:
        """Lookup the trial status of a ``trial_index``.

        Args:
            trial_index: The index of the trial to check.

        Returns:
            A ``TrialStatus`` (or None if the index is unknown).
        """
        sim_status = self.status()
        if trial_index in sim_status.queued:
            return TrialStatus.STAGED
        elif trial_index in sim_status.running:
            return TrialStatus.RUNNING
        elif trial_index in sim_status.completed:
            return TrialStatus.COMPLETED
        elif trial_index in sim_status.failed:
            return TrialStatus.FAILED
        return None

    def get_sim_trial_by_index(self, trial_index: int) -> Optional[SimTrial]:
        """Get a ``SimTrial`` by ``trial_index``.

        Args:
            trial_index: The index of the trial to return.

        Returns:
            A ``SimTrial`` with the index ``trial_index`` or None if not found.
        """
        return self._index_to_trial_map.get(trial_index)

    def _update_completed(self, timestamp: float) -> List[SimTrial]:
        """Look through running trials and see if any trials have completed
        since the last check. Such trials could have completed naturally (in
        this case, ``sim_completed_time`` is None) or have been given a artificial
        completion time (``sim_completed_time`` is not None) via early stopping.

        Args:
            timestamp: The current timestamp.
        """
        completed_since_last = []
        new_running = []
        for trial in self._running:
            # pyre-fixme[58]: `+` is not supported for operand types
            #  `Optional[float]` and `float`.
            if timestamp >= trial.sim_start_time + trial.sim_runtime:
                # Natural completion: record the exact finish time.
                completed_since_last.append(trial)
                trial.sim_completed_time = (
                    trial.sim_start_time + trial.sim_runtime  # pyre-ignore[58]
                )
            elif (
                trial.sim_completed_time is not None
                and timestamp >= trial.sim_completed_time
            ):
                completed_since_last.append(trial)  # was early stopped
            else:
                new_running.append(trial)
        self._running = new_running
        self._completed.extend(completed_since_last)
        return completed_since_last

    def _update(self, timestamp: float) -> None:
        """Check if trials have completed (or stopped) and update the simulator.

        Args:
            timestamp: The current timestamp.
        """
        completed_since_last = self._update_completed(timestamp)
        # if no trial has finished since the last call we're done
        if len(completed_since_last) == 0:
            return
        # if at least one trial has finished, we need to graduate queued trials to
        # running trials. Since all we need to keep track of is the start_time, we can
        # do this retroactively.
        # TODO: Improve performance / make less ad hoc by using a priority queue
        for c in completed_since_last:
            if self.num_queued > 0:
                new_running_trial = self._queued.pop(0)
                sim_start_time = (
                    # pyre-fixme[58]: `+` is not supported for operand types
                    #  `Optional[float]` and `float`.
                    c.sim_start_time + c.sim_runtime
                    if not self.use_update_as_start_time
                    else self.time
                )
                new_running_trial.sim_start_time = sim_start_time
                self._running.append(new_running_trial)
        # since these graduated trials could both have started and finished in between
        # the simulation updates, we need to re-run the update with the new state
        self._update(timestamp)

    def _create_index_to_trial_map(self) -> None:
        """Create the index to trial map, which is useful for getting
        the ``SimTrial`` objects, as in ``get_sim_trial_by_index``."""
        self._index_to_trial_map = {t.trial_index: t for t in self.all_trials}
def format(trial_list: List[Dict[str, Optional[float]]]) -> str:
    """Render a list of trial dicts, one per line.

    Note: intentionally keeps the name ``format`` (shadowing the builtin)
    because module code calls it by this name.
    """
    return "\n".join(str(item) for item in trial_list)
| 38.751064 | 88 | 0.624279 |
acf760788e21874d8f18f048469846a096268e0e | 14,266 | py | Python | Course 4/Week 2/KerasTutorial/Keras+-+Tutorial+-+Happy+House+v2.py | raja17021998/deep-learning.ai-Coursera-Course-Andrew-Ng | 3321a1b7767edea75b5e7ea43d106fb86968599b | [
"MIT"
] | 77 | 2017-09-19T19:11:47.000Z | 2022-01-23T17:20:24.000Z | Course 4/Week 2/KerasTutorial/Keras+-+Tutorial+-+Happy+House+v2.py | WJULYW/Deep-LearningAI | de914fc247785f2e6c5a7ee56c1b02b76f60d1c7 | [
"MIT"
] | null | null | null | Course 4/Week 2/KerasTutorial/Keras+-+Tutorial+-+Happy+House+v2.py | WJULYW/Deep-LearningAI | de914fc247785f2e6c5a7ee56c1b02b76f60d1c7 | [
"MIT"
] | 61 | 2017-09-29T01:43:52.000Z | 2022-03-28T16:01:06.000Z |
# coding: utf-8
# # Keras tutorial - the Happy House
#
# Welcome to the first assignment of week 2. In this assignment, you will:
# 1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
# 2. See how you can in a couple of hours build a deep learning algorithm.
#
# Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
#
# In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
# In[1]:
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
# Keras layers in this notebook expect NHWC tensors ("channels_last").
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
# Notebook magic (this file is an exported Jupyter notebook): render plots inline.
get_ipython().magic('matplotlib inline')
# **Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
# ## 1 - The Happy House
#
# For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
#
# <img src="images/happy-house.jpg" style="width:350px;height:270px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
#
#
# As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm which that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
#
# You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
#
# <img src="images/house-members.png" style="width:550px;height:250px;">
#
# Run the following code to normalize the dataset and learn about its shapes.
# In[2]:
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Normalize image vectors to [0, 1]
X_train = X_train_orig/255.
X_test = X_test_orig/255.

# Reshape labels to (num_examples, 1)
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T

print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# **Details of the "Happy" dataset**:
# - Images are of shape (64,64,3)
# - Training: 600 pictures
# - Test: 150 pictures
#
# It is now time to solve the "Happy" Challenge.
# ## 2 - Building a model in Keras
#
# Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
#
# Here is an example of a model in Keras:
#
# ```python
# def model(input_shape):
# # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# X_input = Input(input_shape)
#
# # Zero-Padding: pads the border of X_input with zeroes
# X = ZeroPadding2D((3, 3))(X_input)
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), name='max_pool')(X)
#
# # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
# X = Flatten()(X)
# X = Dense(1, activation='sigmoid', name='fc')(X)
#
# # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
# model = Model(inputs = X_input, outputs = X, name='HappyModel')
#
# return model
# ```
#
# Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the commputation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
#
# **Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
#
# **Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
# In[16]:
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """
    # Input placeholder for one image of shape `input_shape`.
    inputs = Input(input_shape)

    # Pad -> CONV -> BatchNorm -> ReLU -> MaxPool -> Flatten -> sigmoid FC
    x = ZeroPadding2D((3, 3))(inputs)
    x = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(x)
    x = BatchNormalization(axis=3, name='bn0')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), name='max_pool')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='fc')(x)

    # Wrap the graph into a Keras Model instance used for train/test.
    return Model(inputs=inputs, outputs=x, name='HappyModel')
# You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
# 1. Create the model by calling the function above
# 2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
# 3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
# 4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
#
# If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
#
# **Exercise**: Implement step 1, i.e. create the model.
# In[17]:
# Step 1: create the model.
### START CODE HERE ### (1 line)
happyModel = HappyModel(X_train.shape[1:])
### END CODE HERE ###

# Step 2: compile (binary classification -> binary_crossentropy).
### START CODE HERE ### (1 line)
happyModel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics= ['accuracy'])
### END CODE HERE ###

# Step 3: train.
### START CODE HERE ### (1 line)
happyModel.fit(X_train, Y_train, epochs = 50, batch_size = 50)
### END CODE HERE ###

# Step 4: evaluate on the test set; evaluate() returns [loss, accuracy].
### START CODE HERE ### (1 line)
preds = happyModel.evaluate(X_test, Y_test, batch_size = 30)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
#
# To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
#
# If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:
#
# - Try using blocks of CONV->BATCHNORM->RELU such as:
# ```python
# X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
# ```
# until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
# - You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
# - Change your optimizer. We find Adam works well.
# - If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
# - Run on more epochs, until you see the train accuracy plateauing.
#
# Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
#
# **Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
#
# ## 3 - Conclusion
#
# Congratulations, you have solved the Happy House challenge!
#
# Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
# <font color='blue'>
# **What we would like you to remember from this assignment:**
# - Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
# - Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
# ## 4 - Test with your own image (Optional)
#
# Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the following code
# 4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
#
# The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
# In[22]:
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
# Load and resize the image to the model's 64x64 input size.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)

# Add a batch dimension and apply the same preprocessing as training.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

# 0 -> unhappy, 1 -> happy.
print(happyModel.predict(x))
# ## 5 - Other useful functions in Keras (Optional)
#
# Two other basic features of Keras that you'll find useful are:
# - `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
# - `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
#
# Run the following code.
# In[23]:
# Print a per-layer table of output shapes and parameter counts.
happyModel.summary()

# In[24]:
# Render the model graph to a PNG and display it inline as SVG.
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
| 48.359322 | 726 | 0.71709 |
acf76149891552597e2762fa5dc3f626db14273f | 4,086 | py | Python | test/test_grad.py | rosinality/depthwise-conv-pytorch | ce0d7097d7548cfe1393415864c0d3fc24e02f25 | [
"BSL-1.0",
"Apache-2.0"
] | 45 | 2020-02-20T18:17:51.000Z | 2021-12-22T05:39:32.000Z | test/test_grad.py | rosinality/depthwise-conv-pytorch | ce0d7097d7548cfe1393415864c0d3fc24e02f25 | [
"BSL-1.0",
"Apache-2.0"
] | 3 | 2020-07-20T08:34:28.000Z | 2021-12-19T12:59:27.000Z | test/test_grad.py | rosinality/depthwise-conv-pytorch | ce0d7097d7548cfe1393415864c0d3fc24e02f25 | [
"BSL-1.0",
"Apache-2.0"
] | 2 | 2020-03-27T03:28:12.000Z | 2021-12-27T02:50:37.000Z | import torch
from torch.autograd import gradcheck
from torch_dwconv import depthwise_conv2d
BATCH_SIZE = 8


def make_tensor(N, C, H, W, kernel_size, input_grad=False, kernel_grad=False):
    """Create a random (input, depthwise-kernel) pair on the GPU.

    Both tensors are double precision, which ``torch.autograd.gradcheck``
    needs for its finite-difference comparisons.  ``input_grad`` /
    ``kernel_grad`` select which tensor will be differentiated.
    """
    inp = torch.randn(N, C, H, W).double().to('cuda')
    weight = torch.randn(C, 1, kernel_size, kernel_size).double().to('cuda')
    inp.requires_grad_(input_grad)
    weight.requires_grad_(kernel_grad)
    return inp, weight
def check_input_grad(N, C, H, W, kernel_size, stride, padding):
    """Numerically gradcheck depthwise_conv2d w.r.t. its input tensor."""
    inp, weight = make_tensor(N, C, H, W, kernel_size, input_grad=True)

    def objective(t):
        return depthwise_conv2d(t, weight, stride=stride, padding=padding).sum()

    return gradcheck(objective, inp, raise_exception=False)


def check_kernel_grad(N, C, H, W, kernel_size, stride, padding):
    """Numerically gradcheck depthwise_conv2d w.r.t. its kernel tensor."""
    inp, weight = make_tensor(N, C, H, W, kernel_size, kernel_grad=True)

    def objective(t):
        return depthwise_conv2d(inp, t, stride=stride, padding=padding).sum()

    return gradcheck(objective, weight, raise_exception=False)
# ---------------------------------------------------------------------------
# Gradient checks.  The test names encode the variation being exercised:
# kernel size 3 vs 5 ("large_kernel"), stride 1 vs 2 ("stride"), padding
# "same" vs none ("no_pad"), and odd spatial dimensions ("odd").
# ---------------------------------------------------------------------------

# Input gradients, 34x34 / 33x35 spatial sizes.
def test_input_grad_large_size():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 3, 1, 1)
def test_input_grad_large_size_large_kernel():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 5, 1, 2)
def test_input_grad_large_size_stride():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 3, 2, 1)
def test_input_grad_large_size_large_kernel_stride():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 5, 2, 2)
def test_input_grad_large_size_no_pad():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 3, 1, 0)
def test_input_grad_large_size_large_kernel_no_pad():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 5, 1, 0)
def test_input_grad_large_size_stride_no_pad():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 3, 2, 0)
def test_input_grad_large_size_large_kernel_stride_no_pad():
    assert check_input_grad(BATCH_SIZE, 8, 34, 34, 5, 2, 0)
def test_input_grad_large_size_odd():
    assert check_input_grad(BATCH_SIZE, 8, 33, 35, 3, 1, 1)
def test_input_grad_large_size_odd_large_kernel():
    assert check_input_grad(BATCH_SIZE, 8, 33, 35, 5, 1, 2)
def test_input_grad_large_size_odd_stride():
    assert check_input_grad(BATCH_SIZE, 8, 33, 35, 3, 2, 1)
def test_input_grad_large_size_odd_large_kernel_stride():
    assert check_input_grad(BATCH_SIZE, 8, 33, 35, 5, 2, 2)

# Kernel gradients, same size/stride/padding matrix as above.
def test_kernel_grad_large_size():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 3, 1, 1)
def test_kernel_grad_large_size_large_kernel():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 5, 1, 2)
def test_kernel_grad_large_size_stride():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 3, 2, 1)
def test_kernel_grad_large_size_large_kernel_stride():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 5, 2, 2)
def test_kernel_grad_large_size_no_pad():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 3, 1, 0)
def test_kernel_grad_large_size_large_kernel_no_pad():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 5, 1, 0)
def test_kernel_grad_large_size_stride_no_pad():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 3, 2, 0)
def test_kernel_grad_large_size_large_kernel_stride_no_pad():
    assert check_kernel_grad(BATCH_SIZE, 8, 34, 34, 5, 2, 0)
def test_kernel_grad_large_size_odd():
    assert check_kernel_grad(BATCH_SIZE, 8, 33, 35, 3, 1, 1)
def test_kernel_grad_large_size_odd_large_kernel():
    assert check_kernel_grad(BATCH_SIZE, 8, 33, 35, 5, 1, 2)
def test_kernel_grad_large_size_odd_stride():
    assert check_kernel_grad(BATCH_SIZE, 8, 33, 35, 3, 2, 1)
def test_kernel_grad_large_size_odd_large_kernel_stride():
    assert check_kernel_grad(BATCH_SIZE, 8, 33, 35, 5, 2, 2)

# Small (16x16) spatial size, both gradient directions.
def test_input_grad_small_size():
    assert check_input_grad(BATCH_SIZE, 8, 16, 16, 3, 1, 1)
def test_input_grad_small_size_large_kernel():
    assert check_input_grad(BATCH_SIZE, 8, 16, 16, 5, 1, 2)
def test_kernel_grad_small_size():
    assert check_kernel_grad(BATCH_SIZE, 8, 16, 16, 3, 1, 1)
def test_kernel_grad_small_size_large_kernel():
    assert check_kernel_grad(BATCH_SIZE, 8, 16, 16, 5, 1, 2)
| 26.532468 | 83 | 0.731522 |
acf763006ab77b24a46bcabe5ce51d30da5cc643 | 1,544 | py | Python | openstack/identity/v3/group.py | gthiemonge/openstacksdk | e1c6f233eb6b07f488fe4acded72312d8d265b33 | [
"Apache-2.0"
] | null | null | null | openstack/identity/v3/group.py | gthiemonge/openstacksdk | e1c6f233eb6b07f488fe4acded72312d8d265b33 | [
"Apache-2.0"
] | null | null | null | openstack/identity/v3/group.py | gthiemonge/openstacksdk | e1c6f233eb6b07f488fe4acded72312d8d265b33 | [
"Apache-2.0"
] | 1 | 2021-03-12T14:28:28.000Z | 2021-03-12T14:28:28.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource
class Group(resource.Resource):
    """OpenStack Identity (Keystone) v3 ``group`` resource.

    Maps the ``/groups`` endpoint; supports create/fetch/commit/delete and
    listing, with updates sent as HTTP PATCH requests.
    """
    resource_key = 'group'
    resources_key = 'groups'
    base_path = '/groups'
    service = identity_service.IdentityService()

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    commit_method = 'PATCH'

    # Server-side filters accepted when listing groups.
    _query_mapping = resource.QueryParameters(
        'domain_id', 'name',
    )

    # Properties
    #: The description of this group. *Type: string*
    description = resource.Body('description')
    #: References the domain ID which owns the group; if a domain ID is not
    #: specified by the client, the Identity service implementation will
    #: default it to the domain ID to which the client's token is scoped.
    #: *Type: string*
    domain_id = resource.Body('domain_id')
    #: Unique group name, within the owning domain. *Type: string*
    name = resource.Body('name')
| 34.311111 | 75 | 0.715674 |
acf763889014b9d491a209bef36089e2868cd193 | 375 | py | Python | pyggi/lib/config.py | nak/pyggi | 139a72d72c1a3bb17005e0c9c64e06ba4e2cd329 | [
"BSD-3-Clause"
] | 1 | 2017-12-22T06:58:47.000Z | 2017-12-22T06:58:47.000Z | pyggi/lib/config.py | nak/pyggi | 139a72d72c1a3bb17005e0c9c64e06ba4e2cd329 | [
"BSD-3-Clause"
] | null | null | null | pyggi/lib/config.py | nak/pyggi | 139a72d72c1a3bb17005e0c9c64e06ba4e2cd329 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2011 by Tobias Heinzen
:license: BSD, see LICENSE for more details
"""
import ConfigParser
import os
config = ConfigParser.SafeConfigParser()


def load_config():
    """Populate the module-level ``config`` from ``config.cfg`` located two
    directories above this file (the repository root)."""
    lib_dir = os.path.abspath(os.path.dirname(__file__))
    cfg_file = os.path.abspath(os.path.join(lib_dir, '../../config.cfg'))
    config.read(cfg_file)
| 20.833333 | 73 | 0.674667 |
acf763d2b6b419d96e94c2c7e6a7b06c2fbe7128 | 185 | py | Python | man_sim/urls.py | terry-brett/SimulatorX | ee2f15655c77daab551936c908e84a284e7c425a | [
"MIT"
] | null | null | null | man_sim/urls.py | terry-brett/SimulatorX | ee2f15655c77daab551936c908e84a284e7c425a | [
"MIT"
] | 1 | 2022-03-13T13:57:03.000Z | 2022-03-13T13:57:03.000Z | man_sim/urls.py | terry-brett/SimulatorX | ee2f15655c77daab551936c908e84a284e7c425a | [
"MIT"
] | 2 | 2022-01-04T12:27:11.000Z | 2022-03-17T10:22:09.000Z | from django.urls import include, path
from .views import *
app_name = "man_sim"
urlpatterns = [
path("", getIndex, name="index"),
path("/result", getResult, name="result"),
]
| 18.5 | 46 | 0.659459 |
acf7650f9966f6d0e7da80858989b6dc1ca0bfc0 | 14,161 | py | Python | src/sage/schemes/elliptic_curves/ell_torsion.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/elliptic_curves/ell_torsion.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/elliptic_curves/ell_torsion.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Torsion subgroups of elliptic curves over number fields (including `\QQ`)
AUTHORS:
- Nick Alexander: original implementation over `\QQ`
- Chris Wuthrich: original implementation over number fields
- John Cremona: rewrote p-primary part to use division
polynomials, added some features, unified Number Field and `\QQ` code.
"""
# ****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.rings.all import RationalField
import sage.groups.additive_abelian.additive_abelian_wrapper as groups
from sage.structure.richcmp import richcmp_method, richcmp
@richcmp_method
class EllipticCurveTorsionSubgroup(groups.AdditiveAbelianGroupWrapper):
    r"""
    The torsion subgroup of an elliptic curve over a number field.

    EXAMPLES:

    Examples over `\QQ`::

        sage: E = EllipticCurve([-4, 0]); E
        Elliptic Curve defined by y^2 = x^3 - 4*x over Rational Field
        sage: G = E.torsion_subgroup(); G
        Torsion Subgroup isomorphic to Z/2 + Z/2 associated to the Elliptic Curve defined by y^2 = x^3 - 4*x over Rational Field
        sage: G.order()
        4
        sage: G.gen(0)
        (-2 : 0 : 1)
        sage: G.gen(1)
        (0 : 0 : 1)
        sage: G.ngens()
        2

    ::

        sage: E = EllipticCurve([17, -120, -60, 0, 0]); E
        Elliptic Curve defined by y^2 + 17*x*y - 60*y = x^3 - 120*x^2 over Rational Field
        sage: G = E.torsion_subgroup(); G
        Torsion Subgroup isomorphic to Trivial group associated to the Elliptic Curve defined by y^2 + 17*x*y - 60*y = x^3 - 120*x^2 over Rational Field
        sage: G.gens()
        ()
        sage: e = EllipticCurve([0, 33076156654533652066609946884,0,\
        347897536144342179642120321790729023127716119338758604800,\
        1141128154369274295519023032806804247788154621049857648870032370285851781352816640000])
        sage: e.torsion_order()
        16

    Constructing points from the torsion subgroup::

        sage: E = EllipticCurve('14a1')
        sage: T = E.torsion_subgroup()
        sage: [E(t) for t in T]
        [(0 : 1 : 0),
         (9 : 23 : 1),
         (2 : 2 : 1),
         (1 : -1 : 1),
         (2 : -5 : 1),
         (9 : -33 : 1)]

    An example where the torsion subgroup is not cyclic::

        sage: E = EllipticCurve([0,0,0,-49,0])
        sage: T = E.torsion_subgroup()
        sage: [E(t) for t in T]
        [(0 : 1 : 0), (0 : 0 : 1), (-7 : 0 : 1), (7 : 0 : 1)]

    An example where the torsion subgroup is trivial::

        sage: E = EllipticCurve('37a1')
        sage: T = E.torsion_subgroup()
        sage: T
        Torsion Subgroup isomorphic to Trivial group associated to the Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
        sage: [E(t) for t in T]
        [(0 : 1 : 0)]

    Examples over other Number Fields::

        sage: E = EllipticCurve('11a1')
        sage: K.<i> = NumberField(x^2+1)
        sage: EK = E.change_ring(K)
        sage: from sage.schemes.elliptic_curves.ell_torsion import EllipticCurveTorsionSubgroup
        sage: EllipticCurveTorsionSubgroup(EK)
        Torsion Subgroup isomorphic to Z/5 associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in i with defining polynomial x^2 + 1

        sage: E = EllipticCurve('11a1')
        sage: K.<i> = NumberField(x^2+1)
        sage: EK = E.change_ring(K)
        sage: T = EK.torsion_subgroup()
        sage: T.ngens()
        1
        sage: T.gen(0)
        (5 : -6 : 1)

    Note: this class is normally constructed indirectly as follows::

        sage: T = EK.torsion_subgroup(); T
        Torsion Subgroup isomorphic to Z/5 associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in i with defining polynomial x^2 + 1
        sage: type(T)
        <class 'sage.schemes.elliptic_curves.ell_torsion.EllipticCurveTorsionSubgroup_with_category'>

    AUTHORS:

    - Nick Alexander: initial implementation over `\QQ`.

    - Chris Wuthrich: initial implementation over number fields.

    - John Cremona: additional features and unification.
    """

    def __init__(self, E):
        r"""
        Initialization function for EllipticCurveTorsionSubgroup class

        INPUT:

        - ``E`` -- An elliptic curve defined over a number field (including `\Q`)

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.ell_torsion import EllipticCurveTorsionSubgroup
            sage: E = EllipticCurve('11a1')
            sage: K.<i> = NumberField(x^2+1)
            sage: EK = E.change_ring(K)
            sage: EllipticCurveTorsionSubgroup(EK)
            Torsion Subgroup isomorphic to Z/5 associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in i with defining polynomial x^2 + 1

        Note: this class is normally constructed indirectly as follows::

            sage: T = EK.torsion_subgroup(); T
            Torsion Subgroup isomorphic to Z/5 associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in i with defining polynomial x^2 + 1
            sage: type(T)
            <class 'sage.schemes.elliptic_curves.ell_torsion.EllipticCurveTorsionSubgroup_with_category'>

            sage: T == loads(dumps(T))  # known bug, see http://trac.sagemath.org/sage_trac/ticket/11599#comment:7
            True
        """
        self.__E = E
        self.__K = E.base_field()

        # Fast path over QQ: PARI's elltors() returns the group structure
        # and generators directly.
        if self.__K is RationalField():
            G = self.__E.pari_curve().elltors()
            structure = G[1].sage()
            gens = G[2].sage()

            self.__torsion_gens = [self.__E(P) for P in gens]
            groups.AdditiveAbelianGroupWrapper.__init__(self, self.__E(0).parent(), self.__torsion_gens, structure)
            return

        # Number-field case: assemble (up to) two generators T1, T2 of
        # orders k1, k2 from the p-primary torsion bases, one prime at a
        # time, for each prime p dividing a multiple of the torsion order.
        T1 = E(0)  # these will be the two generators
        T2 = E(0)
        k1 = 1  # with their order
        k2 = 1

        # find a multiple of the order of the torsion group
        bound = torsion_bound(E, number_of_places=20)

        # now do prime by prime
        for p, e in bound.factor():
            ptor = E._p_primary_torsion_basis(p, e)
            if ptor:
                T1 += ptor[0][0]
                k1 *= p**(ptor[0][1])
            if len(ptor) > 1:
                T2 += ptor[1][0]
                k2 *= p**(ptor[1][1])

        if k1 == 1:
            structure = []
            gens = []
        elif k2 == 1:
            structure = [k1]
            gens = [T1]
        else:
            structure = [k1, k2]
            gens = [T1, T2]

        # NOTE(review): ``gens`` is computed to mirror ``structure`` but is
        # unused; the wrapper is initialised with ``[T1, T2]`` regardless
        # (T2 is the identity when the group is cyclic or trivial).  This
        # relies on AdditiveAbelianGroupWrapper tolerating the surplus
        # generator -- confirm before "fixing" the apparent inconsistency.
        #self.__torsion_gens = gens
        self._structure = structure
        groups.AdditiveAbelianGroupWrapper.__init__(self, T1.parent(),
                                                    [T1, T2], structure)

    def _repr_(self):
        """
        String representation of an instance of the EllipticCurveTorsionSubgroup class.

        EXAMPLES::

            sage: E = EllipticCurve('11a1')
            sage: K.<i> = NumberField(x^2+1)
            sage: EK = E.change_ring(K)
            sage: T = EK.torsion_subgroup(); T._repr_()
            'Torsion Subgroup isomorphic to Z/5 associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in i with defining polynomial x^2 + 1'
        """
        return "Torsion Subgroup isomorphic to %s associated to the %s" % (self.short_name(), self.__E)

    def __richcmp__(self, other, op):
        r"""
        Compare two torsion groups by simply comparing the elliptic curves.

        EXAMPLES::

            sage: E = EllipticCurve('37a1')
            sage: tor = E.torsion_subgroup()
            sage: tor == tor
            True
        """
        if type(self) != type(other):
            return NotImplemented
        return richcmp(self.__E, other.__E, op)

    def curve(self):
        """
        Return the curve of this torsion subgroup.

        EXAMPLES::

            sage: E = EllipticCurve('11a1')
            sage: K.<i> = NumberField(x^2+1)
            sage: EK = E.change_ring(K)
            sage: T = EK.torsion_subgroup()
            sage: T.curve() is EK
            True
        """
        return self.__E

    @cached_method
    def points(self):
        """
        Return a list of all the points in this torsion subgroup.

        The list is cached.

        EXAMPLES::

            sage: K.<i> = NumberField(x^2 + 1)
            sage: E = EllipticCurve(K,[0,0,0,1,0])
            sage: tor = E.torsion_subgroup()
            sage: tor.points()
            [(0 : 1 : 0), (0 : 0 : 1), (-i : 0 : 1), (i : 0 : 1)]
        """
        return [x.element() for x in self]
def torsion_bound(E, number_of_places=20):
    r"""
    Return an upper bound on the order of the torsion subgroup.

    INPUT:

    - ``E`` -- an elliptic curve over `\QQ` or a number field

    - ``number_of_places`` (positive integer, default = 20) -- the
      number of places that will be used to find the bound

    OUTPUT:

    (integer) An upper bound on the torsion order.

    ALGORITHM:

    An upper bound on the order of the torsion group of the elliptic
    curve is obtained by counting points modulo several primes of good
    reduction. Note that the upper bound returned by this function is
    a multiple of the order of the torsion group, and in general will
    be greater than the order.

    To avoid nontrivial arithmetic in the base field (in particular,
    to avoid having to compute the maximal order) we only use prime
    `P` above rational primes `p` which do not divide the discriminant
    of the equation order.

    EXAMPLES::

        sage: CDB = CremonaDatabase()
        sage: from sage.schemes.elliptic_curves.ell_torsion import torsion_bound
        sage: [torsion_bound(E) for E in CDB.iter([14])]
        [6, 6, 6, 6, 6, 6]
        sage: [E.torsion_order() for E in CDB.iter([14])]
        [6, 6, 2, 6, 2, 6]

    An example over a relative number field (see :trac:`16011`)::

        sage: R.<x> = QQ[]
        sage: F.<a> = QuadraticField(5)
        sage: K.<b> = F.extension(x^2-3)
        sage: E = EllipticCurve(K,[0,0,0,b,1])
        sage: E.torsion_subgroup().order()
        1

    An example of a base-change curve from `\QQ` to a degree 16 field::

        sage: from sage.schemes.elliptic_curves.ell_torsion import torsion_bound
        sage: f = PolynomialRing(QQ,'x')([5643417737593488384,0,
        ....:     -11114515801179776,0,-455989850911004,0,379781901872,
        ....:     0,14339154953,0,-1564048,0,-194542,0,-32,0,1])
        sage: K = NumberField(f,'a')
        sage: E = EllipticCurve(K, [1, -1, 1, 824579, 245512517])
        sage: torsion_bound(E)
        16
        sage: E.torsion_subgroup().invariants()
        (4, 4)
    """
    from sage.rings.integer_ring import ZZ
    from sage.rings.finite_rings.finite_field_constructor import GF
    from sage.schemes.elliptic_curves.constructor import EllipticCurve

    K = E.base_field()

    # Special case K = QQ: gcd the group orders of the reductions mod p.
    if K is RationalField():
        bound = ZZ.zero()
        k = 0
        p = ZZ(2)  # so we start with 3
        E = E.integral_model()
        disc_E = E.discriminant()

        while k < number_of_places:
            p = p.next_prime()
            if p.divides(disc_E):
                continue
            k += 1
            # (An unused ``GF(p)`` construction was removed here; the
            # reduction is obtained directly from the curve.)
            new_bound = E.reduction(p).cardinality()
            bound = bound.gcd(new_bound)
            if bound == 1:
                return bound
        return bound

    # In case K is a relative extension we absolutize:
    absK = K.absolute_field('a_')
    f = absK.defining_polynomial()
    abs_map = absK.structure()[1]

    # Ensure f is monic and in ZZ[x]
    f = f.monic()
    den = f.denominator()
    if den != 1:
        x = f.parent().gen()
        n = f.degree()
        f = den**n * f(x/den)
    disc_f = f.discriminant()
    d = K.absolute_degree()

    # Now f is monic in ZZ[x] of degree d and defines the extension K = Q(a)

    # Make sure that we have a model for E with coefficients in ZZ[a]
    E = E.integral_model()
    disc_E = E.discriminant().norm()
    ainvs = [abs_map(c) for c in E.a_invariants()]

    bound = ZZ.zero()
    k = 0
    p = ZZ(2)  # so we start with 3

    try:  # special case, useful for base-changes from QQ
        ainvs = [ZZ(ai) for ai in ainvs]
        while k < number_of_places:
            p = p.next_prime()
            if p.divides(disc_E) or p.divides(disc_f):
                continue
            k += 1
            # The reduction is defined over GF(p): build the curve once per
            # prime instead of once per irreducible factor of f mod p.
            Ered = EllipticCurve(GF(p), ainvs)
            for fi, ei in f.factor_mod(p):
                new_bound = Ered.cardinality(extension_degree=fi.degree())
                bound = bound.gcd(new_bound)
                if bound == 1:
                    return bound
        return bound
    except (ValueError, TypeError):
        # ainvs are not rational integers: fall through to the general
        # case, keeping any progress already accumulated in bound/k/p.
        pass

    # General case: for each prime P | p of K (one per irreducible factor
    # fi of f mod p), reduce the curve into GF(p^deg(fi)) and count points.
    while k < number_of_places:
        p = p.next_prime()
        if p.divides(disc_E) or p.divides(disc_f):
            continue
        k += 1
        for fi, ei in f.factor_mod(p):
            di = fi.degree()
            Fq = GF((p, di))
            ai = fi.roots(Fq, multiplicities=False)[0]

            def red(c):
                # image in Fq of a coefficient c, viewed as a polynomial
                # of degree < d in the generator a
                return Fq.sum(Fq(c[j]) * ai**j for j in range(d))

            new_bound = EllipticCurve([red(c) for c in ainvs]).cardinality()
            bound = bound.gcd(new_bound)
            if bound == 1:
                return bound
    return bound
acf7657d60aa95af608bdfd50a837d669b2d459c | 559 | py | Python | face_api/migrations/0004_auto_20210525_1708.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | face_api/migrations/0004_auto_20210525_1708.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | face_api/migrations/0004_auto_20210525_1708.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-25 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('face_api', '0003_auto_20210525_1707'),
]
operations = [
migrations.AlterField(
model_name='imageuploads',
name='email',
field=models.EmailField(max_length=100),
),
migrations.AlterField(
model_name='knowledgedatabase',
name='email',
field=models.EmailField(max_length=100),
),
]
| 23.291667 | 52 | 0.588551 |
acf7661065a8142bd0768088d1966b397b73b854 | 1,062 | py | Python | CMSIS/DSP/SDFTools/examples/example7/sched.py | shosakam/CMSIS_5 | 18205c6c2b68e7e96f40dc941c47efdbdd9f7d01 | [
"Apache-2.0"
] | 1 | 2022-03-12T13:50:01.000Z | 2022-03-12T13:50:01.000Z | CMSIS/DSP/SDFTools/examples/example7/sched.py | shosakam/CMSIS_5 | 18205c6c2b68e7e96f40dc941c47efdbdd9f7d01 | [
"Apache-2.0"
] | null | null | null | CMSIS/DSP/SDFTools/examples/example7/sched.py | shosakam/CMSIS_5 | 18205c6c2b68e7e96f40dc941c47efdbdd9f7d01 | [
"Apache-2.0"
] | null | null | null | #
# Generated with CMSIS-DSP SDF Scripts.
# The generated code is not covered by CMSIS-DSP license.
#
# The support classes and code is covered by CMSIS-DSP license.
#
import sys
import numpy as np
import cmsisdsp as dsp
from cmsisdsp.sdf.nodes.simu import *
from appnodes import *
from custom import *
# Generated scheduler configuration (see header: CMSIS-DSP SDF scripts).
DEBUGSCHED=False
#
# FIFO buffers
#
# Backing storage for the two FIFO channels of the dataflow graph;
# 128 int16 samples each.
FIFOSIZE0=128
buf0=np.zeros(FIFOSIZE0,dtype=np.int16)
FIFOSIZE1=128
buf1=np.zeros(FIFOSIZE1,dtype=np.int16)
def scheduler():
    """Run the static SDF schedule until any node reports an error.

    Each schedule iteration runs the nodes in the fixed order
    source -> processing -> sink.  Returns a tuple
    ``(number_of_iterations, last_error_code)``; error codes are the
    negative values returned by a node's ``run()``.
    """
    status = 0
    iterations = 0

    # FIFO channels connecting the nodes.
    fifo0 = FIFO(FIFOSIZE0, buf0)
    fifo1 = FIFO(FIFOSIZE1, buf1)

    # Graph nodes (construction order kept identical to the generated code).
    proc = Processing(128, 128, fifo0, fifo1)
    sink = VHTSink(128, fifo1, 0)
    src = VHTSource(128, fifo0, 0)

    while status == 0:
        iterations += 1
        for node in (src, proc, sink):
            status = node.run()
            if status < 0:
                break
    return (iterations, status)
| 17.129032 | 63 | 0.640301 |
acf7671567a6d3fe7be306a3a8d743cad4ae2d2e | 188 | py | Python | UAT_scripts/time_script1.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 1 | 2020-01-15T11:04:16.000Z | 2020-01-15T11:04:16.000Z | UAT_scripts/time_script1.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 2 | 2021-03-31T19:36:19.000Z | 2021-06-10T22:29:26.000Z | UAT_scripts/time_script1.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | null | null | null | #!/usr/bin/python3
import time
value1 = 0
while True:
value1 = value1 + 1
print("Hello")
time.sleep(value1)
if value1 == 3:
print("End of the loop")
break
| 17.090909 | 32 | 0.574468 |
acf767473822a86a96d58014958d228946296b0c | 943 | py | Python | setup.py | jolespin/fastq_preprocessor | 630face4c434c7829eede6c4a728b82c96ca942a | [
"BSD-3-Clause"
] | 1 | 2022-02-17T16:30:46.000Z | 2022-02-17T16:30:46.000Z | setup.py | jolespin/fastq_preprocessor | 630face4c434c7829eede6c4a728b82c96ca942a | [
"BSD-3-Clause"
] | null | null | null | setup.py | jolespin/fastq_preprocessor | 630face4c434c7829eede6c4a728b82c96ca942a | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# Version
# Read __version__ out of the package __init__.py so the version number is
# defined in exactly one place.
version = None
with open("./fastq_preprocessor/__init__.py", "r") as f:
    for line in f.readlines():
        line = line.strip()
        if line.startswith("__version__"):
            version = line.split("=")[-1].strip().strip('"')
assert version is not None, "Check version in fastq_preprocessor/__init__.py"

setup(name='fastq_preprocessor',
      version=version,
      description='Fast short read fastq preprocessor with optional contamination removal',
      url='https://github.com/jolespin/fastq_preprocessor',
      author='Josh L. Espinoza',
      author_email='jespinoz@jcvi.org',
      license='BSD-3',
      packages=["fastq_preprocessor"],
      install_requires=[
        "genopype >=2020.3.27",
        "soothsayer_utils >=2022.1.19",
        "pandas >=0.24",
        "numpy",
        "tqdm",
        "scandir",
      ],
      include_package_data=True,
      scripts=["bin/fastq_preprocessor"],
      )
| 29.46875 | 91 | 0.640509 |
acf767ea9f0e8daabc68f31015f6f6528cb832d7 | 1,209 | py | Python | connection.py | Gusty-wind/PythonChats | 2b4ab3ff5bd1898adeecf4d989ce33c27b8ec809 | [
"Apache-2.0"
] | 1 | 2021-09-17T01:13:29.000Z | 2021-09-17T01:13:29.000Z | connection.py | yaolei/PythonChats | 3e3f090657fb6372c01524c8c7fa180c3ab614eb | [
"Apache-2.0"
] | 3 | 2021-09-17T07:06:33.000Z | 2021-09-17T08:21:54.000Z | connection.py | yaolei/PythonChats | 3e3f090657fb6372c01524c8c7fa180c3ab614eb | [
"Apache-2.0"
] | null | null | null | import psycopg2
import logging
POSTGRES_DB = "postgres"
POSTGRES_USER = "admin"
POSTGRES_PASSWORD = "12345678"
POSTGRES_SERVER = "127.0.0.1"
POSTGRES_PORT = "5432"
logger = logging.getLogger(__name__)
async def connect_to_db() -> None:
try:
logger.warn("---😏😏 DB CONNECTION START ---")
db = psycopg2.connect(
database = POSTGRES_DB,
user = POSTGRES_USER,
password = POSTGRES_PASSWORD,
host = POSTGRES_SERVER,
port = POSTGRES_PORT)
logger.warn("---😘😘 **** DB connect success **** ---")
except Exception as e:
logger.warn("--- DB CONNECTION ERROR ---")
logger.warn(e)
logger.warn("--- DB CONNECTION ERROR ---")
return db
async def close_db_connection(conn, cursor) -> None:
try:
logger.warn("--- 😚😚DB CLOSE START ---")
cursor.close()
conn.close()
logger.warn("--- 😚😚DB CLOSE SUCCESS ---")
except Exception as e:
logger.warn("--- DB COLOSE ERROR ---")
logger.warn(e)
logger.warn("--- DB COLOSE ERROR ---")
| 31 | 65 | 0.519438 |
acf768ef1922ef70a0c14dc0866670553066263a | 5,690 | py | Python | pydrill/client/result.py | dzamo/pydrill | e2e540960f56ad1017855c65b8c660ab2bb90fb8 | [
"MIT"
] | null | null | null | pydrill/client/result.py | dzamo/pydrill | e2e540960f56ad1017855c65b8c660ab2bb90fb8 | [
"MIT"
] | null | null | null | pydrill/client/result.py | dzamo/pydrill | e2e540960f56ad1017855c65b8c660ab2bb90fb8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pydrill.exceptions import ImproperlyConfigured
import logging
import re
logger = logging.getLogger('pydrill')
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
DRILL_PANDAS_TYPE_MAP = {
'BIGINT': 'Int64',
'BINARY': 'object',
'BIT': 'bool',
'DATE': 'datetime64[ns]',
'FLOAT4': 'float32',
'FLOAT8': 'float64',
'INT': 'Int32',
'INTERVALDAY': 'string' if pd.__version__ >= '1' else 'object',
'INTERVALYEAR': 'string' if pd.__version__ >= '1' else 'object',
'SMALLINT': 'Int32',
'TIME': 'timedelta64[ns]',
'TIMESTAMP': 'datetime64[ns]',
'VARDECIMAL': 'object',
'VARCHAR' : 'string' if pd.__version__ >= '1' else 'object'
} if PANDAS_AVAILABLE else None
class Result(object):
    """Base wrapper around a raw Drill HTTP response.

    :param response: the underlying HTTP response object
    :param data: decoded JSON payload of the response
    :param duration: time the request took
    """

    def __init__(self, response, data, duration, *args, **kwargs):
        self.response = response
        self.duration = duration
        self.data = data


class ResultQuery(Result):
    """
    Class responsible for maintaining information returned from Drill.
    It is iterable.
    """

    def __init__(self, response, data, duration, *args, **kwargs):
        super(ResultQuery, self).__init__(response, data, duration, *args, **kwargs)
        self.rows = data.get('rows', [])
        self.columns = data.get('columns', [])
        self.metadata = data.get('metadata', [])

    def __iter__(self):
        for row in self.rows:
            yield row

    def to_dataframe(self, dtype=None, convert_dtypes=False) -> pd.DataFrame:
        """Convert the result rows into a pandas DataFrame.

        :param dtype: if given, a single dtype applied to the whole frame
        :param convert_dtypes: if True, map each column to a pandas dtype
            using the Drill type metadata (DRILL_PANDAS_TYPE_MAP)
        :raises ImproperlyConfigured: if pandas is not installed
        """
        if not PANDAS_AVAILABLE:
            raise ImproperlyConfigured("Please install pandas to use ResultQuery.to_dataframe().")

        if not self.rows:
            return pd.DataFrame(columns=self.columns)

        if dtype:
            # the user has specified a single dtype for the entire dataframe
            return pd.DataFrame.from_dict(self.rows, dtype=dtype)[self.columns]

        df = pd.DataFrame.from_dict(self.rows)[self.columns]
        if not convert_dtypes:
            return df

        # The columns in df all have a dtype of object because Drill's HTTP API
        # always quotes the values in the JSON it returns, thereby providing
        # DataFrame.from_dict(...) with a dict of strings.  We now use the
        # metadata returned by Drill to correct this.
        for i, col_name in enumerate(self.columns):
            # strip any precision information that might be in the metadata
            # e.g. VARCHAR(10)
            col_drill_type = re.sub(r'\(.*\)', '', self.metadata[i])
            if col_drill_type not in DRILL_PANDAS_TYPE_MAP:
                # BUG FIX: this message previously referenced an undefined
                # name ``m`` and raised NameError for any unmapped type.
                logger.warn('No known mapping of Drill column {} of type {} to a Pandas dtype'.format(col_name, col_drill_type))
            else:
                col_dtype = DRILL_PANDAS_TYPE_MAP[col_drill_type]
                logger.debug('Mapping column {} of Drill type {} to dtype {}'.format(col_name, col_drill_type, col_dtype))
                # Pandas < 1.0.0 cannot handle null ints so we sometimes cannot cast to an int dtype
                can_cast = True
                if col_drill_type == 'BIT':
                    df[col_name] = df[col_name] == 'true'
                elif col_drill_type == 'TIME':  # parsing of ISO-8601 intervals appears broken as of Pandas 1.0.3
                    df[col_name] = pd.to_timedelta(df[col_name])
                elif col_drill_type in ['FLOAT4', 'FLOAT8']:
                    # coerce errors when float parsing to handle the case when Drill returns 'NaN'
                    df[col_name] = pd.to_numeric(df[col_name], errors='coerce')
                elif col_drill_type in ['BIGINT', 'INT', 'SMALLINT']:
                    df[col_name] = pd.to_numeric(df[col_name])
                    if pd.__version__ < '1' and df[col_name].isnull().values.any():
                        logger.warn('Column {} of Drill type {} contains nulls so cannot be converted to an integer dtype in Pandas < 1.0.0'.format(col_name, col_drill_type))
                        can_cast = False
                if can_cast:
                    df[col_name] = df[col_name].astype(col_dtype)
        return df
class Drillbit(object):
    """Record describing a single drillbit node: its id, address and status."""

    def __init__(self, id, address, status, *args, **kwargs):
        # ``id`` shadows the builtin, but the name is part of the keyword
        # interface (see Stats), so it is preserved.
        self.id, self.address, self.status = id, address, status
class Stats(Result):
    """Parsed view of the Drill cluster statistics metrics.

    Scalar metrics become attributes (e.g. ``drillbits_number``); every
    ``Bit #N`` metric becomes a :class:`Drillbit` in ``self.drillbits``.
    """

    def __init__(self, response, data, duration, *args, **kwargs):
        super(Stats, self).__init__(response, data, duration, *args, **kwargs)
        # Metric labels that map one-to-one onto scalar attributes.
        scalar_attrs = {
            'Number of Drill Bits': 'drillbits_number',
            'Data Port Address': 'data_port_address',
            'User Port Address': 'user_port_address',
            'Control Port Address': 'control_port_address',
            'Maximum Direct Memory': 'max_direct_memory',
        }
        self.drillbits = []
        for metric in data:
            name, value = metric['name'], metric['value']
            if name.startswith('Bit #'):
                address, status = value.split()
                self.drillbits.append(
                    Drillbit(id=name.split('#')[-1], address=address, status=status))
            elif name in scalar_attrs:
                setattr(self, scalar_attrs[name], value)
class Profiles(Result):
    """Wrapper for the Drill profiles endpoint: running and finished queries."""

    def __init__(self, response, data, duration, *args, **kwargs):
        super(Profiles, self).__init__(response, data, duration, *args, **kwargs)
        # Missing keys simply yield None, mirroring dict.get semantics.
        for attr, key in (('running_queries', 'runningQueries'),
                          ('finished_queries', 'finishedQueries')):
            setattr(self, attr, data.get(key))
acf7697a5a2cbcf0276d0229de24317453239b37 | 415 | py | Python | backend/delicate_moon_30350/wsgi.py | crowdbotics-apps/delicate-moon-30350 | 9e03df3cff4e7d2929fdd7ffdcd5e2631e80bb30 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/delicate_moon_30350/wsgi.py | crowdbotics-apps/delicate-moon-30350 | 9e03df3cff4e7d2929fdd7ffdcd5e2631e80bb30 | [
"FTL",
"AML",
"RSA-MD"
] | 28 | 2021-09-06T11:48:54.000Z | 2022-01-16T15:20:34.000Z | backend/delicate_moon_30350/wsgi.py | crowdbotics-apps/delicate-moon-30350 | 9e03df3cff4e7d2929fdd7ffdcd5e2631e80bb30 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for delicate_moon_30350 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the deployment
# environment has already set DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'delicate_moon_30350.settings')

# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| 24.411765 | 79 | 0.79759 |
acf76a168252ca98add2e784108e3d6ae0952768 | 2,500 | py | Python | poly_dis/dl_modules/chord_decoder.py | ZZWaang/icm-deep-music-generation | bcb9de482584d6a0c834316f8d404a8b5b9522e2 | [
"MIT"
] | 1 | 2021-11-20T07:30:57.000Z | 2021-11-20T07:30:57.000Z | poly_dis/dl_modules/chord_decoder.py | ZZWaang/icm-deep-music-generation | bcb9de482584d6a0c834316f8d404a8b5b9522e2 | [
"MIT"
] | null | null | null | poly_dis/dl_modules/chord_decoder.py | ZZWaang/icm-deep-music-generation | bcb9de482584d6a0c834316f8d404a8b5b9522e2 | [
"MIT"
] | null | null | null | import torch
from torch import nn
import random
class ChordDecoder(nn.Module):
    """Autoregressive GRU decoder that reconstructs a chord sequence from a
    chord latent vector ``z_chd``.

    Each decoding step produces logits for the chord root (12-way), a
    per-pitch-class chroma on/off pair (12 x 2) and the bass note (12-way).
    The loop runs ``num_step / 4`` times, so one GRU step appears to cover
    4 time steps of the sequence — assumption inferred from the range only;
    TODO confirm against the caller.
    """

    def __init__(self, input_dim=36, z_input_dim=256,
                 hidden_dim=512, z_dim=256, num_step=32):
        super(ChordDecoder, self).__init__()
        # Projects z into the initial GRU hidden state / per-step extra input.
        self.z2dec_hid = nn.Linear(z_dim, hidden_dim)
        self.z2dec_in = nn.Linear(z_dim, z_input_dim)
        self.gru = nn.GRU(input_dim + z_input_dim, hidden_dim,
                          batch_first=True,
                          bidirectional=False)
        # Learnable start-of-sequence token (36 = 12 root + 12 chroma + 12 bass).
        self.init_input = nn.Parameter(torch.rand(36))
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.root_out = nn.Linear(hidden_dim, 12)
        self.chroma_out = nn.Linear(hidden_dim, 24)
        self.bass_out = nn.Linear(hidden_dim, 12)
        self.num_step = num_step

    def forward(self, z_chd, inference, tfr, c=None):
        """Decode ``z_chd`` into (recon_root, recon_chroma, recon_bass) logits.

        z_chd: (B, z_chd_size) latent; inference: disables teacher forcing;
        tfr: teacher-forcing ratio in [0, 1]; c: ground-truth chord tokens,
        required when teacher forcing may trigger.
        """
        # z_chd: (B, z_chd_size)
        bs = z_chd.size(0)
        z_chd_hid = self.z2dec_hid(z_chd).unsqueeze(0)
        z_chd_in = self.z2dec_in(z_chd).unsqueeze(1)
        if inference:
            tfr = 0.
        token = self.init_input.repeat(bs, 1).unsqueeze(1)
        recon_root = []
        recon_chroma = []
        recon_bass = []
        for t in range(int(self.num_step / 4)):
            chd, z_chd_hid = \
                self.gru(torch.cat([token, z_chd_in], dim=-1), z_chd_hid)
            r_root = self.root_out(chd)  # (bs, 1, 12)
            r_chroma = self.chroma_out(chd).view(bs, 1, 12, 2).contiguous()
            r_bass = self.bass_out(chd)  # (bs, 1, 12)
            recon_root.append(r_root)
            recon_chroma.append(r_chroma)
            recon_bass.append(r_bass)
            # Re-encode the argmax predictions as one-hot / binary tokens to
            # feed the next step.
            t_root = torch.zeros(bs, 1, 12).to(z_chd.device).float()
            t_root[torch.arange(0, bs), 0, r_root.max(-1)[-1]] = 1.
            t_chroma = r_chroma.max(-1)[-1].float()
            t_bass = torch.zeros(bs, 1, 12).to(z_chd.device).float()
            t_bass[torch.arange(0, bs), 0, r_bass.max(-1)[-1]] = 1.
            token = torch.cat([t_root, t_chroma, t_bass], dim=-1)
            # NOTE(review): the loop only runs num_step/4 iterations, so this
            # condition can never be true for num_step >= 2 — presumably dead
            # code left over from an earlier loop bound; confirm before removal.
            if t == self.num_step - 1:
                break
            # Teacher forcing: with probability tfr feed the ground truth token.
            teacher_force = random.random() < tfr
            if teacher_force and not inference:
                token = c[:, t].unsqueeze(1)
        recon_root = torch.cat(recon_root, dim=1)
        recon_chroma = torch.cat(recon_chroma, dim=1)
        recon_bass = torch.cat(recon_bass, dim=1)
        return recon_root, recon_chroma, recon_bass
acf76be84af86addceed517a455735431de2e087 | 2,141 | py | Python | pcapkit/protocols/data/application/httpv1.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | 3 | 2018-01-21T15:22:21.000Z | 2018-06-22T01:27:59.000Z | pcapkit/protocols/data/application/httpv1.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | null | null | null | pcapkit/protocols/data/application/httpv1.py | JarryShaw/jspcap | 9d4c980598e3c5e0af864044976f91b9b96e2e3e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""data model for HTTP/1.* protocol"""
from typing import TYPE_CHECKING
from pcapkit.corekit.infoclass import Info
from pcapkit.protocols.data.application.http import HTTP as DataType_HTTP
if TYPE_CHECKING:
from typing import Optional
from typing_extensions import Literal
from pcapkit.corekit.multidict import OrderedMultiDict
__all__ = [
'HTTP',
'Header',
'RequestHeader', 'ResponseHeader',
]
class HTTP(DataType_HTTP):
    """Data model for HTTP/1.* protocol.

    Bundles the parsed start line (``receipt``), the header fields and the
    optional raw body of a single HTTP/1.* message.
    """

    #: HTTP receipt (the parsed request/response start line).
    receipt: 'Header'
    #: HTTP header fields; a multidict since header names may repeat.
    header: 'OrderedMultiDict[str, str]'
    #: HTTP body, or None when the message carries no payload.
    body: 'Optional[bytes]'

    if TYPE_CHECKING:
        def __init__(self, receipt: 'Header', header: 'OrderedMultiDict[str, str]', body: 'Optional[bytes]') -> None: ...  # pylint: disable=unused-argument,super-init-not-called,multiple-statements,line-too-long,redefined-builtin


class Header(Info):
    """Data model for an HTTP/1.* start ("header") line; base of the two variants."""

    #: Receipt type discriminator.
    type: 'Literal["request", "response"]'


class RequestHeader(Header):
    """Data model for HTTP/1.* request header (request) line."""

    #: HTTP request header line discriminator.
    type: 'Literal["request"]'
    #: HTTP method.
    method: 'str'
    #: HTTP request URI.
    uri: 'str'
    #: HTTP request version.
    version: 'str'

    if TYPE_CHECKING:
        def __init__(self, type: 'Literal["request"]', method: 'str', uri: 'str', version: 'str') -> 'None': ...  # pylint: disable=unused-argument,super-init-not-called,multiple-statements,line-too-long,redefined-builtin


class ResponseHeader(Header):
    """Data model for HTTP/1.* response header (status) line."""

    #: HTTP response header line discriminator.
    type: 'Literal["response"]'
    #: HTTP response version.
    version: 'str'
    #: HTTP response status code.
    status: 'int'
    #: HTTP response status message (reason phrase).
    message: 'str'

    if TYPE_CHECKING:
        def __init__(self, type: 'Literal["response"]', version: 'str', status: 'int', message: 'str') -> 'None': ...  # pylint: disable=unused-argument,super-init-not-called,multiple-statements,line-too-long,redefined-builtin
| 28.546667 | 230 | 0.663709 |
acf76c5622f0bf91b77bd89a2ac15160b6d96914 | 3,283 | py | Python | src/python/tests/core/fuzzing/coverage_uploader_test.py | ABHIsHEk122811/clusterfuzz | 7cac0ee869787e6f547a4b3dac18196c60f03383 | [
"Apache-2.0"
] | 4 | 2019-11-26T01:50:51.000Z | 2021-08-14T20:32:43.000Z | src/python/tests/core/fuzzing/coverage_uploader_test.py | ABHIsHEk122811/clusterfuzz | 7cac0ee869787e6f547a4b3dac18196c60f03383 | [
"Apache-2.0"
] | 22 | 2019-12-26T17:02:34.000Z | 2022-03-21T22:16:52.000Z | src/python/tests/core/fuzzing/coverage_uploader_test.py | ABHIsHEk122811/clusterfuzz | 7cac0ee869787e6f547a4b3dac18196c60f03383 | [
"Apache-2.0"
] | 2 | 2019-02-09T09:09:20.000Z | 2019-02-15T05:25:13.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the coverage_uploader module."""
import datetime
import os
from pyfakefs import fake_filesystem_unittest
from fuzzing import coverage_uploader
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
def _mock_config_get(_, param):
"""Handle test configuration options."""
if param == 'coverage.fuzzer-testcases.bucket':
return 'test-coverage-testcases'
return None
class FakeGSUtilRunner(object):
    """Fake gsutil runner for testing."""

    # Class-level log of every (source, destination) pair passed to rsync,
    # shared across instances so test assertions can inspect all calls.
    rsync_calls = []

    def rsync(self, source, destination):
        # Record the invocation instead of performing any copy.
        FakeGSUtilRunner.rsync_calls.append((source, destination))
class UploadTestsToCloudStorageTest(fake_filesystem_unittest.TestCase):
    """Tests for upload_tests_to_cloud_storage."""

    def setUp(self):
        """Patch environment, config and GCS helpers; reset the fake gsutil log."""
        test_helpers.patch_environ(self)
        test_helpers.patch(self, [
            'base.utils.utcnow',
            'config.local_config.ProjectConfig.get',
            'datastore.locks.acquire_lock',
            'datastore.locks.release_lock',
            'google_cloud_utils.gsutil.GSUtilRunner',
            'google_cloud_utils.storage.list_blobs',
            'google_cloud_utils.storage.read_data',
            'google_cloud_utils.storage.write_data',
        ])
        test_utils.set_up_pyfakefs(self)

        self.mock.write_data.return_value = True
        # Freeze time so the dated GCS paths in the assertions are deterministic.
        self.mock.utcnow.side_effect = lambda: datetime.datetime(2018, 11, 1, 0, 0)

        # Fix: reset the attribute the tests assert on. The previous
        # `FakeGSUtilRunner.calls = []` created an unused attribute and let
        # `rsync_calls` leak state between tests.
        FakeGSUtilRunner.rsync_calls = []
        self.mock.GSUtilRunner.side_effect = FakeGSUtilRunner
        self.mock.get.side_effect = _mock_config_get

        os.environ['BOT_NAME'] = 'test-bot'
        os.environ['BOT_TMPDIR'] = '/tmp'
        os.environ['FAIL_RETRIES'] = '1'
        os.environ['TRADITIONAL_FUZZER_COVERAGE'] = 'True'

    def test_tests_created_in_correct_bucket(self):
        """Ensure that we invoke gsutil correctly to store tests."""
        files = ['/a/b/file1.txt', '/a/file2.txt', '/b/c/file3.txt']
        coverage_uploader.upload_testcases_if_needed('test_fuzzer', files, '/a/')

        self.mock.write_data.assert_called_with(
            'b/file1.txt\nfile2.txt',
            'gs://test-coverage-testcases/2018-11-01/test_fuzzer/'
            '5b680a295e1f3a81160a0bd71ca2abbcb8d19521/file_list.txt')
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(
            FakeGSUtilRunner.rsync_calls,
            [('/a/', 'gs://test-coverage-testcases/2018-11-01/test_fuzzer/'
              '5b680a295e1f3a81160a0bd71ca2abbcb8d19521')])

    def test_data_directory_ignored(self):
        """Ensure that we do nothing if the output directory is empty."""
        files = ['/data/b/file1.txt', '/data/file2.txt', '/data/c/file3.txt']
        coverage_uploader.upload_testcases_if_needed('test_fuzzer', files,
                                                     '/testcases/')
        self.assertEqual(FakeGSUtilRunner.rsync_calls, [])
acf76c681e6248b0091d99490c1123c56c9878b3 | 2,191 | py | Python | AgodaCrawler.py | alex856236/Crawler | 5171616660000b4771ee17923819837c258b5653 | [
"MIT"
] | null | null | null | AgodaCrawler.py | alex856236/Crawler | 5171616660000b4771ee17923819837c258b5653 | [
"MIT"
] | null | null | null | AgodaCrawler.py | alex856236/Crawler | 5171616660000b4771ee17923819837c258b5653 | [
"MIT"
] | null | null | null | import time
import urllib
from bs4 import BeautifulSoup as btfs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
BASIC_URL = 'https://www.agoda.com/'
URL = 'https://www.agoda.com/zh-tw/pages/agoda/default/DestinationSearchResult.aspx'
driver = webdriver.Chrome()
parameter = {'city': 4951, # 台北4951
# 'area':80303, #板橋
'checkIn': '2019-02-20', # 入住時間
'checkOut': '2019-02-22', # 離開時間
'rooms' : 1, # 客房數
'adults': 2, # 大人人數
'children': 0, # 小孩人數
}
def get_tag(parent, tag):
    """Return the first child element of *parent* with the given tag name.

    Missing elements are expected on sparse hotel cards, so lookup failures
    return None instead of raising. The previous bare ``except:`` also
    swallowed SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        element = parent.find_element_by_tag_name(tag)
    except Exception:  # e.g. selenium NoSuchElementException
        return None
    return element
def get_class(parent, class_name):
    """Return the first child element of *parent* with the given CSS class.

    Returns None when the element is absent. Narrowed from a bare
    ``except:`` to ``Exception`` so interrupt signals are not swallowed.
    """
    try:
        element = parent.find_element_by_class_name(class_name)
    except Exception:  # e.g. selenium NoSuchElementException
        return None
    return element
def get_css(parent, css_selector):
    """Return the first child element of *parent* matching *css_selector*.

    Returns None when the element is absent. Narrowed from a bare
    ``except:`` to ``Exception`` so interrupt signals are not swallowed.
    """
    try:
        element = parent.find_element_by_css_selector(css_selector)
    except Exception:  # e.g. selenium NoSuchElementException
        return None
    return element
correct_URL = URL + '?' + '&'.join(['%s=%s' % (key, value) for (key, value) in parameter.items()])
driver.get(correct_URL)
hotel_blocks = driver.find_elements_by_class_name('PropertyCardItem')
for block in hotel_blocks:
driver.execute_script('arguments[0].scrollIntoView();', block)
hotel_url = get_tag(block, 'a')
hotel_url = hotel_url.get_attribute('href') if hotel_url else hotel_url
hotel_name = get_class(block, 'hotel-name')
hotel_name = hotel_name.text if hotel_name else hotel_name
hotel_star = get_css(block, 'i[data-selenium="hotel-star-rating"]')
hotel_star = hotel_star.get_attribute('title') if hotel_star else hotel_star
hotel_location = get_class(block, 'areacity-name-text')
hotel_location = hotel_location.text if hotel_location else hotel_location
hotel_score = get_class(block, 'ReviewScore-Number')
hotel_score = hotel_score.text if hotel_score else hotel_score
print((hotel_name, hotel_star, hotel_location, hotel_score, hotel_url))
time.sleep(1)
| 30.430556 | 98 | 0.691921 |
acf76d02c0e8e7ad08b105a4abc194fc1509f8f7 | 2,187 | py | Python | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/matrix_lie_group_tangent.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/matrix_lie_group_tangent.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/matrix_lie_group_tangent.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | # coding=utf-8
from contracts import contract
from .differentiable_manifold import DifferentiableManifold
from .matrix_lie_group import MatrixLieGroup
__all__ = ['MatrixLieGroupTangent']
class MatrixLieGroupTangent(DifferentiableManifold):
    ''' This class represents the tangent bundle of a matrix Lie group
        using a tuple (base, v0), where v0 is in the algebra.

        Compare with the generic TangentBundle that uses the representation
        (base, vel) where vel is tangent at base (it holds that vel=base*v0).

        (MatrixLieGroup has different representation)
    '''
    # TODO: the tangent bundle of a matrix Lie group has more properties than
    # this.
    # TODO: create tests for all of this

    def __init__(self, base_group):
        assert isinstance(base_group, MatrixLieGroup)
        self.base = base_group
        # Dimension of the bundle: base dimension plus the (equal-dimensional)
        # fiber of the algebra.
        dimension = 2 * base_group.get_dimension()
        DifferentiableManifold.__init__(self, dimension=dimension)

    def __str__(self):
        return "T%se" % self.base

    @contract(x='tuple[2]')
    def belongs(self, x):
        # Delegates: x[0] must be on the base group, x[1] in its Lie algebra.
        self.base.belongs(x[0])
        self.base.get_algebra().belongs(x[1])

    def belongs_ts(self, bv):
        # TODO: implement
        raise ValueError('Not supported')

    def project_ts(self, bv):  # TODO: test
        # TODO: implement
        raise ValueError('Not supported')

    @contract(a='belongs', b='belongs', returns='>=0')
    def distance(self, a, b):
        # TODO: implement
        raise ValueError('Not supported')

    @contract(base='belongs', p='belongs', returns='belongs_ts')
    def logmap(self, base, p):
        raise ValueError('Not supported')

    @contract(bv='belongs_ts', returns='belongs')
    def expmap(self, bv):
        raise ValueError('Not supported')

    @contract(returns='list(belongs)')
    def interesting_points(self):
        # TODO: write this
        return []

    @contract(a='belongs')
    def friendly(self, a):
        '''
            Returns a friendly description string for a point on the manifold.
        '''
        v = self.base.get_algebra().vector_from_algebra(a[1])
        return "V(%s,%s)" % (self.base.friendly(a[0]), v.tolist())
acf76eb29e46d14d0ba2a55c24b820300bfcac0e | 2,375 | py | Python | squeak/whereis.py | almonds0166/finneasj | fd66139bec3382df70c1216e1604db8de5ec0324 | [
"MIT"
] | null | null | null | squeak/whereis.py | almonds0166/finneasj | fd66139bec3382df70c1216e1604db8de5ec0324 | [
"MIT"
] | 1 | 2021-05-19T22:27:31.000Z | 2021-05-19T22:27:31.000Z | squeak/whereis.py | almonds0166/finneasj | fd66139bec3382df70c1216e1604db8de5ec0324 | [
"MIT"
] | null | null | null |
import asyncio
from urllib.parse import urlencode, quote
import json
import discord
import aiohttp
from .util import cap_at_n, SearchResults
HEADERS = {"User-Agent": "finneasj/1.0"}
# Returns (content string, embed object, number of results).
async def search(query, debug=False):
    """Query MIT's whereis map service and build a Discord reply for it.

    Returns a SearchResults(content, embed, num_results) tuple; embed is
    None and num_results is 0 when the service returns no matches.
    """
    q = urlencode({"q": query}, quote_via=quote)
    url = "https://whereis.mit.edu/search?type=query&output=json&{}".format(q)
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=HEADERS) as response:
            result = json.loads(await response.text())
    if debug: print(result)
    if not result: # No results
        return SearchResults("", None, 0)
    # prepare bot message
    embed = discord.Embed()
    embed.url = "https://whereis.mit.edu/?{}".format(q)
    content = ""
    # unpack http response
    if len(result) > 1:
        # Multiple hits: list them (capped at 500 chars) in one embed.
        embed.title = "Found {} location results!".format(len(result))
        locations = []
        for location in result:
            if all(k in location for k in ("bldgnum", "street")):
                locations.append("{name} ({bldgnum}, at {street})".format(**location))
            else:
                locations.append(location["name"])
        embed.description = cap_at_n(locations, 500)
    else:
        # Single hit: show coordinates, building/street when present, image.
        location = result[0]
        embed.description = "(`{lat_wgs84},{long_wgs84}`)".format(**location)
        if all(k in location for k in ("bldgnum", "street")):
            embed.description = "Building {bldgnum}\n{street} ".format(**location) + embed.description
        embed.title = location["name"]
        if "bldgimg" in location:
            embed.set_image(url=location["bldgimg"])
    return SearchResults(content, embed, len(result))
if __name__ == "__main__":
print("Give me a map query.")
try:
while True:
q = input("> ")
content, embed, num_results = asyncio.run(search(q, debug=True))
print("Content")
print("=======")
print(content)
print()
print("Embed")
print("=====")
if embed:
print("title:", embed.title)
print("url:", embed.url)
print("description:", embed.description)
print("image url:", embed.image.url)
else:
print(embed)
print()
print("Results:", num_results)
print()
except KeyboardInterrupt:
print("^C") | 31.25 | 99 | 0.604211 |
acf76ed531df6b0cf312894a366854f0a346a2ad | 6,439 | py | Python | docs/py/customized.py | djokester/autokeras | 7a95eaa7dd154245597934accc558c39394052e7 | [
"Apache-2.0"
] | 4,704 | 2017-12-03T02:40:27.000Z | 2019-12-19T23:23:34.000Z | docs/py/customized.py | djokester/autokeras | 7a95eaa7dd154245597934accc558c39394052e7 | [
"Apache-2.0"
] | 939 | 2019-04-02T18:13:53.000Z | 2022-03-31T16:25:08.000Z | docs/py/customized.py | djokester/autokeras | 7a95eaa7dd154245597934accc558c39394052e7 | [
"Apache-2.0"
] | 826 | 2019-04-02T00:53:31.000Z | 2022-03-31T10:11:02.000Z | """shell
pip install autokeras
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import autokeras as ak
"""
In this tutorial, we show how to customize your search space with
[AutoModel](/auto_model/#automodel-class) and how to implement your own block
as search space. This API is mainly for advanced users who already know what
their model should look like.
## Customized Search Space
First, let us see how we can build the following neural network using the
building blocks in AutoKeras.
<div class="mermaid">
graph LR
id1(ImageInput) --> id2(Normalization)
id2 --> id3(Image Augmentation)
id3 --> id4(Convolutional)
id3 --> id5(ResNet V2)
id4 --> id6(Merge)
id5 --> id6
id6 --> id7(Classification Head)
</div>
We can make use of the [AutoModel](/auto_model/#automodel-class) API in
AutoKeras to implemented as follows.
The usage is the same as the [Keras functional
API](https://www.tensorflow.org/guide/keras/functional).
Since this is just a demo, we use small amount of `max_trials` and `epochs`.
"""
input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node1 = ak.ConvBlock()(output_node)
output_node2 = ak.ResNetBlock(version="v2")(output_node)
output_node = ak.Merge()([output_node1, output_node2])
output_node = ak.ClassificationHead()(output_node)
auto_model = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
"""
Whild building the model, the blocks used need to follow this topology:
`Preprocessor` -> `Block` -> `Head`. `Normalization` and `ImageAugmentation`
are `Preprocessor`s.
`ClassificationHead` is `Head`. The rest are `Block`s.
In the code above, we use `ak.ResNetBlock(version='v2')` to specify the version
of ResNet to use. There are many other arguments to specify for each building
block. For most of the arguments, if not specified, they would be tuned
automatically. Please refer to the documentation links at the bottom of the
page for more details.
Then, we prepare some data to run the model.
"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3]) # array([7, 2, 1], dtype=uint8)
# Feed the AutoModel with training data.
auto_model.fit(x_train[:100], y_train[:100], epochs=1)
# Predict with the best model.
predicted_y = auto_model.predict(x_test)
# Evaluate the best model with testing data.
print(auto_model.evaluate(x_test, y_test))
"""
For multiple input nodes and multiple heads search space, you can refer to
[this section](/tutorial/multi/#customized-search-space).
## Validation Data
If you would like to provide your own validation data or change the ratio of
the validation data, please refer to the Validation Data section of the
tutorials of [Image
Classification](/tutorial/image_classification/#validation-data), [Text
Classification](/tutorial/text_classification/#validation-data), [Structured
Data
Classification](/tutorial/structured_data_classification/#validation-data),
[Multi-task and Multiple Validation](/tutorial/multi/#validation-data).
## Data Format
You can refer to the documentation of
[ImageInput](/node/#imageinput-class),
[StructuredDataInput](/node/#structureddatainput-class),
[TextInput](/node/#textinput-class),
[RegressionHead](/block/#regressionhead-class),
[ClassificationHead](/block/#classificationhead-class),
for the format of different types of data.
You can also refer to the Data Format section of the tutorials of
[Image Classification](/tutorial/image_classification/#data-format),
[Text Classification](/tutorial/text_classification/#data-format),
[Structured Data
Classification](/tutorial/structured_data_classification/#data-format).
## Implement New Block
You can extend the [Block](/base/#block-class)
class to implement your own building blocks and use it with
[AutoModel](/auto_model/#automodel-class).
The first step is to learn how to write a build function for
[KerasTuner](https://keras-team.github.io/keras-tuner/#usage-the-basics). You
need to override the [build function](/base/#build-method) of the block. The
following example shows how to implement a single Dense layer block whose
number of neurons is tunable.
"""
class SingleDenseLayerBlock(ak.Block):
    """AutoKeras block wrapping a single Dense layer with a tunable unit count."""

    def build(self, hp, inputs=None):
        # Get the input_node from inputs.
        input_node = tf.nest.flatten(inputs)[0]
        # The number of units is searched over [32, 512] in steps of 32.
        layer = tf.keras.layers.Dense(
            hp.Int("num_units", min_value=32, max_value=512, step=32)
        )
        output_node = layer(input_node)
        return output_node
"""
You can connect it with other blocks and build it into an
[AutoModel](/auto_model/#automodel-class).
"""
# Build the AutoModel
input_node = ak.Input()
output_node = SingleDenseLayerBlock()(input_node)
output_node = ak.RegressionHead()(output_node)
auto_model = ak.AutoModel(input_node, output_node, overwrite=True, max_trials=1)
# Prepare Data
num_instances = 100
x_train = np.random.rand(num_instances, 20).astype(np.float32)
y_train = np.random.rand(num_instances, 1).astype(np.float32)
x_test = np.random.rand(num_instances, 20).astype(np.float32)
y_test = np.random.rand(num_instances, 1).astype(np.float32)
# Train the model
auto_model.fit(x_train, y_train, epochs=1)
print(auto_model.evaluate(x_test, y_test))
"""
## Reference
[AutoModel](/auto_model/#automodel-class)
**Nodes**:
[ImageInput](/node/#imageinput-class),
[Input](/node/#input-class),
[StructuredDataInput](/node/#structureddatainput-class),
[TextInput](/node/#textinput-class).
**Preprocessors**:
[FeatureEngineering](/block/#featureengineering-class),
[ImageAugmentation](/block/#imageaugmentation-class),
[LightGBM](/block/#lightgbm-class),
[Normalization](/block/#normalization-class),
[TextToIntSequence](/block/#texttointsequence-class),
[TextToNgramVector](/block/#texttongramvector-class).
**Blocks**:
[ConvBlock](/block/#convblock-class),
[DenseBlock](/block/#denseblock-class),
[Embedding](/block/#embedding-class),
[Merge](/block/#merge-class),
[ResNetBlock](/block/#resnetblock-class),
[RNNBlock](/block/#rnnblock-class),
[SpatialReduction](/block/#spatialreduction-class),
[TemporalReduction](/block/#temporalreduction-class),
[XceptionBlock](/block/#xceptionblock-class),
[ImageBlock](/block/#imageblock-class),
[StructuredDataBlock](/block/#structureddatablock-class),
[TextBlock](/block/#textblock-class).
"""
| 34.994565 | 80 | 0.757105 |
acf76f003482ca06edd9620906f2e30a3b13579d | 787 | py | Python | src/wallabag/api/api_token.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 16 | 2020-09-30T23:08:45.000Z | 2022-03-30T02:34:17.000Z | src/wallabag/api/api_token.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 15 | 2020-11-05T09:22:38.000Z | 2022-03-11T16:56:18.000Z | src/wallabag/api/api_token.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 1 | 2021-04-02T11:00:57.000Z | 2021-04-02T11:00:57.000Z | # -*- coding: utf-8 -*-
from wallabag.api.api import Api, ApiMethod
from wallabag.config import Options, Sections
class ApiToken(Api):
    """OAuth2 token request; fetches the access token, so it skips auth itself."""

    def __init__(self, config):
        Api.__init__(self, config)
        # Token retrieval must not itself require an existing token.
        self.skip_auth = True

    def _get_api_url(self):
        url = self._build_url(ApiMethod.TOKEN)
        return url

    def _make_request(self, request):
        response = self._request_get(request)
        return response

    def _get_params(self):
        """Build the password-grant parameter set from the stored config."""
        conf = self.config
        params = {'grant_type': "password"}
        params['client_id'] = conf.get(Sections.OAUTH2, Options.CLIENT)
        params['client_secret'] = conf.get(Sections.OAUTH2, Options.SECRET)
        params['username'] = conf.get(Sections.API, Options.USERNAME)
        params['password'] = conf.get(Sections.API, Options.PASSWORD)
        return params
acf76f060961571ffb5520383e4a4e86dbcd56c5 | 10,810 | py | Python | nuitka/codegen/ComparisonCodes.py | jvalduvieco/Nuitka | b93046d5d1c162d416d392d835790936d15a2cf8 | [
"Apache-2.0"
] | null | null | null | nuitka/codegen/ComparisonCodes.py | jvalduvieco/Nuitka | b93046d5d1c162d416d392d835790936d15a2cf8 | [
"Apache-2.0"
] | null | null | null | nuitka/codegen/ComparisonCodes.py | jvalduvieco/Nuitka | b93046d5d1c162d416d392d835790936d15a2cf8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Comparison related codes.
Rich comparisons, "in", and "not in", also "is", and "is not", and the
"isinstance" check as used in conditions, as well as exception matching.
"""
from nuitka.containers.oset import OrderedSet
from nuitka.nodes.shapes.BuiltinTypeShapes import tshape_bool
from . import OperatorCodes
from .CodeHelpers import generateExpressionCode, pickCodeHelper
from .ErrorCodes import getErrorExitBoolCode, getReleaseCodes
specialized_cmp_helpers_set = OrderedSet(
(
"RICH_COMPARE_xx_OBJECT_OBJECT_OBJECT",
"RICH_COMPARE_xx_CBOOL_OBJECT_OBJECT",
"RICH_COMPARE_xx_NBOOL_OBJECT_OBJECT",
# "RICH_COMPARE_xx_OBJECT_INT",
# "RICH_COMPARE_xx_OBJECT_LONG",
# "RICH_COMPARE_xx_OBJECT_STR",
# "RICH_COMPARE_xx_OBJECT_UNICODE",
# "RICH_COMPARE_xx_OBJECT_TUPLE",
# "RICH_COMPARE_xx_OBJECT_LIST",
# "RICH_COMPARE_xx_OBJECT_BYTES",
# "RICH_COMPARE_xx_INT_OBJECT",
# "RICH_COMPARE_xx_LONG_OBJECT",
# "RICH_COMPARE_xx_STR_OBJECT",
# "RICH_COMPARE_xx_UNICODE_OBJECT",
# "RICH_COMPARE_xx_TUPLE_OBJECT",
# "RICH_COMPARE_xx_LIST_OBJECT",
# "RICH_COMPARE_xx_BYTES_OBJECT",
# "RICH_COMPARE_xx_INT_INT",
# "RICH_COMPARE_xx_LONG_LONG",
# "RICH_COMPARE_xx_STR_STR",
# "RICH_COMPARE_xx_UNICODE_UNICODE",
# "RICH_COMPARE_xx_TUPLE_TUPLE",
# "RICH_COMPARE_xx_LIST_LIST",
# "RICH_COMPARE_xx_BYTES_BYTES",
"RICH_COMPARE_xx_OBJECT_INT_INT",
"RICH_COMPARE_xx_CBOOL_INT_INT",
"RICH_COMPARE_xx_NBOOL_INT_INT",
"RICH_COMPARE_xx_OBJECT_OBJECT_INT",
"RICH_COMPARE_xx_CBOOL_OBJECT_INT",
"RICH_COMPARE_xx_NBOOL_OBJECT_INT",
"RICH_COMPARE_xx_OBJECT_INT_OBJECT",
"RICH_COMPARE_xx_CBOOL_INT_OBJECT",
"RICH_COMPARE_xx_NBOOL_INT_OBJECT",
"RICH_COMPARE_xx_OBJECT_FLOAT_FLOAT",
"RICH_COMPARE_xx_CBOOL_FLOAT_FLOAT",
"RICH_COMPARE_xx_NBOOL_FLOAT_FLOAT",
"RICH_COMPARE_xx_OBJECT_OBJECT_FLOAT",
"RICH_COMPARE_xx_CBOOL_OBJECT_FLOAT",
"RICH_COMPARE_xx_NBOOL_OBJECT_FLOAT",
"RICH_COMPARE_xx_OBJECT_FLOAT_OBJECT",
"RICH_COMPARE_xx_CBOOL_FLOAT_OBJECT",
"RICH_COMPARE_xx_NBOOL_FLOAT_OBJECT",
"RICH_COMPARE_xx_OBJECT_TUPLE_TUPLE",
"RICH_COMPARE_xx_CBOOL_TUPLE_TUPLE",
"RICH_COMPARE_xx_NBOOL_TUPLE_TUPLE",
"RICH_COMPARE_xx_OBJECT_OBJECT_TUPLE",
"RICH_COMPARE_xx_CBOOL_OBJECT_TUPLE",
"RICH_COMPARE_xx_NBOOL_OBJECT_TUPLE",
"RICH_COMPARE_xx_OBJECT_TUPLE_OBJECT",
"RICH_COMPARE_xx_CBOOL_TUPLE_OBJECT",
"RICH_COMPARE_xx_NBOOL_TUPLE_OBJECT",
# "RICH_COMPARE_xx_CBOOL_OBJECT_LONG",
# "RICH_COMPARE_xx_CBOOL_OBJECT_STR",
# "RICH_COMPARE_xx_CBOOL_OBJECT_UNICODE",
# "RICH_COMPARE_xx_CBOOL_OBJECT_LIST",
# "RICH_COMPARE_xx_CBOOL_OBJECT_BYTES",
# "RICH_COMPARE_xx_CBOOL_LONG_OBJECT",
# "RICH_COMPARE_xx_CBOOL_STR_OBJECT",
# "RICH_COMPARE_xx_CBOOL_OBJECT_FLOAT",
# "RICH_COMPARE_xx_CBOOL_UNICODE_OBJECT",
# "RICH_COMPARE_xx_CBOOL_TUPLE_OBJECT",
# "RICH_COMPARE_xx_CBOOL_LIST_OBJECT",
# "RICH_COMPARE_xx_CBOOL_BYTES_OBJECT",
# "RICH_COMPARE_xx_CBOOL_LONG_LONG",
# "RICH_COMPARE_xx_CBOOL_STR_STR",
# "RICH_COMPARE_xx_CBOOL_UNICODE_UNICODE",
# "RICH_COMPARE_xx_CBOOL_TUPLE_TUPLE",
# "RICH_COMPARE_xx_CBOOL_LIST_LIST",
# "RICH_COMPARE_xx_CBOOL_BYTES_BYTES",
)
)
def generateComparisonExpressionCode(to_name, expression, emit, context):
    """Generate C code for a comparison expression into *to_name*.

    Dispatches on the comparator: "in"/"not in" containment, "is"/"is not"
    identity, rich comparisons via specialized helper functions, and the
    exception matching used for "except" clause tests.
    """
    left = expression.subnode_left
    right = expression.subnode_right
    comparator = expression.getComparator()
    # For identity checks of two bool-shaped operands, compare as C-level
    # nuitka_bool values instead of PyObject pointers.
    type_name = "PyObject *"
    if comparator in ("Is", "IsNot"):
        if left.getTypeShape() is tshape_bool and right.getTypeShape() is tshape_bool:
            type_name = "nuitka_bool"
    left_name = context.allocateTempName("compexpr_left", type_name=type_name)
    right_name = context.allocateTempName("compexpr_right", type_name=type_name)
    generateExpressionCode(
        to_name=left_name, expression=left, emit=emit, context=context
    )
    generateExpressionCode(
        to_name=right_name, expression=right, emit=emit, context=context
    )
    if comparator in OperatorCodes.containing_comparison_codes:
        needs_check = right.mayRaiseExceptionIn(BaseException, expression.subnode_left)
        res_name = context.getIntResName()
        emit(
            "%s = PySequence_Contains(%s, %s);"
            % (res_name, right_name, left_name)  # sequence goes first in the API.
        )
        # PySequence_Contains returns -1 on error, 0/1 otherwise.
        getErrorExitBoolCode(
            condition="%s == -1" % res_name,
            release_names=(left_name, right_name),
            needs_check=needs_check,
            emit=emit,
            context=context,
        )
        to_name.getCType().emitAssignmentCodeFromBoolCondition(
            to_name=to_name,
            condition="%s == %d" % (res_name, 1 if comparator == "In" else 0),
            emit=emit,
        )
    elif comparator == "Is":
        # Identity is a plain pointer (or nuitka_bool) equality; cannot raise.
        to_name.getCType().emitAssignmentCodeFromBoolCondition(
            to_name=to_name, condition="%s == %s" % (left_name, right_name), emit=emit
        )
        getReleaseCodes(
            release_names=(left_name, right_name), emit=emit, context=context
        )
    elif comparator == "IsNot":
        to_name.getCType().emitAssignmentCodeFromBoolCondition(
            to_name=to_name, condition="%s != %s" % (left_name, right_name), emit=emit
        )
        getReleaseCodes(
            release_names=(left_name, right_name), emit=emit, context=context
        )
    elif comparator in OperatorCodes.rich_comparison_codes:
        needs_check = expression.mayRaiseExceptionComparison()
        # TODO: This is probably not really worth it, but we used to do it.
        # if comparator == "Eq" and not context.mayRecurse():
        #     suffix = "_NORECURSE"
        # else:
        #     suffix = ""
        # Pick the most specific helper for the operand/target type shapes.
        helper = pickCodeHelper(
            prefix="RICH_COMPARE_xx",
            suffix="",
            target_type=to_name.getCType(),
            left_shape=left.getTypeShape(),
            right_shape=expression.subnode_right.getTypeShape(),
            helpers=specialized_cmp_helpers_set,
            nonhelpers=(),
            # TODO: Only temporary, we need to be more complete with these.
            source_ref=None,  # expression.source_ref,
        )
        # Lets patch this up here, instead of having one set per comparison operation.
        helper.helper_name = helper.helper_name.replace(
            "xx", OperatorCodes.rich_comparison_codes[comparator]
        )
        helper.emitHelperCall(
            to_name=to_name,
            arg_names=(left_name, right_name),
            ref_count=1,
            needs_check=needs_check,
            emit=emit,
            context=context,
        )
    elif comparator in ("exception_match", "exception_mismatch"):
        needs_check = expression.mayRaiseExceptionComparison()
        res_name = context.getIntResName()
        emit("%s = EXCEPTION_MATCH_BOOL(%s, %s);" % (res_name, left_name, right_name))
        getErrorExitBoolCode(
            condition="%s == -1" % res_name,
            release_names=(left_name, right_name),
            needs_check=needs_check,
            emit=emit,
            context=context,
        )
        to_name.getCType().emitAssignmentCodeFromBoolCondition(
            to_name=to_name,
            condition="%s %s 0"
            % (res_name, "!=" if comparator == "exception_match" else "=="),
            emit=emit,
        )
    else:
        assert False, comparator
def generateBuiltinIsinstanceCode(to_name, expression, emit, context):
    """Generate C code for the isinstance() built-in into *to_name*."""
    inst_name = context.allocateTempName("isinstance_inst")
    cls_name = context.allocateTempName("isinstance_cls")
    generateExpressionCode(
        to_name=inst_name,
        expression=expression.subnode_instance,
        emit=emit,
        context=context,
    )
    generateExpressionCode(
        to_name=cls_name,
        expression=expression.subnode_classes,
        emit=emit,
        context=context,
    )
    context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference())
    res_name = context.getIntResName()
    emit("%s = Nuitka_IsInstance(%s, %s);" % (res_name, inst_name, cls_name))
    # The check returns -1 on error, 0/1 otherwise.
    getErrorExitBoolCode(
        condition="%s == -1" % res_name,
        release_names=(inst_name, cls_name),
        emit=emit,
        context=context,
    )
    to_name.getCType().emitAssignmentCodeFromBoolCondition(
        to_name=to_name, condition="%s != 0" % res_name, emit=emit
    )
def generateBuiltinIssubclassCode(to_name, expression, emit, context):
    """Emit C code for a builtin issubclass() call.

    Evaluates the cls and classes sub-expressions into temporaries, calls
    PyObject_IsSubclass, exits on its -1 error result, and assigns the
    boolean outcome to *to_name*.
    """
    candidate_name = context.allocateTempName("issubclass_cls")
    bases_name = context.allocateTempName("issubclass_classes")

    # Evaluate the two argument sub-expressions, candidate class first.
    for value_name, sub_expression in (
        (candidate_name, expression.subnode_cls),
        (bases_name, expression.subnode_classes),
    ):
        generateExpressionCode(
            to_name=value_name,
            expression=sub_expression,
            emit=emit,
            context=context,
        )

    context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference())

    result_name = context.getIntResName()

    emit("%s = PyObject_IsSubclass(%s, %s);" % (result_name, candidate_name, bases_name))

    # A result of -1 signals an exception raised by the C API call.
    getErrorExitBoolCode(
        condition="%s == -1" % result_name,
        release_names=(candidate_name, bases_name),
        emit=emit,
        context=context,
    )

    to_name.getCType().emitAssignmentCodeFromBoolCondition(
        to_name=to_name, condition="%s != 0" % result_name, emit=emit
    )
| 36.894198 | 87 | 0.651711 |
acf76fc7be01b72fbadcea10881bb3191aaba41f | 169 | py | Python | one_fm/grd/doctype/pifss_monthly_deduction_tool_table/test_pifss_monthly_deduction_tool_table.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 16 | 2021-06-14T23:56:47.000Z | 2022-03-22T12:05:06.000Z | one_fm/grd/doctype/pifss_monthly_deduction_tool_table/test_pifss_monthly_deduction_tool_table.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 119 | 2020-08-17T16:27:45.000Z | 2022-03-28T12:42:56.000Z | one_fm/grd/doctype/pifss_monthly_deduction_tool_table/test_pifss_monthly_deduction_tool_table.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 12 | 2021-05-16T13:35:40.000Z | 2022-02-21T12:41:04.000Z | # Copyright (c) 2021, omar jaber and Contributors
# See license.txt
# import frappe
import unittest
class TestPIFSSMonthlyDeductionToolTable(unittest.TestCase):
    """Placeholder test case for the PIFSS Monthly Deduction Tool Table doctype."""
| 18.777778 | 60 | 0.804734 |
acf7708a91ab6897543c12d4dccd1afcc0121165 | 697 | py | Python | src/leads/migrations/0007_auto_20210806_1150.py | dhavall13/CRM-System | f0635ec2eb85cc98817f0a7ead89e24ae320980e | [
"MIT"
] | 2 | 2021-12-15T17:01:23.000Z | 2021-12-15T17:02:23.000Z | src/leads/migrations/0007_auto_20210806_1150.py | dhavall13/CRM-System | f0635ec2eb85cc98817f0a7ead89e24ae320980e | [
"MIT"
] | null | null | null | src/leads/migrations/0007_auto_20210806_1150.py | dhavall13/CRM-System | f0635ec2eb85cc98817f0a7ead89e24ae320980e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-06 06:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops Lead.category, adds a required
    # organization foreign key to Agent, then removes the Category model.
    # Operation order matters and must not be changed.

    dependencies = [
        ('leads', '0006_auto_20210806_1143'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='lead',
            name='category',
        ),
        migrations.AddField(
            model_name='agent',
            name='organization',
            # default=1 backfills existing Agent rows with the userprofile of
            # pk 1; preserve_default=False means the default is one-off and
            # not kept on the field afterwards.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='leads.userprofile'),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Category',
        ),
    ]
| 24.892857 | 116 | 0.593974 |
acf771151a63a80a41cea773fd55c66b5743840a | 27,651 | py | Python | agent_stable_baselines/stable_baselines/ppo2/ppo2.py | Jannkar/doom_actionspace | 37663341f60a05943202b77394a4203d070fad95 | [
"MIT"
] | 1 | 2020-04-24T13:54:01.000Z | 2020-04-24T13:54:01.000Z | agent_stable_baselines/stable_baselines/ppo2/ppo2.py | Jannkar/doom_actionspace | 37663341f60a05943202b77394a4203d070fad95 | [
"MIT"
] | null | null | null | agent_stable_baselines/stable_baselines/ppo2/ppo2.py | Jannkar/doom_actionspace | 37663341f60a05943202b77394a4203d070fad95 | [
"MIT"
] | null | null | null | import time
import sys
import multiprocessing
from collections import deque
import gym
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import LstmPolicy, ActorCriticPolicy
from stable_baselines.a2c.utils import total_episode_reward_logger
class PPO2(ActorCriticRLModel):
    """
    Proximal Policy Optimization algorithm (GPU version).

    Paper: https://arxiv.org/abs/1707.06347

    :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param gamma: (float) Discount factor
    :param n_steps: (int) The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param ent_coef: (float) Entropy coefficient for the loss caculation
    :param learning_rate: (float or callable) The learning rate, it can be a function
    :param vf_coef: (float) Value function coefficient for the loss calculation
    :param max_grad_norm: (float) The maximum value for the gradient clipping
    :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
    :param nminibatches: (int) Number of training minibatches per update. For recurrent policies,
        the number of environments run in parallel should be a multiple of nminibatches.
    :param noptepochs: (int) Number of epoch when optimizing the surrogate
    :param cliprange: (float or callable) Clipping parameter, it can be a function
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
    :param freeze_base_nn: (bool) Whether to keep main nn body frozen (do not calculate gradients)
    :param freeze_vf: (bool) Whether to freeze value function weights
    :freeze_last_fc: (bool) Whether to freeze last fully connected layer weights (output layer)
    """

    def __init__(self, policy, env, gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5,
                 max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2, verbose=0,
                 tensorboard_log=None, _init_setup_model=True, policy_kwargs=None, freeze_base_nn=False, freeze_vf=False, freeze_last_fc=False):
        super(PPO2, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
                                   _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)

        self.learning_rate = learning_rate
        self.cliprange = cliprange
        self.n_steps = n_steps
        self.ent_coef = ent_coef
        self.vf_coef = vf_coef
        self.max_grad_norm = max_grad_norm
        self.gamma = gamma
        self.lam = lam
        self.nminibatches = nminibatches
        self.noptepochs = noptepochs
        self.tensorboard_log = tensorboard_log

        # TensorFlow graph objects, populated by setup_model().
        self.graph = None
        self.sess = None
        self.action_ph = None
        self.advs_ph = None
        self.rewards_ph = None
        self.old_neglog_pac_ph = None
        self.old_vpred_ph = None
        self.learning_rate_ph = None
        self.clip_range_ph = None
        self.entropy = None
        self.vf_loss = None
        self.pg_loss = None
        self.approxkl = None
        self.clipfrac = None
        self.params = None
        self._train = None
        self.loss_names = None
        self.train_model = None
        self.act_model = None
        self.step = None
        self.proba_step = None
        self.value = None
        self.initial_state = None
        self.n_batch = None
        self.summary = None
        self.episode_reward = None

        # If we should freeze the main body of the network or value function
        self.freeze_base_nn = freeze_base_nn
        self.freeze_vf = freeze_vf
        self.freeze_last_fc = freeze_last_fc

        if _init_setup_model:
            self.setup_model()

    def setup_model(self):
        # Build the full TF graph: an acting model (batch of 1 step per env),
        # a weight-sharing training model, the clipped PPO losses, summaries
        # and the Adam update op.
        with SetVerbosity(self.verbose):

            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
                "an instance of common.policies.ActorCriticPolicy."

            self.n_batch = self.n_envs * self.n_steps

            n_cpu = multiprocessing.cpu_count()
            if sys.platform == 'darwin':
                n_cpu //= 2

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.sess = tf_util.make_session(num_cpu=n_cpu, graph=self.graph)

                n_batch_step = None
                n_batch_train = None
                if issubclass(self.policy, LstmPolicy):
                    assert self.n_envs % self.nminibatches == 0, "For recurrent policies, "\
                        "the number of environments run in parallel should be a multiple of nminibatches."
                    n_batch_step = self.n_envs
                    n_batch_train = self.n_batch // self.nminibatches

                act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                        n_batch_step, reuse=False, **self.policy_kwargs)
                # The training model reuses the acting model's variables but
                # works on minibatch-sized inputs.
                with tf.variable_scope("train_model", reuse=True,
                                       custom_getter=tf_util.outer_scope_getter("train_model")):
                    train_model = self.policy(self.sess, self.observation_space, self.action_space,
                                              self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
                                              reuse=True, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
                    self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
                    self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
                    self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
                    self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
                    self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
                    self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")

                    neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
                    self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())

                    vpred = train_model._value
                    # Clipped value update, as in the OpenAI baselines PPO2
                    # implementation: take the worst of clipped/unclipped loss.
                    vpredclipped = self.old_vpred_ph + tf.clip_by_value(
                        train_model._value - self.old_vpred_ph, - self.clip_range_ph, self.clip_range_ph)
                    vf_losses1 = tf.square(vpred - self.rewards_ph)
                    vf_losses2 = tf.square(vpredclipped - self.rewards_ph)
                    self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
                    # PPO clipped surrogate objective on the probability ratio.
                    ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
                    pg_losses = -self.advs_ph * ratio
                    pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
                                                                  self.clip_range_ph)
                    self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
                    self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
                    self.clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), self.clip_range_ph)))
                    loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef

                    tf.summary.scalar('entropy_loss', self.entropy)
                    tf.summary.scalar('policy_gradient_loss', self.pg_loss)
                    tf.summary.scalar('value_function_loss', self.vf_loss)
                    tf.summary.scalar('approximate_kullback-leiber', self.approxkl)
                    tf.summary.scalar('clip_factor', self.clipfrac)
                    tf.summary.scalar('loss', loss)

                    with tf.variable_scope('model'):
                        self.params = tf.trainable_variables()
                        for var in self.params:
                            tf.summary.histogram(var.name, var)
                        if self.freeze_base_nn or self.freeze_vf:
                            # Parse out non-trainable variables
                            trainable_params = []
                            print("Non-trainable params:")
                            for param in self.params:
                                # Variables are excluded from training by
                                # name-matching against the freeze flags.
                                if "base_nn_param" in param.name and self.freeze_base_nn:
                                    print(param.name)
                                    pass
                                elif ("model/q" in param.name or "model/pi" in param.name) and self.freeze_last_fc:
                                    print(param.name)
                                    pass
                                elif "vf" in param.name and self.freeze_vf:
                                    print(param.name)
                                    pass
                                else:
                                    trainable_params.append(param)
                        else:
                            trainable_params = self.params
                        print("Trainable params:")
                        for param in trainable_params:
                            print(param.name, param.shape)
                        grads = tf.gradients(loss, trainable_params)
                        if self.max_grad_norm is not None:
                            grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
                        grads = list(zip(grads, trainable_params))
                    trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
                    self._train = trainer.apply_gradients(grads)

                self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
                    tf.summary.histogram('discounted_rewards', self.rewards_ph)
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
                    tf.summary.histogram('learning_rate', self.learning_rate_ph)
                    tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
                    tf.summary.histogram('advantage', self.advs_ph)
                    tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
                    tf.summary.histogram('clip_range', self.clip_range_ph)
                    tf.summary.scalar('old_neglog_action_probabilty', tf.reduce_mean(self.old_neglog_pac_ph))
                    tf.summary.histogram('old_neglog_action_probabilty', self.old_neglog_pac_ph)
                    tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))
                    tf.summary.histogram('old_value_pred', self.old_vpred_ph)
                    if len(self.observation_space.shape) == 3:
                        tf.summary.image('observation', train_model.obs_ph)
                    else:
                        tf.summary.histogram('observation', train_model.obs_ph)

                self.train_model = train_model
                self.act_model = act_model
                self.step = act_model.step
                self.proba_step = act_model.proba_step
                self.value = act_model.value
                self.initial_state = act_model.initial_state
                tf.global_variables_initializer().run(session=self.sess)  # pylint: disable=E1101

                self.summary = tf.summary.merge_all()

    def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,
                    writer, states=None):
        """
        Training of PPO2 Algorithm

        :param learning_rate: (float) learning rate
        :param cliprange: (float) Clipping factor
        :param obs: (np.ndarray) The current observation of the environment
        :param returns: (np.ndarray) the rewards
        :param masks: (np.ndarray) The last masks for done episodes (used in recurent policies)
        :param actions: (np.ndarray) the actions
        :param values: (np.ndarray) the values
        :param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions
        :param update: (int) the current step iteration
        :param writer: (TensorFlow Summary.writer) the writer for tensorboard
        :param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model
        :return: policy gradient loss, value function loss, policy entropy,
                approximation of kl divergence, updated clipping range, training update operation
        """
        # Normalize advantages per minibatch (zero mean, unit variance).
        advs = returns - values
        advs = (advs - advs.mean()) / (advs.std() + 1e-8)
        td_map = {self.train_model.obs_ph: obs, self.action_ph: actions, self.advs_ph: advs, self.rewards_ph: returns,
                  self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,
                  self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}
        if states is not None:
            td_map[self.train_model.states_ph] = states
            td_map[self.train_model.masks_ph] = masks

        # update_fac converts the raw update counter into a tensorboard step.
        if states is None:
            update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
        else:
            update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1

        if writer is not None:
            # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
            if (1 + update) % 10 == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                    [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
                    td_map, options=run_options, run_metadata=run_metadata)
                writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))
            else:
                summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                    [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
                    td_map)
            writer.add_summary(summary, (update * update_fac))
        else:
            policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
                [self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)

        return policy_loss, value_loss, policy_entropy, approxkl, clipfrac

    def learn(self, total_timesteps, callback=None, seed=None, log_interval=1, tb_log_name="PPO2"):
        # Main training loop: collect n_steps * n_envs samples per update,
        # then run noptepochs epochs of minibatch SGD over them.
        # Transform to callable if needed
        self.learning_rate = get_schedule_fn(self.learning_rate)
        self.cliprange = get_schedule_fn(self.cliprange)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn(seed)

            runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
            self.episode_reward = np.zeros((self.n_envs,))

            ep_info_buf = deque(maxlen=100)
            t_first_start = time.time()

            nupdates = total_timesteps // self.n_batch
            for update in range(1, nupdates + 1):
                assert self.n_batch % self.nminibatches == 0
                batch_size = self.n_batch // self.nminibatches
                t_start = time.time()
                # frac anneals from 1 to ~0; schedules receive the remaining
                # progress fraction.
                frac = 1.0 - (update - 1.0) / nupdates
                lr_now = self.learning_rate(frac)
                cliprangenow = self.cliprange(frac)
                # true_reward is the reward without discount
                obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
                ep_info_buf.extend(ep_infos)
                mb_loss_vals = []
                if states is None:  # nonrecurrent version
                    inds = np.arange(self.n_batch)
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(inds)
                        for start in range(0, self.n_batch, batch_size):
                            timestep = ((update * self.noptepochs * self.n_batch + epoch_num * self.n_batch + start) //
                                        batch_size)
                            end = start + batch_size
                            mbinds = inds[start:end]
                            slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
                                                                 update=timestep))
                else:  # recurrent version
                    # Shuffle whole environment trajectories, not individual
                    # steps, so the LSTM state stays consistent.
                    assert self.n_envs % self.nminibatches == 0
                    env_indices = np.arange(self.n_envs)
                    flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
                    envs_per_batch = batch_size // self.n_steps
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(env_indices)
                        for start in range(0, self.n_envs, envs_per_batch):
                            timestep = ((update * self.noptepochs * self.n_envs + epoch_num * self.n_envs + start) //
                                        envs_per_batch)
                            end = start + envs_per_batch
                            mb_env_inds = env_indices[start:end]
                            mb_flat_inds = flat_indices[mb_env_inds].ravel()
                            slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_states = states[mb_env_inds]
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=timestep,
                                                                 writer=writer, states=mb_states))

                loss_vals = np.mean(mb_loss_vals, axis=0)
                t_now = time.time()
                fps = int(self.n_batch / (t_now - t_start))

                if writer is not None:
                    self.episode_reward = total_episode_reward_logger(self.episode_reward,
                                                                      true_reward.reshape((self.n_envs, self.n_steps)),
                                                                      masks.reshape((self.n_envs, self.n_steps)),
                                                                      writer, update * (self.n_batch + 1))

                if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
                    explained_var = explained_variance(values, returns)
                    logger.logkv("serial_timesteps", update * self.n_steps)
                    logger.logkv("nupdates", update)
                    logger.logkv("total_timesteps", update * self.n_batch)
                    logger.logkv("fps", fps)
                    logger.logkv("explained_variance", float(explained_var))
                    logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
                    logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
                    logger.logkv('time_elapsed', t_start - t_first_start)
                    for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
                        logger.logkv(loss_name, loss_val)
                    logger.dumpkvs()

                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) == False:
                        break

            return self

    def save(self, save_path):
        # Serialize hyperparameters and spaces, plus the current values of
        # all trainable variables, to save_path.
        data = {
            "gamma": self.gamma,
            "n_steps": self.n_steps,
            "vf_coef": self.vf_coef,
            "ent_coef": self.ent_coef,
            "max_grad_norm": self.max_grad_norm,
            "learning_rate": self.learning_rate,
            "lam": self.lam,
            "nminibatches": self.nminibatches,
            "noptepochs": self.noptepochs,
            "cliprange": self.cliprange,
            "verbose": self.verbose,
            "policy": self.policy,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "n_envs": self.n_envs,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }

        params = self.sess.run(self.params)

        self._save_to_file(save_path, data=data, params=params)
class Runner(AbstractEnvRunner):
    def __init__(self, *, env, model, n_steps, gamma, lam):
        """
        A runner to learn the policy of an environment for a model

        :param env: (Gym environment) The environment to learn from
        :param model: (Model) The model to learn
        :param n_steps: (int) The number of steps to run for each environment
        :param gamma: (float) Discount factor
        :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
        """
        super().__init__(env=env, model=model, n_steps=n_steps)
        self.lam = lam
        self.gamma = gamma

    def run(self):
        """
        Run a learning step of the model

        :return:
            - observations: (np.ndarray) the observations
            - rewards: (np.ndarray) the rewards
            - masks: (numpy bool) whether an episode is over or not
            - actions: (np.ndarray) the actions
            - values: (np.ndarray) the value function output
            - negative log probabilities: (np.ndarray)
            - states: (np.ndarray) the internal states of the recurrent policies
            - infos: (dict) the extra information of the model
        """
        # mb stands for minibatch
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
        mb_states = self.states
        ep_infos = []
        # Roll out n_steps in the (vectorized) environment, recording
        # everything needed for the PPO update.
        for _ in range(self.n_steps):
            actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            clipped_actions = actions
            # Clip the actions to avoid out of bound error
            if isinstance(self.env.action_space, gym.spaces.Box):
                clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
            self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
            for info in infos:
                maybe_ep_info = info.get('episode')
                if maybe_ep_info is not None:
                    ep_infos.append(maybe_ep_info)
            mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        last_values = self.model.value(self.obs, self.states, self.dones)
        # discount/bootstrap off value fn
        # Generalized Advantage Estimation (GAE): walk the rollout backwards,
        # accumulating the discounted TD residuals; episode boundaries zero
        # the recursion via nextnonterminal.
        mb_advs = np.zeros_like(mb_rewards)
        true_reward = np.copy(mb_rewards)
        last_gae_lam = 0
        for step in reversed(range(self.n_steps)):
            if step == self.n_steps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[step + 1]
                nextvalues = mb_values[step + 1]
            delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
            mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
        mb_returns = mb_advs + mb_values
        # Reshape from (n_steps, n_envs, ...) to (n_steps * n_envs, ...).
        mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
            map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
        return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
def get_schedule_fn(value_schedule):
    """
    Transform (if needed) learning rate and clip range
    to callable.

    :param value_schedule: (callable or float) either a constant value, or a
        schedule function taking the remaining progress fraction (1 -> 0)
    :return: (function) a schedule function of the progress fraction
    """
    # If the passed schedule is a plain number, create a constant function.
    # Accept int as well as float: the original float-only check made e.g.
    # `cliprange=1` fail the callable assertion below.
    if isinstance(value_schedule, (int, float)):
        constant_value = float(value_schedule)

        def value_schedule(_progress):
            # The progress argument is ignored for constant schedules.
            return constant_value
    else:
        assert callable(value_schedule)
    return value_schedule
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def swap_and_flatten(arr):
    """
    swap and then flatten axes 0 and 1

    :param arr: (np.ndarray)
    :return: (np.ndarray)
    """
    first_dim, second_dim = arr.shape[0], arr.shape[1]
    remaining_dims = arr.shape[2:]
    # Merge the two leading axes after swapping them, keeping the rest.
    return arr.swapaxes(0, 1).reshape(first_dim * second_dim, *remaining_dims)
def constfn(val):
    """
    Create a function that returns a constant
    It is useful for learning rate schedule (to avoid code duplication)

    :param val: (float)
    :return: (function)
    """
    def _constant(_progress):
        # The schedule argument is ignored; always return the same value.
        return val
    return _constant
def safe_mean(arr):
    """
    Compute the mean of an array if there is at least one element.
    For empty array, return nan. It is used for logging only.

    :param arr: (np.ndarray)
    :return: (float)
    """
    if len(arr) == 0:
        return np.nan
    return np.mean(arr)
| 52.073446 | 145 | 0.583089 |
acf771fc4ff15c5a894f457251a7177c12a04804 | 8,280 | py | Python | Alfred.alfredpreferences/workflows/user.workflow.89454126-BB98-4515-9F17-0E98C6CBCC0A/libs/faker/cli.py | Hunter-Gu/my-alfred-config | e33c006520e5e5c49332490a94e0b923d856a64a | [
"MIT"
] | 1 | 2022-01-07T03:34:39.000Z | 2022-01-07T03:34:39.000Z | faker/cli.py | Saber-xxf/faker1 | c966a144b370f7abb568a5154c4ef704e846722e | [
"MIT"
] | null | null | null | faker/cli.py | Saber-xxf/faker1 | c966a144b370f7abb568a5154c4ef704e846722e | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import argparse
from faker import Faker, documentor
from faker import VERSION
from faker.config import AVAILABLE_LOCALES, DEFAULT_LOCALE, META_PROVIDERS_MODULES
if sys.version < '3':
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
__author__ = 'joke2k'
def print_provider(doc, provider, formatters, excludes=None, output=None):
    """Write the documentation table for one provider to *output*.

    :param doc: documentor.Documentor used for the provider name and margins
    :param provider: the provider object being documented
    :param formatters: mapping of formatter signature -> example value
    :param excludes: iterable of formatter signatures to skip (default none)
    :param output: writable text stream (default sys.stdout)
    """
    output = output or sys.stdout
    if excludes is None:
        excludes = []

    print(file=output)
    print("### {0}".format(
        doc.get_provider_name(provider)), file=output)
    print(file=output)

    for signature, example in formatters.items():
        if signature in excludes:
            continue
        try:
            # text_type is unicode on py2, str on py3 (module-level alias).
            lines = text_type(example).expandtabs().splitlines()
        except UnicodeDecodeError:
            # The example is actually made of bytes.
            # We could coerce to bytes, but that would fail anyway when we
            # try to `print` the line.
            lines = ["<bytes>"]
        except UnicodeEncodeError:
            raise Exception('error on "{0}" with value "{1}"'.format(
                signature, example))
        margin = max(30, doc.max_name_len + 1)
        remains = 150 - margin
        separator = '#'
        # Wrap long example lines at `remains` columns; only the first chunk
        # shows the signature and separator, continuations are blanked below.
        for line in lines:
            for i in range(0, (len(line) // remains) + 1):
                print("\t{fake:<{margin}}{separator} {example}".format(
                    fake=signature,
                    separator=separator,
                    example=line[i * remains:(i + 1) * remains],
                    margin=margin
                ), file=output)
                signature = separator = ' '
def print_doc(provider_or_field=None,
              args=None, lang=DEFAULT_LOCALE, output=None, includes=None):
    """Print faker output or provider documentation to *output*.

    Three modes, depending on *provider_or_field*:
    - dotted path (contains '.'): document that single provider module;
    - plain name: call that formatter with *args* and print one fake value;
    - None/empty: dump the documentation for all providers in all locales.

    :param provider_or_field: dotted provider path, formatter name, or None
    :param args: positional arguments forwarded to the formatter
    :param lang: locale used to build the Faker factory
    :param output: writable text stream (default sys.stdout)
    :param includes: additional provider module paths to load
    """
    args = args or []
    output = output or sys.stdout
    fake = Faker(locale=lang, includes=includes)

    from faker.providers import BaseProvider
    # Formatters inherited from BaseProvider are shared by every provider,
    # so they are excluded from the per-locale listings below.
    base_provider_formatters = [f for f in dir(BaseProvider)]

    if provider_or_field:
        if '.' in provider_or_field:
            # Dotted path: the second-to-last part may name a locale,
            # e.g. "faker.providers.address.de_DE".
            parts = provider_or_field.split('.')
            locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else lang
            fake = Faker(locale, providers=[provider_or_field], includes=includes)
            doc = documentor.Documentor(fake)
            doc.already_generated = base_provider_formatters
            print_provider(
                doc,
                fake.get_providers()[0],
                doc.get_provider_formatters(fake.get_providers()[0]),
                output=output)
        else:
            try:
                print(fake.format(provider_or_field, *args), end='', file=output)
            except AttributeError:
                raise ValueError('No faker found for "{0}({1})"'.format(
                    provider_or_field, args))

    else:
        # No specific target: document every provider of the base locale,
        # then every other available locale.
        doc = documentor.Documentor(fake)

        formatters = doc.get_formatters(with_args=True, with_defaults=True)

        for provider, fakers in formatters:

            print_provider(doc, provider, fakers, output=output)

        for language in AVAILABLE_LOCALES:
            if language == lang:
                continue
            print(file=output)
            print('## LANGUAGE {0}'.format(language), file=output)
            fake = Faker(locale=language)
            d = documentor.Documentor(fake)

            for p, fs in d.get_formatters(with_args=True, with_defaults=True,
                                          locale=language,
                                          excludes=base_provider_formatters):
                print_provider(d, p, fs, output=output)
class Command(object):
    """Command-line front end: parses arguments and prints fake data/docs."""

    def __init__(self, argv=None):
        # Default to the process arguments; keep a copy so later mutation of
        # sys.argv does not affect this command.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])

    def execute(self):
        """
        Given the command-line arguments, this creates a parser appropriate
        to that command, and runs it.
        """

        # retrieve default language from system environment
        default_locale = os.environ.get('LANG', 'en_US').split('.')[0]
        if default_locale not in AVAILABLE_LOCALES:
            default_locale = DEFAULT_LOCALE

        epilog = """supported locales:

  {0}

  faker can take a locale as an argument, to return localized data. If no
  localized provider is found, the factory falls back to the default en_US
  locale.

examples:

  $ faker address
  968 Bahringer Garden Apt. 722
  Kristinaland, NJ 09890

  $ faker -l de_DE address
  Samira-Niemeier-Allee 56
  94812 Biedenkopf

  $ faker profile ssn,birthdate
  {{'ssn': u'628-10-1085', 'birthdate': '2008-03-29'}}

  $ faker -r=3 -s=";" name
  Willam Kertzmann;
  Josiah Maggio;
  Gayla Schmitt;

""".format(', '.join(sorted(AVAILABLE_LOCALES)))

        formatter_class = argparse.RawDescriptionHelpFormatter
        parser = argparse.ArgumentParser(
            prog=self.prog_name,
            description='{0} version {1}'.format(self.prog_name, VERSION),
            epilog=epilog,
            formatter_class=formatter_class)

        parser.add_argument("--version", action="version",
                            version="%(prog)s {0}".format(VERSION))

        parser.add_argument('-o', metavar="output",
                            type=argparse.FileType('w'),
                            default=sys.stdout,
                            help="redirect output to a file")

        parser.add_argument('-l', '--lang',
                            choices=AVAILABLE_LOCALES,
                            default=default_locale,
                            metavar='LOCALE',
                            help="specify the language for a localized "
                                 "provider (e.g. de_DE)")

        parser.add_argument('-r', '--repeat',
                            default=1,
                            type=int,
                            help="generate the specified number of outputs")

        parser.add_argument('-s', '--sep',
                            default='\n',
                            help="use the specified separator after each "
                                 "output")

        parser.add_argument('-i',
                            '--include',
                            default=META_PROVIDERS_MODULES,
                            nargs='*',
                            help="list of additional custom providers to "
                                 "user, given as the import path of the module "
                                 "containing your Provider class (not the provider "
                                 "class itself)")

        parser.add_argument('fake',
                            action='store',
                            nargs='?',
                            help="name of the fake to generate output for "
                                 "(e.g. profile)")

        parser.add_argument('fake_args',
                            metavar="fake argument",
                            action='store',
                            nargs='*',
                            help="optional arguments to pass to the fake "
                                 "(e.g. the profile fake takes an optional "
                                 "list of comma separated field names as the "
                                 "first argument)")

        arguments = parser.parse_args(self.argv[1:])

        for i in range(arguments.repeat):

            print_doc(arguments.fake,
                      arguments.fake_args,
                      lang=arguments.lang,
                      output=arguments.o,
                      includes=arguments.include
                      )
            print(arguments.sep, file=arguments.o)

            if not arguments.fake:
                # repeat not supported for all docs
                break
def execute_from_command_line(argv=None):
    """Entry point: build a Command from *argv* and run it.

    Bails out when stdout has no known encoding, because faker output may
    contain non-ASCII characters that could not be printed in that case.
    """
    if sys.stdout.encoding is None:
        message = ('please set python env PYTHONIOENCODING=UTF-8, example: '
                   'export PYTHONIOENCODING=UTF-8, when writing to stdout')
        print(message, file=sys.stderr)
        exit(1)

    Command(argv).execute()
| 34.5 | 82 | 0.542391 |
acf7734be170ae460676d4f728e42c5c1445044c | 249 | py | Python | manage.py | gitgik/photo-editing-app | 730f88a1946d425cbe790cd4ed0689a1938a8cd0 | [
"MIT"
] | 5 | 2017-02-23T14:24:22.000Z | 2021-02-23T03:43:18.000Z | manage.py | gitgik/photo-editing-app | 730f88a1946d425cbe790cd4ed0689a1938a8cd0 | [
"MIT"
] | 1 | 2021-06-08T19:14:01.000Z | 2021-06-08T19:14:01.000Z | manage.py | gitgik/photo-editing-app | 730f88a1946d425cbe790cd4ed0689a1938a8cd0 | [
"MIT"
] | 2 | 2019-01-21T20:16:05.000Z | 2019-06-23T14:30:50.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pictor.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.636364 | 70 | 0.771084 |
acf7737078b1d5bb7a3c693bf9e37b49bfc0b8e0 | 9,477 | py | Python | tests/test_getobservations.py | NVE/varsomdata | 9de2757b66c157feb731827dfa14f7b58d3d4ccd | [
"MIT"
] | 1 | 2020-03-19T13:48:31.000Z | 2020-03-19T13:48:31.000Z | tests/test_getobservations.py | NVE/varsomdata | 9de2757b66c157feb731827dfa14f7b58d3d4ccd | [
"MIT"
] | 12 | 2020-10-05T16:05:36.000Z | 2021-06-28T11:11:41.000Z | tests/test_getobservations.py | NVE/varsomdata | 9de2757b66c157feb731827dfa14f7b58d3d4ccd | [
"MIT"
] | 1 | 2020-06-16T10:58:23.000Z | 2020-06-16T10:58:23.000Z | import unittest as ut
from varsomdata import getobservations as go
import pandas as pd
class TestGetSingeFormsMethods(ut.TestCase):
    """Integration tests for the single-form getters in ``getobservations``.

    Each test requests the same period in up to three output modes -- typed
    objects (default), a pandas ``DataFrame`` and a plain count -- and checks
    that the modes agree. These tests call the live observation web service,
    so they require network access and can drift if historic data is edited.

    Some tests assert ``count <= len(objects) <= 3 * count`` instead of
    strict equality -- presumably because a single registration can expand
    into several observation objects; TODO confirm against getobservations.
    """

    def test_get_general_observation(self):
        """Object list length matches count; DataFrame mode returns a DataFrame."""
        general_obs = go.get_general_observation('2018-01-20', '2018-02-01')
        general_obs_df = go.get_general_observation('2018-01-20', '2018-02-01', output='DataFrame')
        general_obs_count = go.get_general_observation('2018-01-20', '2018-02-01', output='Count')
        self.assertEqual(len(general_obs), general_obs_count)
        self.assertIsInstance(general_obs_df, pd.DataFrame)

    def test_get_incident(self):
        """Incident observations agree across the three output modes."""
        incident = go.get_incident('2012-03-01', '2012-03-10')
        incident_df = go.get_incident('2012-03-01', '2012-03-10', output='DataFrame')
        incident_count = go.get_incident('2012-03-01', '2012-03-10', output='Count')
        self.assertEqual(len(incident), incident_count)
        self.assertIsInstance(incident_df, pd.DataFrame)

    def test_danger_sign(self):
        """Danger-sign list size is within the tolerated multiple of the count."""
        danger_signs = go.get_danger_sign('2017-12-13', '2017-12-16', geohazard_tids=10)
        danger_signs_df = go.get_danger_sign('2017-12-13', '2017-12-16', output='DataFrame')
        danger_signs_count = go.get_danger_sign('2017-12-13', '2017-12-16', output='Count')
        self.assertTrue(danger_signs_count <= len(danger_signs) <= 3*danger_signs_count)
        self.assertIsInstance(danger_signs_df, pd.DataFrame)

    def test_get_damage_observation(self):
        """Damage-observation list size is within the tolerated multiple of the count."""
        damages = go.get_damage_observation('2017-01-01', '2018-02-01')
        damages_df = go.get_damage_observation('2017-01-01', '2018-02-01', output='DataFrame')
        damages_count = go.get_damage_observation('2017-01-01', '2018-02-01', output='Count')
        self.assertTrue(damages_count <= len(damages) <= 3*damages_count)
        self.assertIsInstance(damages_df, pd.DataFrame)

    def test_weather_observation(self):
        """Weather observations agree across the three output modes."""
        weather = go.get_weather_observation('2018-01-28', '2018-02-01')
        weather_df = go.get_weather_observation('2018-01-28', '2018-02-01', output='DataFrame')
        weather_count = go.get_weather_observation('2018-01-28', '2018-02-01', output='Count')
        self.assertEqual(len(weather), weather_count)
        self.assertIsInstance(weather_df, pd.DataFrame)

    def test_get_snow_surface_observation(self):
        """Snow-surface observations agree across the three output modes."""
        snow_surface = go.get_snow_surface_observation('2018-01-29', '2018-02-01')
        snow_surface_df = go.get_snow_surface_observation('2018-01-29', '2018-02-01', output='DataFrame')
        # Renamed from the original misspelling "snow_surface_conut".
        snow_surface_count = go.get_snow_surface_observation('2018-01-29', '2018-02-01', output='Count')
        self.assertEqual(len(snow_surface), snow_surface_count)
        self.assertIsInstance(snow_surface_df, pd.DataFrame)

    def test_get_tests(self):
        """Stability-test observations: list size within tolerance of count.

        NOTE(review): the first call uses a different date range
        ('2018-01-25'..'2018-02-01') than the DataFrame/Count calls
        ('2018-02-01'..'2018-02-05'), so the size comparison spans two
        different periods -- confirm whether that is intentional.
        """
        tests = go.get_tests('2018-01-25', '2018-02-01')
        tests_df = go.get_tests('2018-02-01', '2018-02-05', output='DataFrame')
        tests_count = go.get_tests('2018-02-01', '2018-02-05', output='Count')
        self.assertTrue(tests_count <= len(tests) <= 3*tests_count)
        self.assertIsInstance(tests_df, pd.DataFrame)

    def test_avalanche_obs(self):
        """Avalanche observations agree across the three output modes."""
        avalanche_obs = go.get_avalanche('2015-03-01', '2015-03-10')
        avalanche_obs_df = go.get_avalanche('2015-03-01', '2015-03-10', output='DataFrame')
        avalanche_obs_count = go.get_avalanche('2015-03-01', '2015-03-10', output='Count')
        self.assertEqual(len(avalanche_obs), avalanche_obs_count)
        self.assertIsInstance(avalanche_obs_df, pd.DataFrame)

    def test_avalanche_activity(self):
        """Activity form (v1): typed objects, DataFrame and raw-data count agree."""
        from_date, to_date = '2015-03-01', '2015-03-10'
        avalanche_activity = go.get_avalanche_activity(from_date, to_date)
        avalanche_activity_df = go.get_avalanche_activity(from_date, to_date, output='DataFrame')
        avalanche_activity_count = go.get_avalanche_activity(from_date, to_date, output='Count')
        self.assertIsInstance(avalanche_activity[0], go.AvalancheActivityObs)
        self.assertIsInstance(avalanche_activity_df, pd.DataFrame)
        # Cross-check the count against the generic getter for the same
        # registration type (27 = avalanche activity v1).
        avalanche_activity_obs = go.get_data(from_date, to_date, registration_types=27)
        self.assertEqual(avalanche_activity_count, len(avalanche_activity_obs))

    def test_avalanche_activity_2(self):
        """Activity form (v2): typed objects, DataFrame and raw-data count agree."""
        from_date, to_date = '2017-03-01', '2017-03-10'
        avalanche_activity_2 = go.get_avalanche_activity_2(from_date, to_date)
        avalanche_activity_2_df = go.get_avalanche_activity_2(from_date, to_date, output='DataFrame')
        avalanche_activity_2_count = go.get_avalanche_activity_2(from_date, to_date, output='Count')
        self.assertIsInstance(avalanche_activity_2[0], go.AvalancheActivityObs2)
        self.assertIsInstance(avalanche_activity_2_df, pd.DataFrame)
        # Registration type 33 = avalanche activity v2.
        avalanche_activity_2_obs = go.get_data(from_date, to_date, registration_types=33)
        self.assertEqual(avalanche_activity_2_count, len(avalanche_activity_2_obs))

    def test_get_avalanche_evaluation(self):
        """Evaluation form (v1) agrees across the three output modes."""
        avalanche_evaluations = go.get_avalanche_evaluation('2012-03-01', '2012-03-10')
        avalanche_evaluations_df = go.get_avalanche_evaluation('2012-03-01', '2012-03-10', output='DataFrame')
        avalanche_evaluations_count = go.get_avalanche_evaluation('2012-03-01', '2012-03-10', output='Count')
        self.assertEqual(len(avalanche_evaluations), avalanche_evaluations_count)
        self.assertIsInstance(avalanche_evaluations_df, pd.DataFrame)

    def test_get_avalanche_evaluation_2(self):
        """Evaluation form (v2) agrees across the three output modes."""
        avalanche_evaluations_2 = go.get_avalanche_evaluation_2('2013-03-01', '2013-03-10')
        avalanche_evaluations_2_df = go.get_avalanche_evaluation_2('2013-03-01', '2013-03-10', output='DataFrame')
        avalanche_evaluations_2_count = go.get_avalanche_evaluation_2('2013-03-01', '2013-03-10', output='Count')
        self.assertEqual(len(avalanche_evaluations_2), avalanche_evaluations_2_count)
        self.assertIsInstance(avalanche_evaluations_2_df, pd.DataFrame)

    def test_get_avalanche_evaluation_3(self):
        """Evaluation form (v3) agrees across the three output modes."""
        avalanche_evaluations_3 = go.get_avalanche_evaluation_3('2017-03-01', '2017-03-10')
        avalanche_evaluations_3_df = go.get_avalanche_evaluation_3('2017-03-01', '2017-03-10', output='DataFrame')
        avalanche_evaluations_3_count = go.get_avalanche_evaluation_3('2017-03-01', '2017-03-10', output='Count')
        self.assertEqual(len(avalanche_evaluations_3), avalanche_evaluations_3_count)
        self.assertIsInstance(avalanche_evaluations_3_df, pd.DataFrame)

    def test_get_avalanche_problem_2(self):
        """Avalanche-problem list size is within the tolerated multiple of the count."""
        problems = go.get_avalanche_problem_2('2017-03-01', '2017-03-10')
        problems_df = go.get_avalanche_problem_2('2017-03-01', '2017-03-10', output='DataFrame')
        problems_count = go.get_avalanche_problem_2('2017-03-01', '2017-03-10', output='Count')
        self.assertTrue(problems_count <= len(problems) <= 3*problems_count)
        self.assertIsInstance(problems_df, pd.DataFrame)

    def test_get_snow_profile(self):
        """Snow profiles agree across the three output modes."""
        snow_profiles = go.get_snow_profile('2018-12-13', '2018-12-16')
        snow_profiles_df = go.get_snow_profile('2018-12-13', '2018-12-16', output='DataFrame')
        snow_profiles_count = go.get_snow_profile('2018-12-13', '2018-12-16', output='Count')
        self.assertEqual(len(snow_profiles), snow_profiles_count)
        self.assertIsInstance(snow_profiles_df, pd.DataFrame)

    def test_get_ice_thickness(self):
        """Ice-thickness observations agree across the three output modes."""
        ice_thicks = go.get_ice_thickness('2018-01-20', '2018-02-10')
        ice_thicks_df = go.get_ice_thickness('2018-01-20', '2018-02-10', output='DataFrame')
        ice_thicks_count = go.get_ice_thickness('2018-01-20', '2018-02-10', output='Count')
        self.assertEqual(len(ice_thicks), ice_thicks_count)
        self.assertIsInstance(ice_thicks_df, pd.DataFrame)

    def test_get_ice_cover(self):
        """Ice-cover observations agree across the three output modes."""
        ice_cover = go.get_ice_cover('2012-03-01', '2012-03-10')
        ice_cover_df = go.get_ice_cover('2012-03-01', '2012-03-10', output='DataFrame')
        ice_cover_count = go.get_ice_cover('2012-03-01', '2012-03-10', output='Count')
        self.assertEqual(len(ice_cover), ice_cover_count)
        self.assertIsInstance(ice_cover_df, pd.DataFrame)

    def test_get_water_level(self):
        """Water-level form (v1) agrees across the three output modes."""
        water_levels = go.get_water_level('2015-01-01', '2016-01-01')
        water_levels_df = go.get_water_level('2015-01-01', '2016-01-01', output='DataFrame')
        water_levels_count = go.get_water_level('2015-01-01', '2016-01-01', output='Count')
        self.assertEqual(len(water_levels), water_levels_count)
        self.assertIsInstance(water_levels_df, pd.DataFrame)

    def test_get_water_level_2(self):
        """Water-level form (v2) agrees across the three output modes."""
        new_water_levels = go.get_water_level_2('2017-06-01', '2018-02-01')
        new_water_levels_df = go.get_water_level_2('2017-06-01', '2018-02-01', output='DataFrame')
        new_water_levels_count = go.get_water_level_2('2017-06-01', '2018-02-01', output='Count')
        self.assertEqual(len(new_water_levels), new_water_levels_count)
        self.assertIsInstance(new_water_levels_df, pd.DataFrame)

    def test_get_land_slide_obs(self):
        """Land-slide observations agree across the three output modes."""
        land_slides = go.get_land_slide_obs('2018-01-01', '2018-02-01')
        land_slides_df = go.get_land_slide_obs('2018-01-01', '2018-02-01', output='DataFrame')
        land_slides_count = go.get_land_slide_obs('2018-01-01', '2018-02-01', output='Count')
        self.assertEqual(len(land_slides), land_slides_count)
        self.assertIsInstance(land_slides_df, pd.DataFrame)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    ut.main()
| 58.141104 | 114 | 0.718054 |
acf773af7e2a8fff45564f1e9ab8fb6b8d772e93 | 24,661 | py | Python | official/nlp/modeling/networks/bert_encoder_test.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2020-09-14T10:46:07.000Z | 2020-09-14T10:46:07.000Z | official/nlp/modeling/networks/bert_encoder_test.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/nlp/modeling/networks/bert_encoder_test.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import bert_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class BertEncoderTest(keras_parameterized.TestCase):
  """Tests construction, invocation and serialization of BertEncoder(V2).

  Parameterized cases run most tests against both the original BertEncoder
  and BertEncoderV2; "dict outputs" tests exercise the dictionary-valued
  output interface.
  """

  def tearDown(self):
    super(BertEncoderTest, self).tearDown()
    # Reset the dtype policy so a mixed_float16 test does not leak its
    # policy into subsequently run tests.
    tf.keras.mixed_precision.set_global_policy("float32")

  @parameterized.named_parameters(
      ("encoder_v2", bert_encoder.BertEncoderV2),
      ("encoder_v1", bert_encoder.BertEncoder),
  )
  def test_dict_outputs_network_creation(self, encoder_cls):
    """Dict-output encoder builds with expected output shapes and dtypes."""
    hidden_size = 32
    sequence_length = 21
    # Create a small BertEncoder for testing.
    if encoder_cls is bert_encoder.BertEncoderV2:
      kwargs = {}
    else:
      kwargs = dict(dict_outputs=True)
    test_network = encoder_cls(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        **kwargs)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    self.assertIsInstance(test_network.transformer_layers, list)
    self.assertLen(test_network.transformer_layers, 3)
    self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)

  @parameterized.named_parameters(
      ("encoder_v2", bert_encoder.BertEncoderV2),
      ("encoder_v1", bert_encoder.BertEncoder),
  )
  def test_dict_outputs_all_encoder_outputs_network_creation(self, encoder_cls):
    """Per-layer encoder outputs are all exposed with the expected shapes."""
    hidden_size = 32
    sequence_length = 21
    # Create a small BertEncoder for testing.
    test_network = encoder_cls(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        dict_outputs=True)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    all_encoder_outputs = dict_outputs["encoder_outputs"]
    pooled = dict_outputs["pooled_output"]
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertLen(all_encoder_outputs, 3)
    for data in all_encoder_outputs:
      self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)

  @parameterized.named_parameters(
      ("encoder_v2", bert_encoder.BertEncoderV2),
      ("encoder_v1", bert_encoder.BertEncoder),
  )
  def test_dict_outputs_network_creation_with_float16_dtype(self, encoder_cls):
    """Under mixed_float16, sequence output stays float32, pooled is float16."""
    hidden_size = 32
    sequence_length = 21
    tf.keras.mixed_precision.set_global_policy("mixed_float16")
    # Create a small BertEncoder for testing.
    test_network = encoder_cls(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        dict_outputs=True)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # If float_dtype is set to float16, the data output is float32 (from a layer
    # norm) and pool output should be float16.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float16, pooled.dtype)

  @parameterized.named_parameters(
      ("all_sequence_encoder_v1", bert_encoder.BertEncoder, None, 21),
      ("output_range_encoder_v1", bert_encoder.BertEncoder, 1, 1),
      ("all_sequence_encoder_v2", bert_encoder.BertEncoderV2, None, 21),
      ("output_range_encoder_v2", bert_encoder.BertEncoderV2, 1, 1),
  )
  def test_dict_outputs_network_invocation(
      self, encoder_cls, output_range, out_seq_len):
    """End-to-end predict() with/without output_range and alternate widths."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    # Create a small BertEncoder for testing.
    test_network = encoder_cls(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=output_range,
        dict_outputs=True)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[1], out_seq_len)
    # Creates a BertEncoder with max_sequence_length != sequence_length
    max_sequence_length = 128
    test_network = encoder_cls(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        dict_outputs=True)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[1], sequence_length)
    # Creates a BertEncoder with embedding_width != hidden_size
    test_network = encoder_cls(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        embedding_width=16,
        dict_outputs=True)
    dict_outputs = test_network(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    data = dict_outputs["sequence_output"]
    pooled = dict_outputs["pooled_output"]
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[-1], hidden_size)
    self.assertTrue(hasattr(test_network, "_embedding_projection"))

  def test_embeddings_as_inputs(self):
    """Encoder accepts precomputed word embeddings in place of word ids."""
    hidden_size = 32
    sequence_length = 21
    # Create a small BertEncoder for testing.
    test_network = bert_encoder.BertEncoderV2(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    test_network.build(
        dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
    embeddings = test_network.get_embedding_layer()(word_ids)
    # Calls with the embeddings.
    dict_outputs = test_network(
        dict(
            input_word_embeddings=embeddings,
            input_mask=mask,
            input_type_ids=type_ids))
    all_encoder_outputs = dict_outputs["encoder_outputs"]
    pooled = dict_outputs["pooled_output"]
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertLen(all_encoder_outputs, 3)
    for data in all_encoder_outputs:
      self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)

  def test_serialize_deserialize(self):
    """Config round-trips through JSON and the model through save/load."""
    # Create a network object that sets all of its config options.
    kwargs = dict(
        vocab_size=100,
        hidden_size=32,
        num_layers=3,
        num_attention_heads=2,
        max_sequence_length=21,
        type_vocab_size=12,
        inner_dim=1223,
        inner_activation="relu",
        output_dropout=0.05,
        attention_dropout=0.22,
        initializer="glorot_uniform",
        output_range=-1,
        embedding_width=16,
        embedding_layer=None,
        norm_first=False)
    network = bert_encoder.BertEncoder(**kwargs)
    # Validate that the config can be forced to JSON.
    _ = network.to_json()
    # Tests model saving/loading.
    model_path = self.get_temp_dir() + "/model"
    network.save(model_path)
    _ = tf.keras.models.load_model(model_path)

  def test_network_creation(self):
    """List-output encoder builds; list and dict outputs match numerically."""
    hidden_size = 32
    sequence_length = 21
    # Create a small BertEncoder for testing.
    test_network = bert_encoder.BertEncoder(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])
    self.assertIsInstance(test_network.transformer_layers, list)
    self.assertLen(test_network.transformer_layers, 3)
    self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)
    test_network_dict = bert_encoder.BertEncoder(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        dict_outputs=True)
    # Create the inputs (note that the first dimension is implicit).
    inputs = dict(
        input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)
    _ = test_network_dict(inputs)
    test_network_dict.set_weights(test_network.get_weights())
    batch_size = 2
    vocab_size = 100
    num_types = 2
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    list_outputs = test_network([word_id_data, mask_data, type_id_data])
    dict_outputs = test_network_dict(
        dict(
            input_word_ids=word_id_data,
            input_mask=mask_data,
            input_type_ids=type_id_data))
    self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
    self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])

  def test_all_encoder_outputs_network_creation(self):
    """return_all_encoder_outputs exposes one output per layer."""
    hidden_size = 32
    sequence_length = 21
    # Create a small BertEncoder for testing.
    test_network = bert_encoder.BertEncoder(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        return_all_encoder_outputs=True)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    all_encoder_outputs, pooled = test_network([word_ids, mask, type_ids])
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertLen(all_encoder_outputs, 3)
    for data in all_encoder_outputs:
      self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)

  def test_network_creation_with_float16_dtype(self):
    """Mixed-precision behavior for the list-output (v1) encoder."""
    hidden_size = 32
    sequence_length = 21
    tf.keras.mixed_precision.set_global_policy("mixed_float16")
    # Create a small BertEncoder for testing.
    test_network = bert_encoder.BertEncoder(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])
    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # If float_dtype is set to float16, the data output is float32 (from a layer
    # norm) and pool output should be float16.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float16, pooled.dtype)

  @parameterized.named_parameters(
      ("all_sequence", None, 21),
      ("output_range", 1, 1),
  )
  def test_network_invocation(self, output_range, out_seq_len):
    """End-to-end predict() for the list-output (v1) encoder."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    # Create a small BertEncoder for testing.
    test_network = bert_encoder.BertEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=output_range)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])
    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[1], out_seq_len)
    # Creates a BertEncoder with max_sequence_length != sequence_length
    max_sequence_length = 128
    test_network = bert_encoder.BertEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types)
    data, pooled = test_network([word_ids, mask, type_ids])
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[1], sequence_length)
    # Creates a BertEncoder with embedding_width != hidden_size
    test_network = bert_encoder.BertEncoder(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        embedding_width=16)
    data, pooled = test_network([word_ids, mask, type_ids])
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    outputs = model.predict([word_id_data, mask_data, type_id_data])
    self.assertEqual(outputs[0].shape[-1], hidden_size)
    self.assertTrue(hasattr(test_network, "_embedding_projection"))
class BertEncoderV2CompatibilityTest(tf.test.TestCase):
  """Checks that BertEncoderV2 can consume weights and checkpoints
  produced by the original BertEncoder."""

  def tearDown(self):
    super().tearDown()
    # Reset the dtype policy so tests do not leak a mixed-precision policy.
    tf.keras.mixed_precision.set_global_policy("float32")

  def test_weights_forward_compatible(self):
    """Layer-by-layer weight copy from v1 to v2 yields identical outputs."""
    batch_size = 3
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    kwargs = dict(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=None)
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    data = dict(
        input_word_ids=word_id_data,
        input_mask=mask_data,
        input_type_ids=type_id_data)
    # Create small BertEncoders for testing.
    new_net = bert_encoder.BertEncoderV2(**kwargs)
    _ = new_net(data)
    kwargs["dict_outputs"] = True
    old_net = bert_encoder.BertEncoder(**kwargs)
    _ = old_net(data)
    # Copy every weight-bearing sublayer from the old network to the new one.
    new_net._embedding_layer.set_weights(old_net._embedding_layer.get_weights())
    new_net._position_embedding_layer.set_weights(
        old_net._position_embedding_layer.get_weights())
    new_net._type_embedding_layer.set_weights(
        old_net._type_embedding_layer.get_weights())
    new_net._embedding_norm_layer.set_weights(
        old_net._embedding_norm_layer.get_weights())
    # embedding_dropout has no weights.
    if hasattr(old_net, "_embedding_projection"):
      new_net._embedding_projection.set_weights(
          old_net._embedding_projection.get_weights())
    # attention_mask_layer has no weights.
    new_net._pooler_layer.set_weights(old_net._pooler_layer.get_weights())
    for otl, ntl in zip(old_net._transformer_layers,
                        new_net._transformer_layers):
      ntl.set_weights(otl.get_weights())

    # Helper: both networks must produce numerically close outputs for
    # every output key.
    def check_output_close(data, net1, net2):
      output1 = net1(data)
      output2 = net2(data)
      for key in output1:
        self.assertAllClose(output1[key], output2[key])

    check_output_close(data, old_net, new_net)

  def test_checkpoint_forward_compatible(self):
    """A tf.train.Checkpoint written by v1 restores into v2 unchanged."""
    batch_size = 3
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    kwargs = dict(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=None)
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    data = dict(
        input_word_ids=word_id_data,
        input_mask=mask_data,
        input_type_ids=type_id_data)
    kwargs["dict_outputs"] = True
    old_net = bert_encoder.BertEncoder(**kwargs)
    old_net_outputs = old_net(data)
    ckpt = tf.train.Checkpoint(net=old_net)
    path = ckpt.save(self.get_temp_dir())
    del kwargs["dict_outputs"]
    new_net = bert_encoder.BertEncoderV2(**kwargs)
    new_ckpt = tf.train.Checkpoint(net=new_net)
    status = new_ckpt.restore(path)
    status.assert_existing_objects_matched()
    # assert_consumed will fail because the old model has redundant nodes.
    new_net_outputs = new_net(data)
    self.assertAllEqual(old_net_outputs.keys(), new_net_outputs.keys())
    for key in old_net_outputs:
      self.assertAllClose(old_net_outputs[key], new_net_outputs[key])

  def test_keras_model_checkpoint_forward_compatible(self):
    """A checkpoint of a keras Model wrapping v1 restores into one wrapping v2."""
    batch_size = 3
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    kwargs = dict(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types,
        output_range=None)
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    data = dict(
        input_word_ids=word_id_data,
        input_mask=mask_data,
        input_type_ids=type_id_data)
    kwargs["dict_outputs"] = True
    old_net = bert_encoder.BertEncoder(**kwargs)
    inputs = old_net.inputs
    outputs = old_net(inputs)
    old_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    old_model_outputs = old_model(data)
    ckpt = tf.train.Checkpoint(net=old_model)
    path = ckpt.save(self.get_temp_dir())
    del kwargs["dict_outputs"]
    new_net = bert_encoder.BertEncoderV2(**kwargs)
    inputs = new_net.inputs
    outputs = new_net(inputs)
    new_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    new_ckpt = tf.train.Checkpoint(net=new_model)
    status = new_ckpt.restore(path)
    status.assert_existing_objects_matched()
    new_model_outputs = new_model(data)
    self.assertAllEqual(old_model_outputs.keys(), new_model_outputs.keys())
    for key in old_model_outputs:
      self.assertAllClose(old_model_outputs[key], new_model_outputs[key])
# Allow running this test module directly.
if __name__ == "__main__":
  tf.test.main()
| 39.394569 | 101 | 0.717854 |
acf7744502d4ab164d5ff38b2017a1cc21199b71 | 4,315 | py | Python | s3stash/s3tools.py | barbarahui/nuxeo-calisphere | 829e74df108d4d873299daa46139c52b2ee31578 | [
"BSD-3-Clause"
] | null | null | null | s3stash/s3tools.py | barbarahui/nuxeo-calisphere | 829e74df108d4d873299daa46139c52b2ee31578 | [
"BSD-3-Clause"
] | 1 | 2016-11-29T22:26:15.000Z | 2016-11-29T23:13:57.000Z | s3stash/s3tools.py | barbarahui/nuxeo-calisphere | 829e74df108d4d873299daa46139c52b2ee31578 | [
"BSD-3-Clause"
] | 1 | 2016-08-24T20:59:33.000Z | 2016-08-24T20:59:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import boto
import logging
from boto.s3.connection import OrdinaryCallingFormat
import urlparse
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import json
# s3:// URL template: first field is the bucket (optionally with a path
# prefix), second field is the object key.
S3_URL_FORMAT = "s3://{0}/{1}"
# Root of the CDL registry API (not referenced in the code visible here).
REGISTRY_API_BASE = 'https://registry.cdlib.org/api/v1/'
def s3stash(filepath, bucket, key, region, mimetype, replace=False):
"""
Stash file in S3 bucket.
"""
logger = logging.getLogger(__name__)
logger.info("filepath: {}".format(filepath))
logger.info("bucket: {}".format(bucket))
logger.info("key: {}".format(key))
logger.info("region: {}".format(region))
logger.info("mimetype: {}".format(mimetype))
report = {}
bucketpath = bucket.strip("/")
bucketbase = bucket.split("/")[0]
s3_url = S3_URL_FORMAT.format(bucketpath, key)
parts = urlparse.urlsplit(s3_url)
logger.info("bucketpath: {}".format(bucketpath))
logger.info("bucketbase: {}".format(bucketbase))
logger.info("s3_url: {}".format(s3_url))
# FIXME ugh this is such a hack. not sure what is going on here.
if region == 'us-east-1':
conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
conn = boto.s3.connect_to_region(region)
try:
bucket = conn.get_bucket(bucketbase)
except boto.exception.S3ResponseError:
bucket = conn.create_bucket(bucketbase, location=region)
logger.info("Created S3 bucket {}".format(bucketbase))
if not (bucket.get_key(parts.path)):
key = bucket.new_key(parts.path)
key.set_metadata("Content-Type", mimetype)
key.set_contents_from_filename(filepath)
msg = "created {0}".format(s3_url)
action = 'created'
logger.info(msg)
elif replace:
key = bucket.get_key(parts.path)
key.set_metadata("Content-Type", mimetype)
key.set_contents_from_filename(filepath)
msg = "re-uploaded {}".format(s3_url)
action = 'replaced'
logger.info(msg)
else:
msg = "key already existed; not re-uploading {0}".format(s3_url)
action = 'skipped'
logger.info(msg)
report['s3_url'] = s3_url
report['msg'] = msg
report['action'] = action
report['stashed'] = True
return True, report
def is_s3_stashed(bucket, key, region):
""" Check for existence of key on S3.
"""
logger = logging.getLogger(__name__)
key_exists = False
bucketpath = bucket.strip("/")
bucketbase = bucket.split("/")[0]
s3_url = S3_URL_FORMAT.format(bucketpath, key)
parts = urlparse.urlsplit(s3_url)
# FIXME ugh this is such a hack. not sure what is going on here.
if region == 'us-east-1':
conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
conn = boto.s3.connect_to_region(region)
try:
bucket = conn.get_bucket(bucketbase)
except boto.exception.S3ResponseError:
logger.info("Bucket does not exist: {}".format(bucketbase))
return False
if bucket.get_key(parts.path):
return True
else:
return False
def get_nuxeo_path(registry_id):
''' given ucldc registry collection ID, get Nuxeo path for collection '''
url = "{}collection/{}/?format=json".format(REGISTRY_API_BASE, registry_id)
retry_strategy = Retry(
total=3,
status_forcelist=[413, 429, 500, 502, 503, 504],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
# timeouts based on those used by nuxeo-python-client
# see: https://github.com/nuxeo/nuxeo-python-client/blob/master/nuxeo/constants.py
# but tweaked to be slightly larger than a multiple of 3, which is recommended
# in the requests documentation.
# see: https://docs.python-requests.org/en/master/user/advanced/#timeouts
timeout_connect = 12.05
timeout_read = (60 * 10) + 0.05
res = http.get(url, timeout=(timeout_connect, timeout_read))
res.raise_for_status()
md = json.loads(res.content)
nuxeo_path = md['harvest_extra_data']
if nuxeo_path:
return nuxeo_path
else:
return None
| 31.49635 | 86 | 0.66095 |
acf7745ebeefaaf5bcd1cffdbca29172e0c0c360 | 3,983 | py | Python | tests/test_crle.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 119 | 2019-02-23T07:48:11.000Z | 2022-03-23T20:45:51.000Z | tests/test_crle.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 6 | 2020-01-27T11:15:32.000Z | 2021-09-15T17:58:34.000Z | tests/test_crle.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 10 | 2019-04-23T17:28:48.000Z | 2022-02-14T05:35:31.000Z | import unittest
import detools
from detools.create import CrleCompressor
from detools.apply import CrleDecompressor
class DetoolsCrleTest(unittest.TestCase):
def test_compress(self):
datas = [
( [b''], b'\x00\x00'),
( [b'A'], b'\x00\x01A'),
( [5 * b'A'], b'\x00\x05AAAAA'),
( [6 * b'A'], b'\x01\x06A'),
( [b'ABBCC', b'CBBA'], b'\x00\x09ABBCCCBBA'),
( [126 * b'A', b'', b'A'], b'\x01\x7fA'),
( [128 * b'A'], b'\x01\x80\x01A'),
( [1000 * b'A'], b'\x01\xe8\x07A'),
( [69999 * b'A', b'A'], b'\x01\xf0\xa2\x04A'),
([10 * b'A', b'BC', 8 * b'A'], b'\x01\x0aA\x00\x02BC\x01\x08A'),
( [10 * b'A' + 8 * b'B'], b'\x01\x0aA\x01\x08B')
]
for chunks, compressed in datas:
compressor = CrleCompressor()
data = b''
for chunk in chunks:
data += compressor.compress(chunk)
data += compressor.flush()
self.assertEqual(data, compressed)
def test_decompress_no_data(self):
compressed = b'\x00\x00'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.needs_input, True)
self.assertEqual(decompressor.decompress(compressed, 1), b'')
self.assertEqual(decompressor.eof, True)
def test_decompress(self):
datas = [
( [b'\x00\x01A'], b'A'),
( [b'\x00\x07AAAAAAA'], 7 * b'A'),
( [b'\x01\x08A'], 8 * b'A'),
( [b'\x00\x09ABBCCCBBA'], b'ABBCCCBBA'),
( [b'\x01\x7f', b'A'], 127 * b'A'),
( [b'\x01\x80\x01A'], 128 * b'A'),
( [b'\x01\xe8\x07A'], 1000 * b'A'),
( [b'\x01\xf0', b'\xa2\x04A'], 70000 * b'A'),
([b'\x01\x0aA\x00\x02BC\x01\x08A'], 10 * b'A' + b'BC' + 8 * b'A'),
( [b'\x01\x0aA\x01\x08B'], 10 * b'A' + 8 * b'B')
]
for chunks, decompressed in datas:
decompressor = CrleDecompressor(sum([len(c) for c in chunks]))
for chunk in chunks:
self.assertEqual(decompressor.needs_input, True)
self.assertEqual(decompressor.eof, False)
decompressor.decompress(chunk, 0)
self.assertEqual(decompressor.needs_input, False)
data = b''
while not decompressor.eof:
data += decompressor.decompress(b'', 1)
self.assertEqual(data, decompressed)
def test_decompress_bad_kind(self):
decompressor = CrleDecompressor(3)
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'\x02\x01A', 1)
self.assertEqual(
str(cm.exception),
'Expected kind scattered(0) or repeated(1), but got 2.')
def test_decompress_at_eof(self):
compressed = b'\x00\x01A'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.decompress(compressed, 1), b'A')
self.assertEqual(decompressor.eof, True)
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'6', 1)
self.assertEqual(str(cm.exception), 'Already at end of stream.')
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'', 1)
self.assertEqual(str(cm.exception), 'Already at end of stream.')
def test_decompress_ignore_extra_data(self):
compressed = b'\x00\x01A'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.decompress(compressed + b'B', 1), b'A')
self.assertEqual(decompressor.eof, True)
if __name__ == '__main__':
unittest.main()
| 35.247788 | 78 | 0.517951 |
acf775134c07f030fb37afc66d009d5f43eb228e | 10,163 | py | Python | salt/states/group.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | [
"Apache-2.0"
] | null | null | null | salt/states/group.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | [
"Apache-2.0"
] | 1 | 2019-08-18T07:03:30.000Z | 2019-08-18T07:03:30.000Z | salt/states/group.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | [
"Apache-2.0"
] | 2 | 2020-11-04T06:24:32.000Z | 2020-11-06T11:00:57.000Z | # -*- coding: utf-8 -*-
'''
Management of user groups
=========================
The group module is used to create and manage group settings, groups can be
either present or absent. User/Group names can be passed to the ``adduser``,
``deluser``, and ``members`` parameters. ``adduser`` and ``deluser`` can be used
together but not with ``members``.
In Windows, if no domain is specified in the user or group name (i.e.
``DOMAIN\\username``) the module will assume a local user or group.
.. code-block:: yaml
cheese:
group.present:
- gid: 7648
- system: True
- addusers:
- user1
- users2
- delusers:
- foo
cheese:
group.present:
- gid: 7648
- system: True
- members:
- foo
- bar
- user1
- user2
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import sys
# Import 3rd-party libs
from salt.ext import six
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
def _changes(name,
gid=None,
addusers=None,
delusers=None,
members=None):
'''
Return a dict of the changes required for a group if the group is present,
otherwise return False.
'''
lgrp = __salt__['group.info'](name)
if not lgrp:
return False
# User and Domain names are not case sensitive in Windows. Let's make them
# all lower case so we can compare properly
if salt.utils.platform.is_windows():
if lgrp['members']:
lgrp['members'] = [user.lower() for user in lgrp['members']]
if members:
members = [salt.utils.win_functions.get_sam_name(user).lower() for user in members]
if addusers:
addusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in addusers]
if delusers:
delusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in delusers]
change = {}
ret = {}
if gid:
try:
gid = int(gid)
if lgrp['gid'] != gid:
change['gid'] = gid
except (TypeError, ValueError):
ret['result'] = False
ret['comment'] = 'Invalid gid'
return ret
if members:
# -- if new member list if different than the current
if set(lgrp['members']).symmetric_difference(members):
change['members'] = members
if addusers:
users_2add = [user for user in addusers if user not in lgrp['members']]
if users_2add:
change['addusers'] = users_2add
if delusers:
users_2del = [user for user in delusers if user in lgrp['members']]
if users_2del:
change['delusers'] = users_2del
return change
def present(name,
gid=None,
system=False,
addusers=None,
delusers=None,
members=None):
r'''
Ensure that a group is present
Args:
name (str):
The name of the group to manage
gid (str):
The group id to assign to the named group; if left empty, then the
next available group id will be assigned. Ignored on Windows
system (bool):
Whether or not the named group is a system group. This is essentially
the '-r' option of 'groupadd'. Ignored on Windows
addusers (list):
List of additional users to be added as a group members. Cannot
conflict with names in delusers. Cannot be used in conjunction with
members.
delusers (list):
Ensure these user are removed from the group membership. Cannot
conflict with names in addusers. Cannot be used in conjunction with
members.
members (list):
Replace existing group members with a list of new members. Cannot be
used in conjunction with addusers or delusers.
Example:
.. code-block:: yaml
# Adds DOMAIN\db_admins and Administrators to the local db_admin group
# Removes Users
db_admin:
group.present:
- addusers:
- DOMAIN\db_admins
- Administrators
- delusers:
- Users
# Ensures only DOMAIN\domain_admins and the local Administrator are
# members of the local Administrators group. All other users are
# removed
Administrators:
group.present:
- members:
- DOMAIN\domain_admins
- Administrator
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Group {0} is present and up to date'.format(name)}
if members and (addusers or delusers):
ret['result'] = None
ret['comment'] = (
'Error: Conflicting options "members" with "addusers" and/or'
' "delusers" can not be used together. ')
return ret
if addusers and delusers:
# -- if trying to add and delete the same user(s) at the same time.
if not set(addusers).isdisjoint(set(delusers)):
ret['result'] = None
ret['comment'] = (
'Error. Same user(s) can not be added and deleted'
' simultaneously')
return ret
changes = _changes(name,
gid,
addusers,
delusers,
members)
if changes:
ret['comment'] = (
'The following group attributes are set to be changed:\n')
for key, val in six.iteritems(changes):
ret['comment'] += '{0}: {1}\n'.format(key, val)
if __opts__['test']:
ret['result'] = None
return ret
for key, val in six.iteritems(changes):
if key == 'gid':
__salt__['group.chgid'](name, gid)
continue
if key == 'addusers':
for user in val:
__salt__['group.adduser'](name, user)
continue
if key == 'delusers':
for user in val:
__salt__['group.deluser'](name, user)
continue
if key == 'members':
__salt__['group.members'](name, ','.join(members))
continue
# Clear cached group data
sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('group.getent', None)
changes = _changes(name,
gid,
addusers,
delusers,
members)
if changes:
ret['result'] = False
ret['comment'] += 'Some changes could not be applied'
ret['changes'] = {'Failed': changes}
else:
ret['changes'] = {'Final': 'All changes applied successfully'}
if changes is False:
# The group is not present, make it!
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Group {0} set to be added'.format(name)
return ret
grps = __salt__['group.getent']()
# Test if gid is free
if gid is not None:
gid_group = None
for lgrp in grps:
if lgrp['gid'] == gid:
gid_group = lgrp['name']
break
if gid_group is not None:
ret['result'] = False
ret['comment'] = (
'Group {0} is not present but gid {1} is already taken by'
' group {2}'.format(name, gid, gid_group))
return ret
# Group is not present, make it.
if __salt__['group.add'](name, gid=gid, system=system):
# if members to be added
grp_members = None
if members:
grp_members = ','.join(members)
if addusers:
grp_members = ','.join(addusers)
if grp_members:
__salt__['group.members'](name, grp_members)
# Clear cached group data
sys.modules[__salt__['test.ping'].__module__].__context__.pop(
'group.getent', None)
ret['comment'] = 'New group {0} created'.format(name)
ret['changes'] = __salt__['group.info'](name)
changes = _changes(name,
gid,
addusers,
delusers,
members)
if changes:
ret['result'] = False
ret['comment'] = (
'Group {0} has been created but, some changes could not'
' be applied'.format(name))
ret['changes'] = {'Failed': changes}
else:
ret['result'] = False
ret['comment'] = 'Failed to create new group {0}'.format(name)
return ret
def absent(name):
'''
Ensure that the named group is absent
Args:
name (str):
The name of the group to remove
Example:
.. code-block:: yaml
# Removes the local group `db_admin`
db_admin:
group.absent
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
grp_info = __salt__['group.info'](name)
if grp_info:
# Group already exists. Remove the group.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Group {0} is set for removal'.format(name)
return ret
ret['result'] = __salt__['group.delete'](name)
if ret['result']:
ret['changes'] = {name: ''}
ret['comment'] = 'Removed group {0}'.format(name)
return ret
else:
ret['comment'] = 'Failed to remove group {0}'.format(name)
return ret
else:
ret['comment'] = 'Group not present'
return ret
| 31.464396 | 97 | 0.520614 |
acf77639a0b9439800e7388d54aa9d05e27ace8a | 8,646 | py | Python | espresso/DLD_v3.py | shenw33/ML_DLD | e83b5237a6f8dce6f9b347258f04b59345c59678 | [
"BSD-3-Clause"
] | null | null | null | espresso/DLD_v3.py | shenw33/ML_DLD | e83b5237a6f8dce6f9b347258f04b59345c59678 | [
"BSD-3-Clause"
] | null | null | null | espresso/DLD_v3.py | shenw33/ML_DLD | e83b5237a6f8dce6f9b347258f04b59345c59678 | [
"BSD-3-Clause"
] | null | null | null | import espressomd
import espressomd.lb
from espressomd import lbboundaries
from espressomd import shapes
from espressomd import interactions
import object_in_fluid as oif
import numpy as np
import os, sys
import random
def create_obstacles():
    """Build the channel walls and the four DLD pillars.

    Side effects only: appends every shape to the module-level ``boundaries``
    list (later registered both as LB fluid boundaries and as particle
    constraints) and records each pillar centre in ``piller_centers``; also
    writes one VTK file per shape into ``vtk_directory`` for visualisation.
    Reads the module-level geometry globals ``boxX``/``boxY``/``boxZ``,
    ``cushion``, ``piller_radius``, ``row_shift`` and ``round_up_error``.
    """
    # bottom wall of the channel (z = 0 plane, one lattice unit thick)
    tmp_shape = shapes.Rhomboid(corner=[0.0, 0.0, 0.0], a=[boxX, 0.0, 0.0], b=[0.0, boxY, 0.0], c=[0.0, 0.0, 1.0],
                                direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_rhomboid(rhom_shape=tmp_shape, out_file=vtk_directory + "/wallBottom.vtk")
    # top wall of the channel (z = boxZ - 1 plane, one lattice unit thick)
    tmp_shape = shapes.Rhomboid(corner=[0.0, 0.0, boxZ - 1], a=[boxX, 0.0, 0.0], b=[0.0, boxY, 0.0], c=[0.0, 0.0, 1.0],
                                direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_rhomboid(rhom_shape=tmp_shape, out_file=vtk_directory + "/wallTop.vtk")
    # front/back y-walls intentionally disabled (kept for reference);
    # presumably the y direction is left periodic -- TODO confirm
    ##    # front wall of the channel
    ##    tmp_shape = shapes.Rhomboid(corner=[0.0, 0.0, 0.0], a=[boxX, 4.0, 0.0], b=[0.0, 1.0, 0.0], c=[0.0, 0.0, boxZ],
    ##                                direction=1)
    ##    boundaries.append(tmp_shape)
    ##    oif.output_vtk_rhomboid(rhom_shape=tmp_shape, out_file=vtk_directory + "/wallFront.vtk")
    ##    # back wall of the channel
    ##    tmp_shape = shapes.Rhomboid(corner=[0.0, boxY -4 - 1.0, 0.0], a=[boxX, 4.0, 0.0], b=[0.0, 1.0, 0.0],
    ##                                c=[0.0, 0.0, boxZ], direction=1)
    ##    boundaries.append(tmp_shape)
    ##    oif.output_vtk_rhomboid(rhom_shape=tmp_shape, out_file=vtk_directory + "/wallBack.vtk")
    ##
    # pillar A: inlet column (x = cushion), at the bottom edge (y = 0)
    centerA = [cushion, 0.0, boxZ/2]
    piller_centers.append(centerA)
    tmp_shape = shapes.Cylinder(center=centerA, axis=[0.0, 0.0, 1.0], length=boxZ, radius=piller_radius, direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_cylinder(cyl_shape=tmp_shape, n=20, out_file=vtk_directory + "/cylinderA.vtk")
    # pillar B: inlet column, shifted down from the top edge by row_shift
    centerB = [cushion, boxY-row_shift, boxZ/2]
    piller_centers.append(centerB)
    tmp_shape = shapes.Cylinder(center=centerB, axis=[0.0, 0.0, 1.0], length=boxZ, radius=piller_radius, direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_cylinder(cyl_shape=tmp_shape, n=20, out_file=vtk_directory + "/cylinderB.vtk")
    # pillar C: outlet column (x = boxX - cushion), shifted up by row_shift
    centerC = [boxX-cushion, row_shift, boxZ/2]
    piller_centers.append(centerC)
    tmp_shape = shapes.Cylinder(center=centerC, axis=[0.0, 0.0, 1.0], length=boxZ, radius=piller_radius, direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_cylinder(cyl_shape=tmp_shape, n=20, out_file=vtk_directory + "/cylinderC.vtk")
    # pillar D: outlet column, just below the top edge (boxY was rounded up,
    # so round_up_error compensates for the ceil() applied to row_shift)
    centerD = [boxX-cushion, boxY-round_up_error, boxZ/2]
    piller_centers.append(centerD)
    tmp_shape = shapes.Cylinder(center=centerD, axis=[0.0, 0.0, 1.0], length=boxZ, radius=piller_radius, direction=1)
    boundaries.append(tmp_shape)
    oif.output_vtk_cylinder(cyl_shape=tmp_shape, n=20, out_file=vtk_directory + "/cylinderD.vtk")
# ---------------------------------------------------------------------------
# input parameters
# ---------------------------------------------------------------------------
sim_no = 'new2'             # output-folder suffix: results go to output/sim<sim_no>
piller_dia = 15             # pillar diameter
piller_distance = 25        # gap between pillars (NOT the DLD pitch lambda)
row_shift = 4.1675          # row shift, del_lambda = epsilon*lambda = lambda/np
                            # here: epsilon = row-shift fraction
                            #       np      = periodicity
particle_radius = 3.81
piller_hight = 15           # pillar height == channel depth (becomes boxZ)
particle_type = 'rbc'       # 'rbc' or 'cell'; selects the mesh template below
time_step = 0.01
particle_y_position = 28.6  # must be a float, strictly between
                            # (piller_dia/2 + particle_radius) and
                            # (piller_distance + piller_dia/2 - particle_radius)
rotation = [0.0, 0.0, 0.0]  # initial particle rotation (floats); only matters for rbc
maxCycle = 500              # number of main-loop cycles (simulation length)
###########################################################################################################################
#
# derived parameters
# Don't need to change anything after this
#
############################################################################################################################
# sanity check: the particle must start inside the gap between pillar rows
if particle_y_position > (piller_dia/2+particle_radius) and particle_y_position < (piller_distance+piller_dia/2-particle_radius):
    print("Initial particle positioning is correct with Y value {}".format(particle_y_position))
else:
    print("Initial particle positioning is incorrect with Y value {}".format(particle_y_position))
    print( "Enter particle_y_position between {} and {}".format(piller_dia/2+particle_radius,piller_distance+piller_dia/2-particle_radius))
    exit()
# geometry derived from the inputs
piller_radius = piller_dia/2
cushion = (piller_distance + piller_dia)*0.25             # inlet/outlet margin before the pillar columns
boxX = piller_distance + piller_dia + cushion*2           # channel length (60.0 with the defaults)
boxY = piller_distance + piller_dia + np.ceil(row_shift)  # channel width (44.0 with the defaults)
boxZ = piller_hight                                       # channel depth (15.0 with the defaults)
round_up_error = np.ceil(row_shift) - row_shift           # correction for the ceil() applied to boxY
# output folders (os.makedirs raises if they already exist)
directory = "output/sim"+str(sim_no)
os.makedirs(directory)
vtk_directory = directory+"/vtk"
os.makedirs(vtk_directory)
# initialization of the ESPResSo system
system = espressomd.System(box_l=[boxX, boxY, boxZ])
system.cell_system.skin = 0.2
system.time_step = time_step
piller_centers = list()  # filled by create_obstacles()
boundaries = []          # filled by create_obstacles()
create_obstacles()
# choose the particle mesh template (vertex + face files) by particle type
if particle_type == 'cell':
    nodes_file="input/cell_v500s500.vert.dat"
    triangles_file="input/cell_v500s500.face.dat"
else:
    nodes_file="input/rbc_v2000s2000.vert.dat"
    triangles_file="input/rbc_v2000s2000.face.dat"
# elastic template shared by all particles of this type (ks/kb/kal/kag/kv are
# the object-in-fluid stretching/bending/area/global-area/volume moduli)
particle_template = oif.OifCellType(nodes_file=nodes_file, triangles_file=triangles_file,
                                    check_orientation=False, system=system, ks=0.99, kb=0.99, kal=0.99,
                                    kag=0.9, kv=0.9, normal=True, resize=[particle_radius, particle_radius, particle_radius])
#Warmup integration loop
def wormup_loop(x, y):
    """Fluid-only warmup: run ``x`` cycles of ``y`` integration steps each.

    Every 10th cycle (starting with the first) the current LB velocity
    field is dumped to ``vtk_directory``/warmup_flow_<k>.vtk, where ``k``
    counts the dumps.  Uses the module-level ``system``, ``lbf`` and
    ``vtk_directory``.
    """
    system.cell_system.skin = 0.2
    print("Warmup start ....")
    k = 0
    cycle = 0
    while cycle < x:
        system.integrator.run(y)
        if cycle % 10 == 0:
            k += 1
            lbf.print_vtk_velocity(vtk_directory + "/warmup_flow_" + str(k) + ".vtk")
        cycle += 1
    print(" Warmup completed")
# register every obstacle shape twice: as a lattice-Boltzmann boundary for
# the fluid and as a non-penetrable constraint of type 1000 for the particle
for boundary in boundaries:
    system.lbboundaries.add(lbboundaries.LBBoundary(shape=boundary))
    system.constraints.add(shape=boundary, particle_type=1000, penetrable=False)
# add the GPU lattice-Boltzmann fluid, driven by a constant body force in +x
lb_params = {'agrid': 1, 'dens': 1, 'visc': 1.5, 'tau': system.time_step, 'ext_force_density': [0.008, 0.0, 0.0]}
lbf = espressomd.lb.LBFluidGPU(**lb_params)
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, gamma=1.5)
# warmup integration loop
# NOTE(review): called with 0 cycles, so the warmup body never executes --
# confirm whether a non-zero warmup count was intended here.
wormup_loop(0,500)
# create the elastic particle from the template at the inlet position
particle = oif.OifCell(cell_type=particle_template, particle_type=1, origin=[cushion,particle_y_position,boxZ/2],
                       rotate=rotation,particle_mass=0.5)
particle.output_vtk_pos(vtk_directory+"/"+particle_type+"_0.vtk")
# cell-wall interaction: soft-sphere repulsion between the immersed particle
# (type 1) and every obstacle constraint (type 1000)
system.non_bonded_inter[1, 1000].soft_sphere.set_params(a=0.0001, n=1.2, cutoff=0.1, offset=0.0)

# main integration loop
integr_steps = 500

# Write the CSV header.  The context manager closes (and flushes) the handle
# before the per-cycle appends below.  FIX: the original did
# 'vel_file.close' without parentheses, so the file was never closed and the
# buffered header could hit disk after rows appended through separate handles.
with open(directory + '/velocity.csv', mode='w') as vel_file:
    vel_file.write("rbc_center_x,rbc_center_y,rbc_center_z,rbc_abs_vel,rbc_vel_x,rbc_vel_y,rbc_vel_z \n")

for i in range(1, maxCycle):
    system.integrator.run(steps=integr_steps)
    print(particle.get_origin_folded())
    # NOTE(review): this prints the bound method object, not the node count;
    # probably meant particle.get_n_nodes() -- confirm against object_in_fluid.
    print(particle.get_n_nodes)
    # Once the particle reaches the outlet pillar column, re-seed it near the
    # inlet and shift it 4.0 units in -y (presumably to follow the DLD
    # streamline across periods -- TODO confirm).
    particle_x_pos = particle.get_origin_folded()[0]
    if np.ceil(particle.get_origin_folded()[0]) >= boxX - cushion:
        particle_x_pos = boxX - particle.get_origin_folded()[0]
        particle_y_pos = particle.get_origin_folded()[1] - 4.0
        particle_z_pos = particle.get_origin_folded()[2]
        particle.set_origin([particle_x_pos, particle_y_pos, particle_z_pos])
    print(particle.get_origin_folded())
    # dump the particle mesh and the fluid velocity field for this cycle
    particle.output_vtk_pos_folded(file_name=vtk_directory + "/" + particle_type + "_" + str(i) + ".vtk")
    file_name_velocity = vtk_directory + "/flow_" + str(i) + ".vtk"
    lbf.print_vtk_velocity(file_name_velocity)
    # append the particle's centre position and velocity to the CSV log
    rbc_center = np.array(particle.get_origin())
    rbc_vel = np.array(particle.get_velocity())
    rbc_abs_vel = np.linalg.norm(rbc_vel)
    with open(directory + "/velocity.csv", "a") as vel_file:
        vel_file.write(str(rbc_center[0]) + "," + str(rbc_center[1]) + "," + str(rbc_center[2]) + ","
                       + str(rbc_abs_vel) + "," + str(rbc_vel[0]) + "," + str(rbc_vel[1]) + "," + str(rbc_vel[2]) + "\n")
    print ("time: {}".format(str(i * system.time_step * integr_steps)))
print ("Simulation completed.")
| 40.783019 | 151 | 0.677423 |
acf77718f16e62a1d4b342767888dc57ee484475 | 18,702 | py | Python | users/models.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | null | null | null | users/models.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | 3 | 2020-02-11T23:04:56.000Z | 2021-06-10T18:07:53.000Z | users/models.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | 1 | 2021-08-20T14:49:09.000Z | 2021-08-20T14:49:09.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User, Group
from django.conf import settings
from django.db.models import Count
from layers.models import Capa, Categoria, Metadatos, Escala, AreaTematica
from maps.models import Mapa, ManejadorDeMapas
# signals
from django.db.models.signals import post_save, post_delete, pre_save, pre_delete
from django.dispatch import receiver
PERMISOS_ENUM = (
('read', 'read'),
('write', 'write'),
)
class UserProfile(models.Model):
    """One-to-one extension of Django's User with MapGround-specific flags."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # whether this user is allowed to upload new layers
    puede_subir_capas = models.BooleanField(default=True, null=False, blank=False)
class PermisoDeCapa(models.Model):
    """Per-user permission ('read' or 'write') on a single layer (Capa)."""
    user = models.ForeignKey(User, null=False,blank=False, verbose_name='Usuario')
    capa = models.ForeignKey(Capa, null=False,blank=False)
    permiso = models.CharField('Permiso', choices=PERMISOS_ENUM, max_length=10, null=False, blank=False)

    class Meta:
        # at most one permission row per (user, layer) pair
        unique_together=(('user', 'capa'),)
        verbose_name = u'Permiso de Capa'
        verbose_name_plural = u'Permisos de Capas'
        ordering = ['user__username']

    def __unicode__(self):
        return '%s - %s - %s'%(unicode(self.user), unicode(self.permiso), unicode(self.capa))
class PermisoDeMapa(models.Model):
    """Per-user permission ('read' or 'write') on a single map (Mapa)."""
    user = models.ForeignKey(User, null=False, blank=False, verbose_name='Usuario')
    mapa = models.ForeignKey(Mapa, null=False, blank=False)
    permiso = models.CharField('Permiso', choices=PERMISOS_ENUM, max_length=10, null=False, blank=False)

    class Meta:
        # at most one permission row per (user, map) pair
        unique_together = (('user', 'mapa'),)
        verbose_name = u'Permiso de Mapa'
        verbose_name_plural = u'Permisos de Mapas'

    def __unicode__(self):
        # FIX: this model has no 'capa' field; the original referenced
        # self.capa and raised AttributeError whenever the object was rendered.
        return '%s - %s - %s' % (unicode(self.user), unicode(self.permiso), unicode(self.mapa))
class PermisoDeCapaPorGrupo(models.Model):
    """Per-group permission ('read' or 'write') on a single layer (Capa)."""
    group = models.ForeignKey(Group, null=False,blank=False, verbose_name='Grupo')
    capa = models.ForeignKey(Capa, null=False,blank=False)
    permiso = models.CharField('Permiso', choices=PERMISOS_ENUM, max_length=10, null=False, blank=False)

    class Meta:
        # at most one permission row per (group, layer) pair
        unique_together=(('group', 'capa'),)
        verbose_name = u'Permiso de Capa por Grupo'
        verbose_name_plural = u'Permisos de Capas por Grupos'
        ordering = ['group__name']

    def __unicode__(self):
        return '%s - %s - %s'%(unicode(self.group), unicode(self.permiso), unicode(self.capa))
class ManejadorDePermisos():
    @staticmethod
    def capas_de_usuario(user, caso_de_uso, qset_capas_inicial=None):
        """Queryset of layers visible to *user* under *caso_de_uso*.

        ``caso_de_uso`` is one of 'public', 'own' or 'all'; any other value
        yields an empty queryset.  ``user`` may be a username string; an
        unknown username returns None.  Anonymous users are forced to the
        'public' case.  ``qset_capas_inicial`` optionally restricts the
        starting set of layers.
        """
        if type(user) in (str,unicode):
            try:
                user = User.objects.get(username=user)
            except:
                return None
        # start from all layers, or from the caller-provided subset
        if qset_capas_inicial is None:
            q = Capa.objects.all()
        else:
            q = qset_capas_inicial
        # whoever the user is, force the public case when not authenticated
        if not user.is_authenticated():
            caso_de_uso='public'
        # build the queryset according to the use case
        if caso_de_uso=='public':  # public layers
            q = q.filter(wxs_publico=True)
        elif caso_de_uso=='own':  # own layers
            q = q.filter(owner=user)
        elif caso_de_uso=='all':  # everything the user can access
            if not user.is_superuser:  # superusers see all; others are filtered
                q = (q.filter(owner=user)|  # own layers
                     q.filter(permisodecapa__user=user)|  # some personal permission
                     q.filter(permisodecapaporgrupo__group__in=user.groups.all())|  # some group permission
                     q.filter(wxs_publico=True))  # all public layers
                q = q.distinct()
        else:
            q = Capa.objects.none()
        return q
@staticmethod
def mapas_de_usuario(user, caso_de_uso, qset_mapas_inicial=None):
if type(user) in (str,unicode):
try:
user = User.objects.get(username=user)
except:
return None
# decidimos si partimos de todos los mapas o un subset inicial
if qset_mapas_inicial is None:
q = Mapa.objects.all().filter(tipo_de_mapa='general')
else:
q = qset_mapas_inicial
# sea quien sea el usuario, si no esta autenticado fuerzo el caso publico
if not user.is_authenticated():
caso_de_uso='public'
# armamos el queryset filtrando segun el caso
if caso_de_uso=='public': # mapas publicos
q = q.filter(publico=True)
elif caso_de_uso=='own': # mapas propios
q = q.filter(owner=user)
elif caso_de_uso=='all': # todos los que tiene acceso
if not user.is_superuser: # si es superuser no filtro, sino filtro
q = (q.filter(owner=user)| # propios
q.filter(publico=True)) # todos los publicos
q = q.distinct()
else:
q = Mapa.objects.none()
return q
@staticmethod
def permiso_de_capa(user, capa):
""" Devuelve alguno de estos casos en orden: owner|superuser|(read|write)|None"""
if type(user) in (str,unicode):
try:
user = User.objects.get(username=user)
except:
return None
if type(capa) in (str,unicode):
try:
capa = Capa.objects.get(id_capa=capa)
except:
return None
if capa.owner==user:
return 'owner'
if user.is_superuser:
return 'superuser'
try:
p=PermisoDeCapa.objects.get(user=user, capa=capa)
return p.permiso # si existe, esto devuelve read o write
except: # si no existe, verificamos si hay algun grupo write, y sino, luego algun grupo read
for g in user.groups.all():
if len(PermisoDeCapaPorGrupo.objects.filter(group=g, capa=capa, permiso='write')) > 0:
return 'write'
for g in user.groups.all():
if len(PermisoDeCapaPorGrupo.objects.filter(group=g, capa=capa, permiso='read')) > 0:
return 'read'
if capa.wxs_publico:
return 'read'
return None
    @staticmethod
    def permiso_de_mapa(user, mapa):
        """Resolve *user*'s permission on *mapa*, in order: owner|superuser|read|None.

        Layer-backed maps ('layer', 'layer_raster_band') delegate to the
        permission of their first layer; 'general' maps use ownership,
        superuser status, a per-user PermisoDeMapa row, or the map's public
        flag.  *user* and *mapa* may be username / id_mapa strings; unknown
        values return None.
        """
        if type(user) in (str,unicode):
            try:
                user = User.objects.get(username=user)
            except:
                return None
        if type(mapa) in (str,unicode):
            try:
                mapa = Mapa.objects.get(id_mapa=mapa)
            except:
                return None
        if mapa.tipo_de_mapa in ('layer', 'layer_raster_band'):
            # layer-backed map: inherit the permission of its (single) layer
            return ManejadorDePermisos.permiso_de_capa(user, mapa.capas.first())
        elif mapa.tipo_de_mapa=='general':
            if mapa.owner==user:
                return 'owner'
            if user.is_superuser:
                return 'superuser'
            try:
                # map-level permissions are not implemented yet beyond the
                # public/private flag; this lookup is kept for completeness
                p=PermisoDeMapa.objects.get(user=user, mapa=mapa)
                return p.permiso  # if it exists this returns read or write
            except:  # no per-user row: fall back to the public flag
                if mapa.publico:
                    return 'read'
        return None
@classmethod
def anotar_permiso_a_queryset_de_capas(cls, user, qs):
for capa in qs:
capa.permiso=cls.permiso_de_capa(user, capa)
capa.borrable=len(capa.mapa_set.filter(tipo_de_mapa='general'))==0
@classmethod
def anotar_permiso_a_queryset_de_mapas(cls, user, qs):
for mapa in qs:
mapa.permiso=cls.permiso_de_mapa(user, mapa)
@classmethod
def anotar_permiso_a_capa(cls, user, capa):
capa.permiso=cls.permiso_de_capa(user, capa)
capa.borrable=len(capa.mapa_set.filter(tipo_de_mapa='general'))==0
@classmethod
def anotar_permiso_a_mapa(cls, user, mapa):
mapa.permiso=cls.permiso_de_mapa(user, mapa)
    @classmethod
    def usuarios_con_permiso_a_capa(cls, capa):
        """Return {'owner': User, 'read': [User], 'write': [User]} for *capa*.

        Per-user permissions take precedence over group permissions, and
        'write' group grants beat 'read' ones; the owner is excluded from
        both lists, which are sorted by username.
        """
        # This cannot be expressed as a single queryset union because users
        # would show up duplicated across the read and write groups, and
        # querysets cannot be appended to -- hence plain lists.
        #     return (User.objects.filter(permisodecapa__permiso=permiso, permisodecapa__capa=capa)|
        #             User.objects.filter(groups__permisodecapaporgrupo__permiso=permiso,groups__permisodecapaporgrupo__capa=capa)).order_by('username')
        # seed the lists with the explicit per-user permissions
        read = list(User.objects.filter(permisodecapa__permiso='read', permisodecapa__capa=capa))
        write = list(User.objects.filter(permisodecapa__permiso='write', permisodecapa__capa=capa))
        # append group-granted users not already listed ('write' first so a
        # write grant is not downgraded by a later read grant)
        for u in User.objects.filter(groups__permisodecapaporgrupo__permiso='write',groups__permisodecapaporgrupo__capa=capa):
            if u not in read and u not in write:
                write.append(u)
        for u in User.objects.filter(groups__permisodecapaporgrupo__permiso='read',groups__permisodecapaporgrupo__capa=capa):
            if u not in read and u not in write:
                read.append(u)
        # drop the owner if a group granted them a redundant permission
        if capa.owner in read:
            read.remove(capa.owner)
        if capa.owner in write:
            write.remove(capa.owner)
        # deterministic ordering for display
        read.sort(key=lambda x:x.username)
        write.sort(key=lambda x:x.username)
        return {'owner': capa.owner, 'read': read, 'write': write}
@classmethod
def capas_agrupadas_por_categoria(cls):
categorias=Categoria.objects.annotate(total=Count('metadatos')).order_by('nombre')
sin_categoria=len(Metadatos.objects.filter(categorias=None))
return {'categorias': categorias, 'sin_categoria': sin_categoria}
@classmethod
def mapas_agrupados_por_categoria(cls):
categorias=Categoria.objects.annotate(total=Count('mapa')).order_by('nombre')
sin_categoria=len(Mapa.objects.filter(tipo_de_mapa='general',categorias=None))
return {'categorias': categorias, 'sin_categoria': sin_categoria}
@classmethod
def capas_agrupadas_por_escala(cls):
escalas=Escala.objects.annotate(total=Count('metadatos')).order_by('nombre')
sin_escala=len(Metadatos.objects.filter(escala=None))
return {'escalas': escalas, 'sin_escala': sin_escala }
@classmethod
def capas_agrupadas_por_area_tematica(cls):
areas_tematicas=AreaTematica.objects.annotate(total=Count('metadatos')).order_by('nombre')
sin_area_tematica=len(Metadatos.objects.filter(area_tematica=None))
return {'areas_tematicas': areas_tematicas, 'sin_area_tematica': sin_area_tematica }
@classmethod
def mapas_agrupados_por_escala(cls):
escalas=Escala.objects.annotate(total=Count('mapa')).order_by('nombre')
sin_escala=len(Mapa.objects.filter(tipo_de_mapa='general', escala=None))
return {'escalas': escalas, 'sin_escala': sin_escala }
# @classmethod
# def capas_de_usuario_agrupadas_por_categoria(cls, user, caso_de_uso, qset_capas_inicial=None):
# capas=ManejadorDePermisos.capas_de_usuario(user, caso_de_uso, qset_capas_inicial).order_by('nombre')
# if capas is None:
# return {'categorias': {}, 'sin_categoria': {} }
#
# # inicializo las estructuras resultantes
# categorias={}
# sin_categoria={'capas':[]}
# for c in Categoria.objects.all().order_by('nombre'):
# categorias[c]={'capas':[]}
# # itero las capas del usuario y completo las estructuras
# for c in capas:
# cats = c.metadatos.categorias.all()
# if len(cats) > 0:
# for cat in cats:
# #categorias[cat]['capas'].append(c.dame_titulo)
# categorias[cat]['capas'].append(c)
# else:
# #sin_categoria['capas'].append(c.dame_titulo)
# sin_categoria['capas'].append(c)
# # completo los totales
# for cat, capas in categorias.iteritems():
# categorias[cat]['total']=len(capas['capas'])
# sin_categoria['total']=len(sin_categoria['capas'])
# return {'categorias': categorias, 'sin_categoria': sin_categoria }
# método específico que genera la estructura necesaria para armar el árbol de capas en el cliente del visor, ordenado por categoría (OBSOLETO)
@classmethod
def capas_de_usuario_para_el_visor_por_categoria(cls, user):
capas=ManejadorDePermisos.capas_de_usuario(user, 'all').order_by('metadatos__titulo','nombre')
if capas is None:
return []
categorias={}
sin_categoria=[]
# itero las capas del usuario y completo las estructuras por categoría
for c in capas:
cats = c.metadatos.categorias.all().order_by('nombre')
if len(cats) > 0:
for cat in cats:
if cat.nombre not in categorias:
categorias[cat.nombre]=[]
categorias[cat.nombre].append(c)
else:
sin_categoria.append(c)
res=[]
for cat, capas in sorted(categorias.iteritems()):
nodes=[]
for c in capas:
nodes.append({'text':c.dame_titulo, 'layerId': c.id_capa, 'layerType': c.dame_tipo_de_capa})
#categoriaNode={'text': cat.nombre, 'checkable': False, 'nodes': nodes, 'total': len(nodes)}
categoriaNode={'text': '%s (%s)'%(cat, str(len(nodes))), 'checkable': False, 'nodes': nodes}
res.append(categoriaNode)
if len(sin_categoria) > 0: #si hay capas sin categoría
nodes=[]
for c in sin_categoria:
nodes.append({'text':c.dame_titulo, 'layerId': c.id_capa, 'layerType': c.dame_tipo_de_capa})
#categoriaNode={'text': u'Sin categoría', 'checkable': False, 'nodes': nodes, 'total': len(nodes)}
categoriaNode={'text': u'Sin categoría (%s)'%(str(len(nodes))), 'checkable': False, 'nodes': nodes}
res.append(categoriaNode)
return res
# método específico que genera la estructura necesaria para armar el árbol de capas en el cliente del visor, ordenado por área temática
@classmethod
def capas_de_usuario_para_el_visor_por_area_tematica(cls, user):
capas=ManejadorDePermisos.capas_de_usuario(user, 'all').order_by('metadatos__titulo','nombre')
if capas is None:
return []
areas_tematicas={}
sin_area_tematica=[]
# itero las capas del usuario y completo la estructuras por área temática
for c in capas:
at = c.metadatos.area_tematica
if at is not None:
if at.nombre not in areas_tematicas:
areas_tematicas[at.nombre]=[]
areas_tematicas[at.nombre].append(c)
else:
sin_area_tematica.append(c)
res=[]
for cat, capas in sorted(areas_tematicas.iteritems()):
nodes=[]
for c in capas:
nodes.append({'text':c.dame_titulo, 'layerId': c.id_capa, 'layerType': c.dame_tipo_de_capa})
#areaTematicaNode={'text': cat.nombre, 'checkable': False, 'nodes': nodes, 'total': len(nodes)}
areaTematicaNode={'text': '%s (%s)'%(cat, str(len(nodes))), 'checkable': False, 'nodes': nodes}
res.append(areaTematicaNode)
if len(sin_area_tematica) > 0: #si hay capas sin área temática
nodes=[]
for c in sin_area_tematica:
nodes.append({'text':c.dame_titulo, 'layerId': c.id_capa, 'layerType': c.dame_tipo_de_capa})
#areaTematicaNode={'text': u'Sin área temática', 'checkable': False, 'nodes': nodes, 'total': len(nodes)}
areaTematicaNode={'text': u'Sin área temática (%s)'%(str(len(nodes))), 'checkable': False, 'nodes': nodes}
res.append(areaTematicaNode)
return res
@receiver(post_save, sender=PermisoDeCapa)
def onPermisoDeCapaPostSave(sender, instance, created, **kwargs):
print 'onPermisoDeCapaPostSave %s'%(str(instance))
ManejadorDeMapas.delete_mapfile(instance.user.username)
@receiver(post_delete, sender=PermisoDeCapa)
def onPermisoDeCapaPostDelete(sender, instance, **kwargs):
print 'onPermisoDeCapaPostDelete %s'%(str(instance))
ManejadorDeMapas.delete_mapfile(instance.user.username)
@receiver(post_save, sender=PermisoDeCapaPorGrupo)
def onPermisoDeCapaPorGrupoPostSave(sender, instance, created, **kwargs):
print 'onPermisoDeCapaPorGrupoPostSave %s'%(str(instance))
for u in instance.group.user_set.all():
ManejadorDeMapas.delete_mapfile(u.username)
@receiver(post_delete, sender=PermisoDeCapaPorGrupo)
def onPermisoDeCapaPorGrupoPostDelete(sender, instance, **kwargs):
print 'onPermisoDeCapaPorGrupoPostDelete %s'%(str(instance))
for u in instance.group.user_set.all():
ManejadorDeMapas.delete_mapfile(u.username)
@receiver(post_save, sender=User)
def onUserPostSave(sender, instance, created, **kwargs):
print 'onUserPostSave %s'%(str(instance))
if created:
UserProfile.objects.create(user=instance)
mapa_usuario = Mapa.objects.create(owner=instance, nombre=instance.username, id_mapa=instance.username, tipo_de_mapa='user')
ManejadorDeMapas.regenerar_mapas_de_usuarios([instance])
@receiver(post_save, sender=Group)
def onGroupPostSave(sender, instance, created, **kwargs):
print 'onGroupPostSave %s'%(str(instance))
# aca no iria nada: una creacion de grupo ni un rename de grupo recalculan nada
@receiver(post_delete, sender=Group)
def onGroupPostDelete(sender, instance, **kwargs):
print 'onGroupPostDelete %s'%(str(instance))
# aca no iria nada porque antes ejecuta la senial onPermisoDeCapaPorGrupoPostDelete
| 45.173913 | 167 | 0.629879 |
acf7776f9d20ba98ba5d1fbad386edc5ab51086d | 7,550 | py | Python | integration/sawtooth_integration/tests/test_two_families.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | 1 | 2017-08-04T10:31:00.000Z | 2017-08-04T10:31:00.000Z | integration/sawtooth_integration/tests/test_two_families.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | null | null | null | integration/sawtooth_integration/tests/test_two_families.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
import logging
import time
import operator
import subprocess
import shlex
import urllib.request
import urllib.error
import json
from base64 import b64decode
import cbor
from sawtooth_intkey.intkey_message_factory import IntkeyMessageFactory
from sawtooth_integration.tests.integration_tools import wait_for_rest_apis
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
INTKEY_PREFIX = '1cf126'
XO_PREFIX = '5b7349'
class TestTwoFamilies(unittest.TestCase):
@classmethod
def setUpClass(cls):
wait_for_rest_apis(['rest_api:8080'])
def test_two_families(self):
'''
After starting a validator with both intkey and xo
transaction processors and initializing xo, verify that
state is empty. Next, send the following pairs of commands,
verifying that the state is as it should be after each:
1. Send a batch of intkey 'set' txns and create an xo game.
2. Send a batch of valid intkey 'inc'/'dec' txns and take an xo space.
3. Send an invalid batch of intkey 'inc'/'dec' txns (invalid because
they target names that haven't been set) and take the same xo space.
4. Send more valid intkey txns and take a new xo space.
5. Send the same intkey 'set' txns (which are now invalid) and
create the same xo game (which has already been created).
6. Send more valid intkey txns and take a new xo space.
Besides verifying that the xo and intkey commands act as expected,
verify that there is nothing in the state that isn't xo or intkey.
'''
self.intkey_verifier = IntkeyTestVerifier()
self.xo_verifier = XoTestVerifier()
_send_xo_cmd('xo init --url rest_api:8080')
self.verify_empty_state()
commands = zip(
self.intkey_verifier.intkey_cmds,
self.xo_verifier.xo_cmds)
how_many_updates = 0
for intkey_cmd, xo_cmd in commands:
_send_intkey_cmd(intkey_cmd)
_send_xo_cmd(xo_cmd)
time.sleep(1)
if intkey_cmd == self.intkey_verifier.valid_txns:
how_many_updates += 1
self.verify_state_after_n_updates(how_many_updates)
self.verify_all_state_xo_or_intkey()
def verify_empty_state(self):
LOGGER.debug('Verifying empty state')
self.assertEqual(
[],
_get_state(),
'Empty state error')
def verify_state_after_n_updates(self, num):
LOGGER.debug('Verifying state after {} updates'.format(num))
intkey_state = _get_intkey_data()
LOGGER.info('Current intkey state: {}'.format(intkey_state))
xo_data = _get_xo_data()
LOGGER.info('Current xo state: {}'.format(xo_data))
self.assertEqual(
intkey_state,
self.intkey_verifier.state_after_n_updates(num),
'Wrong intkey state')
self.assertEqual(
xo_data,
self.xo_verifier.state_after_n_updates(num),
'Wrong xo state')
def verify_all_state_xo_or_intkey(self):
state = _get_state()
xo_state = _get_xo_state()
intkey_state = _get_intkey_state()
for entry in state:
if entry not in intkey_state:
self.assertIn(
entry,
xo_state,
'Unknown state entry')
# sending commands
def _send_xo_cmd(cmd_str):
LOGGER.info('Sending xo cmd')
subprocess.run(
shlex.split(cmd_str),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
def _send_intkey_cmd(txns):
batch = IntkeyMessageFactory().create_batch(txns)
LOGGER.info('Sending intkey txns')
_post_batch(batch)
# rest_api calls
def _post_batch(batch):
headers = {'Content-Type': 'application/octet-stream'}
response = _query_rest_api('/batches', data=batch, headers=headers)
return response
def _get_intkey_data():
state = _get_intkey_state()
# state is a list of dictionaries: { data: ..., address: ... }
dicts = [cbor.loads(b64decode(entry['data'])) for entry in state]
LOGGER.debug(dicts)
data = {k:v for d in dicts for k, v in d.items()} # merge dicts
return data
def _get_xo_data():
state = _get_xo_state()
data = b64decode(state[0]['data']).decode().split('|')[0].split(',')
game_name, board, turn, _, _ = data
return board, turn, game_name
def _get_intkey_state():
state = _get_state_prefix(INTKEY_PREFIX)
return state
def _get_xo_state():
state = _get_state_prefix(XO_PREFIX)
return state
def _get_state_prefix(prefix):
response = _query_rest_api('/state?address=' + prefix)
return response['data']
def _get_state():
response = _query_rest_api('/state')
return response['data']
def _query_rest_api(suffix='', data=None, headers={}):
url = 'http://rest_api:8080' + suffix
request = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(request).read().decode('utf-8')
return json.loads(response)
# verifiers
class XoTestVerifier:
def __init__(self):
self.xo_cmds = (
'xo create game',
'xo take game 5',
'xo take game 5',
'xo take game 9',
'xo create game',
'xo take game 4',
)
def state_after_n_updates(self, num):
state = {
0: ('---------', 'P1-NEXT', 'game'),
1: ('----X----', 'P2-NEXT', 'game'),
2: ('----X---O', 'P1-NEXT', 'game'),
3: ('---XX---O', 'P2-NEXT', 'game')
}
try:
return state[num]
except KeyError:
return ()
class IntkeyTestVerifier:
def __init__(self):
self.valid = 'ragdoll', 'sphynx', 'van'
self.invalid = 'manx', 'persian', 'siamese'
self.verbs = 'inc', 'dec', 'inc'
self.sets = 'set', 'set', 'set'
self.incdec = 11, 13, 10
self.initial = 110, 143, 130
self.populate = tuple(zip(self.sets, self.valid, self.initial))
self.valid_txns = tuple(zip(self.verbs, self.valid, self.incdec))
self.invalid_txns = tuple(zip(self.verbs, self.invalid, self.incdec))
self.intkey_cmds = (
self.populate,
self.valid_txns,
self.invalid_txns,
self.valid_txns,
self.populate,
self.valid_txns,
)
def state_after_n_updates(self, num):
ops = {
'inc': operator.add,
'dec': operator.sub
}
expected_values = [
ops[verb](init, (val * num))
for verb, init, val
in zip(self.verbs, self.initial, self.incdec)
]
return {word: val for word, val in zip(self.valid, expected_values)}
| 29.960317 | 80 | 0.61894 |
acf7786c84b76fd9609c0f2d44a6c1162b320b37 | 957 | py | Python | test/test_edit_contact_via_profile.py | nitrobenzol/python_test_repo | f562b0b6a7169f5222b8b2ee5a699de1658e7ef9 | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact_via_profile.py | nitrobenzol/python_test_repo | f562b0b6a7169f5222b8b2ee5a699de1658e7ef9 | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact_via_profile.py | nitrobenzol/python_test_repo | f562b0b6a7169f5222b8b2ee5a699de1658e7ef9 | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
def test_edit_first_contact_via_profile(app):
contact = Contact(first_name="New Gleb", last_name="New Sarkisov", middle_name="New Igorevich", address="New Moscow, Veneskaya St, 23, Apt 119", home_phone="New 4957166231",
mobile_phone="New 9866662325", work_phone="New 123123123", secondary_phone="New 1414141414", email="glebsarkisov@gmail.com", email2="asdasdasd@com",
email3="wdwdwdwdwdw@gmail.com")
if app.contact.count() == 0:
app.contact.create(Contact(first_name="Someone"))
old_contacts = app.contact.get_contacts_list()
contact.id = old_contacts[0].id
app.contact.edit_first_contact_via_profile(contact)
assert len(old_contacts) == app.contact.count()
new_contacts = app.contact.get_contacts_list()
old_contacts[0] = contact
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max) | 59.8125 | 177 | 0.718913 |
acf77904fad8217d24899a81447bac14ce7d9f8d | 11,366 | py | Python | tests/test_reaction_class.py | tlestang/autodE | 56fd4c78e7d7e78c5747428190211ff69dc6d94a | [
"MIT"
] | 90 | 2020-03-13T15:03:35.000Z | 2022-03-14T13:41:04.000Z | tests/test_reaction_class.py | skphy/autodE | fd80995206ac601299d2f78105d0fe4deee8c2cf | [
"MIT"
] | 117 | 2020-06-13T00:11:06.000Z | 2022-03-24T08:54:16.000Z | tests/test_reaction_class.py | skphy/autodE | fd80995206ac601299d2f78105d0fe4deee8c2cf | [
"MIT"
] | 26 | 2020-08-14T04:52:53.000Z | 2022-03-06T13:04:17.000Z | import os
from autode.reactions import reaction
from autode.reactions import reaction_types
from autode.transition_states.transition_state import TransitionState
from autode.bond_rearrangement import BondRearrangement
from autode.species import Reactant, Product
from autode.transition_states.ts_guess import TSguess
from autode.species.complex import ReactantComplex, ProductComplex
from autode.atoms import Atom
from autode.exceptions import UnbalancedReaction
from autode.exceptions import SolventsDontMatch
from autode.mol_graphs import make_graph
from autode.plotting import plot_reaction_profile
from autode.units import KcalMol
from autode.methods import get_hmethod
from autode.config import Config
from .testutils import work_in_zipped_dir
import shutil
import pytest
here = os.path.dirname(os.path.abspath(__file__))
h1 = Reactant(name='h1', atoms=[Atom('H', 0.0, 0.0, 0.0)])
h2 = Reactant(name='h2', atoms=[Atom('H', 1.0, 0.0, 0.0)])
h2_product = Product(name='h2', atoms=[Atom('H', 1.0, 0.0, 0.0)])
lin_h3 = Reactant(name='h3_linear', atoms=[Atom('H', -1.76172, 0.79084, -0.00832),
Atom('H', -2.13052, 0.18085, 0.00494),
Atom('H', -1.39867, 1.39880, -0.00676)])
trig_h3 = Product(name='h3_trigonal', atoms=[Atom('H', -1.76172, 0.79084, -0.00832),
Atom('H', -1.65980, 1.15506, 0.61469),
Atom('H', -1.39867, 1.39880, -0.00676)])
def test_reaction_class():
h1 = reaction.Reactant(name='h1', atoms=[Atom('H', 0.0, 0.0, 0.0)])
hh_product = reaction.Product(name='hh', atoms=[Atom('H', 0.0, 0.0, 0.0),
Atom('H', 0.7, 0.0, 0.0)])
# h + h > mol
hh_reac = reaction.Reaction(h1, h2, hh_product, name='h2_assoc')
h1.energy = 2
h2.energy = 3
hh_product.energy = 1
# Only swap to dissociation in invoking locate_ts()
assert hh_reac.type == reaction_types.Addition
assert len(hh_reac.prods) == 1
assert len(hh_reac.reacs) == 2
assert hh_reac.ts is None
assert hh_reac.tss is None
assert hh_reac.name == 'h2_assoc'
assert hh_reac.calc_delta_e() == -4
h1 = reaction.Reactant(name='h1', atoms=[Atom('H')])
hh_reactant = reaction.Reactant(name='hh', atoms=[Atom('H'),
Atom('H', x=1.0)])
hh_product = reaction.Product(name='hh', atoms=[Atom('H'),
Atom('H', x=1.0)])
# h + mol > mol + h
h_sub = reaction.Reaction(h1, hh_reactant, h2_product, hh_product,
solvent_name='water')
assert h_sub.type == reaction_types.Substitution
assert h_sub.name == 'reaction'
assert h_sub.solvent.name == 'water'
assert h_sub.solvent.smiles == 'O'
for mol in h_sub.reacs + h_sub.prods:
assert mol.solvent.name == 'water'
def test_reactant_product_complexes():
h2_prod = Product(name='h2', atoms=[Atom('H'), Atom('H', x=1.0)])
rxn = reaction.Reaction(h1, h2, h2_prod)
assert rxn.reactant.n_molecules == 2
assert rxn.reactant.distance(0, 1) > 1
assert rxn.product.n_molecules == 1
# If the reactant complex is set then the whole reactant should be that
rxn.reactant = ReactantComplex(h1, h1, copy=True, do_init_translation=False)
assert -1E-4 < rxn.reactant.distance(0, 1) < 1E-4
# but cannot be just a reactant
with pytest.raises(ValueError):
rxn.reactant = h1
# and similarly with the products
with pytest.raises(ValueError):
rxn.product = h2
# but can set the product complex
rxn.product = ProductComplex(Product(atoms=[Atom('H'), Atom('H', x=1.0)]),
name='tmp')
assert rxn.product.name == 'tmp'
def test_invalid_with_complexes():
h3_reaction = reaction.Reaction(lin_h3, trig_h3)
# Currently free energies with association complexes is not supported
with pytest.raises(NotImplementedError):
h3_reaction.calculate_reaction_profile(with_complexes=True,
free_energy=True)
# Cannot plot a reaction profile with complexes without them existing
with pytest.raises(ValueError):
h3_reaction._plot_reaction_profile_with_complexes(units=KcalMol,
free_energy=False,
enthalpy=False)
def test_check_rearrangement():
# Linear H3 -> Trigonal H3
make_graph(species=trig_h3, allow_invalid_valancies=True)
reac = reaction.Reaction(lin_h3, trig_h3)
# Should switch reactants and products if the products have more bonds than
# the reactants, but only when the TS is attempted to be located..
# assert reac.reacs[0].name == 'h3_trigonal'
# assert reac.prods[0].name == 'h3_linear'
def test_check_solvent():
r = Reactant(name='r', solvent_name='water')
p = Product(name='p')
with pytest.raises(SolventsDontMatch):
_ = reaction.Reaction(r, p)
p = Product(name='p', solvent_name='water')
reaction_check = reaction.Reaction(r, p)
assert reaction_check.solvent.name == 'water'
def test_reaction_identical_reac_prods():
hh_reactant = reaction.Reactant(name='hh', atoms=[Atom('H'),
Atom('H', x=1.0)])
hh_product = reaction.Product(name='hh', atoms=[Atom('H'),
Atom('H', x=1.0)])
h2_reaction = reaction.Reaction(hh_reactant, hh_product)
with pytest.raises(ValueError):
h2_reaction.locate_transition_state()
def test_swap_reacs_prods():
reactant = Reactant(name='r')
product = Product(name='p')
swapped_reaction = reaction.Reaction(reactant, product)
assert swapped_reaction.reacs[0].name == 'r'
assert swapped_reaction.prods[0].name == 'p'
swapped_reaction.switch_reactants_products()
assert swapped_reaction.reacs[0].name == 'p'
assert swapped_reaction.prods[0].name == 'r'
def test_bad_balance():
hh_product = reaction.Product(name='hh',
atoms=[Atom('H'), Atom('H', x=1.0)])
with pytest.raises(UnbalancedReaction):
reaction.Reaction(h1, hh_product)
h_minus = reaction.Reactant(name='h1_minus', atoms=[Atom('H')], charge=-1)
with pytest.raises(UnbalancedReaction):
reaction.Reaction(h1, h_minus, hh_product)
h1_water = reaction.Reactant(name='h1', atoms=[Atom('H')],
solvent_name='water')
h2_water = reaction.Reactant(name='h2', atoms=[Atom('H', x=1.0)],
solvent_name='water')
hh_thf = reaction.Product(name='hh', atoms=[Atom('H'), Atom('H', x=1.0)],
solvent_name='thf')
with pytest.raises(SolventsDontMatch):
reaction.Reaction(h1_water, h2_water, hh_thf)
with pytest.raises(NotImplementedError):
hh_triplet = reaction.Product(name='hh_trip',
atoms=[Atom('H'), Atom('H', x=0.7)],
mult=3)
reaction.Reaction(h1, h2, hh_triplet)
def test_calc_delta_e():
r1 = reaction.Reactant(name='h', atoms=[Atom('H')])
r1.energy = -0.5
r2 = reaction.Reactant(name='h', atoms=[Atom('H')])
r2.energy = -0.5
tsguess = TSguess(atoms=None, reactant=ReactantComplex(r1),
product=ProductComplex(r2))
tsguess.bond_rearrangement = BondRearrangement()
ts = TransitionState(tsguess)
ts.energy = -0.8
p = reaction.Product(name='hh', atoms=[Atom('H'), Atom('H', x=1.0)])
p.energy = -1.0
reac = reaction.Reaction(r1, r2, p)
reac.ts = ts
assert -1E-6 < reac.calc_delta_e() < 1E-6
assert 0.2 - 1E-6 < reac.calc_delta_e_ddagger() < 0.2 + 1E-6
def test_from_smiles():
# Chemdraw can generate a reaction with reactants and products
addition = reaction.Reaction(smiles='CC(C)=O.[C-]#N>>CC([O-])(C#N)C')
assert len(addition.reacs) == 2
assert len(addition.prods) == 1
# Should be readable-ish names
for reac in addition.reacs:
assert reac.name != 'molecule'
with pytest.raises(UnbalancedReaction):
_ = reaction.Reaction('CC(C)=O.[C-]#N')
def test_single_points():
# Spoof ORCA install
Config.ORCA.path = here
rxn = reaction.Reaction(Reactant(smiles='O'), Product(smiles='O'))
# calculate_single_points should be pretty tolerant.. not raising
# exceptions if the energy is already None
rxn.calculate_single_points()
assert rxn.reacs[0].energy is None
overlapping_h2 = Reactant(atoms=[Atom('H'), Atom('H')])
overlapping_h2.energy = -1
rxn.reacs = [overlapping_h2]
# Shouldn't calculate a single point for a molecule that is not
# 'reasonable'
rxn.calculate_single_points()
assert rxn.reacs[0].energy == -1
Config.ORCA.path = None
@work_in_zipped_dir(os.path.join(here, 'data', 'free_energy_profile.zip'))
def test_free_energy_profile():
# Use a spoofed Gaussian09 and XTB install
Config.lcode = 'xtb'
Config.hcode = 'g09'
Config.G09.path = here
Config.ts_template_folder_path = os.getcwd()
Config.hmethod_conformers = False
Config.standard_state = '1atm'
Config.lfm_method = 'igm'
method = get_hmethod()
assert method.name == 'g09'
assert method.available
rxn = reaction.Reaction(Reactant(name='F-', smiles='[F-]'),
Reactant(name='CH3Cl', smiles='ClC'),
Product(name='Cl-', smiles='[Cl-]'),
Product(name='CH3F', smiles='CF'),
name='sn2', solvent_name='water')
# Don't run the calculation without a working XTB install
if shutil.which('xtb') is None or not shutil.which('xtb').endswith('xtb'):
return
rxn.calculate_reaction_profile(free_energy=True)
# Allow ~0.5 kcal mol-1 either side of the 'true' value
dg_ts = rxn.calc_delta_g_ddagger()
assert 16 < dg_ts.to('kcal mol-1') < 18
dg_r = rxn.calc_delta_g()
assert -14 < dg_r.to('kcal mol-1') < -12
dh_ts = rxn.calc_delta_h_ddagger()
assert 9 < dh_ts.to('kcal mol-1') < 11
dh_r = rxn.calc_delta_h()
assert -14 < dh_r.to('kcal mol-1') < -12
# Should be able to plot an enthalpy profile
plot_reaction_profile([rxn], units=KcalMol, name='enthalpy',
enthalpy=True)
assert os.path.exists('enthalpy_reaction_profile.png')
os.remove('enthalpy_reaction_profile.png')
# Reset the configuration to the default values
Config.hcode = None
Config.G09.path = None
Config.lcode = None
Config.XTB.path = None
def test_unavail_properties():
ha = reaction.Reactant(name='ha', atoms=[Atom('H')])
hb = reaction.Product(name='hb', atoms=[Atom('H')])
rxn = reaction.Reaction(ha, hb)
delta = reaction.calc_delta_with_cont(left=[ha], right=[hb], cont='h_cont')
assert delta is None
# Should not raise an exception(?)
rxn.find_lowest_energy_ts_conformer()
rxn.calculate_thermochemical_cont(free_energy=False, enthalpy=False)
| 33.928358 | 94 | 0.620535 |
acf77a708936878ff48636250023185dc11e3e75 | 6,471 | py | Python | setup.py | bluetech/mypy | efd68dd752410f4d987ce407354623938ff8e7f3 | [
"PSF-2.0"
] | null | null | null | setup.py | bluetech/mypy | efd68dd752410f4d987ce407354623938ff8e7f3 | [
"PSF-2.0"
] | null | null | null | setup.py | bluetech/mypy | efd68dd752410f4d987ce407354623938ff8e7f3 | [
"PSF-2.0"
] | null | null | null | #!/usr/bin/env python
import glob
import os
import os.path
import sys
if sys.version_info < (3, 5, 0):
sys.stderr.write("ERROR: You need Python 3.5 or later to use mypy.\n")
exit(1)
# we'll import stuff from the source tree, let's ensure is on the sys path
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
# This requires setuptools when building; setuptools is not needed
# when installing from a wheel file (though it is still neeeded for
# alternative forms of installing, as suggested by README.md).
from setuptools import setup
from setuptools.command.build_py import build_py
from mypy.version import __version__ as version
from mypy import git
git.verify_git_integrity_or_abort(".")
description = 'Optional static typing for Python'
long_description = '''
Mypy -- Optional Static Typing for Python
=========================================
Add type annotations to your Python programs, and use mypy to type
check them. Mypy is essentially a Python linter on steroids, and it
can catch many programming errors by analyzing your program, without
actually having to run it. Mypy has a powerful type system with
features such as type inference, gradual typing, generics and union
types.
'''.lstrip()
def find_package_data(base, globs):
"""Find all interesting data files, for setup(package_data=)
Arguments:
root: The directory to search in.
globs: A list of glob patterns to accept files.
"""
rv_dirs = [root for root, dirs, files in os.walk(base)]
rv = []
for rv_dir in rv_dirs:
files = []
for pat in globs:
files += glob.glob(os.path.join(rv_dir, pat))
if not files:
continue
rv.extend([f[5:] for f in files])
return rv
class CustomPythonBuild(build_py):
def pin_version(self):
path = os.path.join(self.build_lib, 'mypy')
self.mkpath(path)
with open(os.path.join(path, 'version.py'), 'w') as stream:
stream.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
build_py.run(self)
cmdclass = {'build_py': CustomPythonBuild}
package_data = ['py.typed']
package_data += find_package_data(os.path.join('mypy', 'typeshed'), ['*.py', '*.pyi'])
package_data += find_package_data(os.path.join('mypy', 'xml'), ['*.xsd', '*.xslt', '*.css'])
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == '--use-mypyc':
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv('MYPY_USE_MYPYC', None) == '1':
USE_MYPYC = True
if USE_MYPYC:
MYPYC_BLACKLIST = (
# Need to be runnable as scripts
'__main__.py',
'sitepkgs.py',
os.path.join('dmypy', '__main__.py'),
# Needs to be interpreted to provide a hook to interpreted plugins
'interpreted_plugin.py',
# Uses __getattr__/__setattr__
'split_namespace.py',
# Lies to mypy about code reachability
'bogus_type.py',
# We don't populate __file__ properly at the top level or something?
# Also I think there would be problems with how we generate version.py.
'version.py',
)
everything = find_package_data('mypy', ['*.py'])
# Start with all the .py files
all_real_pys = [x for x in everything if not x.startswith('typeshed' + os.sep)]
# Strip out anything in our blacklist
mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
# Strip out any test code
mypyc_targets = [x for x in mypyc_targets if not x.startswith('test' + os.sep)]
# ... and add back in the one test module we need
mypyc_targets.append(os.path.join('test', 'visitors.py'))
# Fix the paths to be full
mypyc_targets = [os.path.join('mypy', x) for x in mypyc_targets]
# The targets come out of file system apis in an unspecified
# order. Sort them so that the mypyc output is deterministic.
mypyc_targets.sort()
# This bit is super unfortunate: we want to use the mypy packaged
# with mypyc. It will arrange for the path to be setup so it can
# find it, but we've already imported parts, so we remove the
# modules that we've imported already, which will let the right
# versions be imported by mypyc.
del sys.modules['mypy']
del sys.modules['mypy.version']
del sys.modules['mypy.git']
from mypyc.build import mypycify, MypycifyBuildExt
opt_level = os.getenv('MYPYC_OPT_LEVEL', '3')
ext_modules = mypycify(
mypyc_targets,
['--config-file=mypy_bootstrap.ini'],
opt_level=opt_level,
# Use multi-file compliation mode on windows because without it
# our Appveyor builds run out of memory sometimes.
multi_file=sys.platform == 'win32',
)
cmdclass['build_ext'] = MypycifyBuildExt
else:
ext_modules = []
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development',
]
setup(name='mypy',
version=version,
description=description,
long_description=long_description,
author='Jukka Lehtosalo',
author_email='jukka.lehtosalo@iki.fi',
url='http://www.mypy-lang.org/',
license='MIT License',
py_modules=[],
ext_modules=ext_modules,
packages=[
'mypy', 'mypy.test', 'mypy.server', 'mypy.plugins', 'mypy.newsemanal', 'mypy.dmypy'
],
package_data={'mypy': package_data},
entry_points={'console_scripts': ['mypy=mypy.__main__:console_entry',
'stubgen=mypy.stubgen:main',
'dmypy=mypy.dmypy.client:console_entry',
]},
classifiers=classifiers,
cmdclass=cmdclass,
# When changing this, also update test-requirements.txt.
install_requires=['typed_ast >= 1.4.0, < 1.5.0',
'typing_extensions>=3.7.4',
'mypy_extensions >= 0.4.0, < 0.5.0',
],
# Same here.
extras_require={'dmypy': 'psutil >= 4.0'},
include_package_data=True,
)
| 34.238095 | 93 | 0.64055 |
acf77b648cb26db71d74da29b184abd7291eb7fd | 34,666 | py | Python | libcxx/utils/gdb/libcxx/printers.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/gdb/libcxx/printers.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/gdb/libcxx/printers.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | #===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""GDB pretty-printers for libc++.
These should work for objects compiled with either the stable ABI or the unstable ABI.
"""
from __future__ import print_function
import math
import re
import gdb
# One under-documented feature of the gdb pretty-printer API
# is that clients can call any other member of the API
# before they call to_string.
# Therefore all self.FIELDs must be set in the pretty-printer's
# __init__ function.
_void_pointer_type = gdb.lookup_type("void").pointer()
_long_int_type = gdb.lookup_type("unsigned long long")
_libcpp_big_endian = False
def addr_as_long(addr):
    # Reinterpret a gdb.Value address as a plain Python int.
    return int(addr.cast(_long_int_type))
# The size of a pointer in bytes.
_pointer_size = _void_pointer_type.sizeof
def _remove_cxx_namespace(typename):
"""Removed libc++ specific namespace from the type.
Arguments:
typename(string): A type, such as std::__u::something.
Returns:
A string without the libc++ specific part, such as std::something.
"""
return re.sub("std::__.*?::", "std::", typename)
def _remove_generics(typename):
"""Remove generics part of the type. Assumes typename is not empty.
Arguments:
typename(string): A type such as std::my_collection<element>.
Returns:
The prefix up to the generic part, such as std::my_collection.
"""
match = re.match("^([^<]+)", typename)
return match.group(1)
# Some common substitutions on the types to reduce visual clutter (A user who
# wants to see the actual details can always use print/r).
_common_substitutions = [
("std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
"std::string"),
("std::basic_string_view<char, std::char_traits<char> >",
"std::string_view"),
]
def _prettify_typename(gdb_type):
    """Returns a pretty name for the type, or None if no name can be found.
    Arguments:
      gdb_type(gdb.Type): A type object.
    Returns:
      A string, without type_defs, libc++ namespaces, and common substitutions
      applied.
    """
    type_without_typedefs = gdb_type.strip_typedefs()
    # Fall back through name, then tag, then str(): not every gdb.Type
    # exposes all three.
    typename = type_without_typedefs.name or type_without_typedefs.tag or \
        str(type_without_typedefs)
    result = _remove_cxx_namespace(typename)
    # Apply readability substitutions such as the long basic_string
    # expansion -> "std::string".
    for find_str, subst_str in _common_substitutions:
        result = re.sub(find_str, subst_str, result)
    return result
def _typename_for_nth_generic_argument(gdb_type, n):
    """Returns a pretty string for the nth argument of the given type.
    Arguments:
      gdb_type(gdb.Type): A type object, such as the one for std::map<int, int>
      n: The (zero indexed) index of the argument to return.
    Returns:
      A string for the nth argument, such a "std::string"
    """
    # gdb resolves the concrete type of the nth template parameter.
    element_type = gdb_type.template_argument(n)
    return _prettify_typename(element_type)
def _typename_with_n_generic_arguments(gdb_type, n):
    """Return a string for the type with the first n (1, ...) generic args."""
    base_type = _remove_generics(_prettify_typename(gdb_type))
    arg_list = [base_type]
    # Build a format string "%s<%s, %s, ...>" in lockstep with arg_list.
    template = "%s<"
    for i in range(n):
        arg_list.append(_typename_for_nth_generic_argument(gdb_type, i))
        template += "%s, "
    # [:-2] drops the trailing ", " before closing the angle bracket.
    result = (template[:-2] + ">") % tuple(arg_list)
    return result
def _typename_with_first_generic_argument(gdb_type):
    # Convenience wrapper: pretty name plus only the first template argument.
    return _typename_with_n_generic_arguments(gdb_type, 1)
class StdTuplePrinter(object):
    """Print a std::tuple."""
    class _Children(object):
        """Class to iterate over the tuple's children."""
        def __init__(self, val):
            self.val = val
            # Each tuple element is a distinct field of the __base_ subobject.
            self.child_iter = iter(self.val["__base_"].type.fields())
            self.count = 0
        def __iter__(self):
            return self
        def __next__(self):
            # child_iter raises StopIteration when appropriate.
            field_name = next(self.child_iter)
            child = self.val["__base_"][field_name]["__value_"]
            self.count += 1
            # NOTE: children are labelled starting at [1], not [0].
            return ("[%d]" % self.count, child)
        next = __next__  # Needed for GDB built against Python 2.7.
    def __init__(self, val):
        self.val = val
    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if not self.val.type.fields():
            return "empty %s" % typename
        return "%s containing" % typename
    def children(self):
        if not self.val.type.fields():
            return iter(())
        return self._Children(self.val)
def _get_base_subobject(child_class_value, index=0):
"""Returns the object's value in the form of the parent class at index.
This function effectively casts the child_class_value to the base_class's
type, but the type-to-cast to is stored in the field at index, and once
we know the field, we can just return the data.
Args:
child_class_value: the value to cast
index: the parent class index
Raises:
Exception: field at index was not a base-class field.
"""
field = child_class_value.type.fields()[index]
if not field.is_base_class:
raise Exception("Not a base-class field.")
return child_class_value[field]
def _value_of_pair_first(value):
    """Convenience for _get_base_subobject, for the common case."""
    # libc++'s __compressed_pair stores its first member as a base
    # subobject whose payload lives in "__value_".
    return _get_base_subobject(value, 0)["__value_"]
class StdStringPrinter(object):
    """Print a std::string."""
    def _get_short_size(self, short_field, short_size):
        """Short size depends on both endianness and a compile-time define."""
        # If the padding field is present after all this indirection, then string
        # was compiled with _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT defined.
        field = short_field.type.fields()[1].type.fields()[0]
        libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name
        # This logical structure closely follows the original code (which is clearer
        # in C++). Keep them parallel to make them easier to compare.
        if libcpp_abi_alternate_string_layout:
            if _libcpp_big_endian:
                return short_size >> 1
            else:
                return short_size
        elif _libcpp_big_endian:
            return short_size
        else:
            return short_size >> 1
    def __init__(self, val):
        self.val = val
    def to_string(self):
        """Build a python string from the data whether stored inline or separately."""
        value_field = _value_of_pair_first(self.val["__r_"])
        short_field = value_field["__s"]
        short_size = short_field["__size_"]
        if short_size == 0:
            return ""
        short_mask = self.val["__short_mask"]
        # Counter intuitive to compare the size and short_mask to see if the string
        # is long, but that's the way the implementation does it. Note that
        # __is_long() doesn't use get_short_size in C++.
        is_long = short_size & short_mask
        if is_long:
            # Long representation: heap pointer and explicit size.
            long_field = value_field["__l"]
            data = long_field["__data_"]
            size = long_field["__size_"]
        else:
            # Short (SSO) representation: characters stored inline.
            data = short_field["__data_"]
            size = self._get_short_size(short_field, short_size)
        return data.lazy_string(length=size)
    def display_hint(self):
        return "string"
class StdStringViewPrinter(object):
    """Print a std::string_view."""

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return "string"

    def to_string(self):  # pylint: disable=g-bad-name
        """GDB calls this to compute the pretty-printed form."""
        data_ptr = self.val["__data"]
        # Strip typedefs from the character type so gdb renders raw chars.
        char_type = data_ptr.type.target().strip_typedefs()
        data_ptr = data_ptr.cast(char_type.pointer())
        return data_ptr.lazy_string(length=self.val["__size"])
class StdUniquePtrPrinter(object):
    """Print a std::unique_ptr."""
    def __init__(self, val):
        self.val = val
        # __ptr_ is a __compressed_pair; its first element is the raw pointer.
        self.addr = _value_of_pair_first(self.val["__ptr_"])
        self.pointee_type = self.val.type.template_argument(0)
    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if not self.addr:
            return "%s is nullptr" % typename
        return ("%s<%s> containing" %
                (typename,
                 _remove_generics(_prettify_typename(self.pointee_type))))
    def __iter__(self):
        # Yield the pointee (if any) as the single child.
        if self.addr:
            yield "__ptr_", self.addr.cast(self.pointee_type.pointer())
    def children(self):
        return self
class StdSharedPointerPrinter(object):
    """Print a std::shared_ptr or std::weak_ptr."""

    def __init__(self, val):
        self.val = val
        self.addr = self.val["__ptr_"]

    def to_string(self):
        """Returns self as a string."""
        typename = _remove_generics(_prettify_typename(self.val.type))
        pointee_type = _remove_generics(
            _prettify_typename(self.val.type.template_argument(0)))
        if not self.addr:
            return "%s is nullptr" % typename
        refcount = self.val["__cntrl_"]
        # Default to the "unknown" state: it is used both when the control
        # block pointer is null (previously this path raised NameError on an
        # unbound `state`) and when the count fields cannot be read.
        state = "count ?, weak ? (libc++ missing debug info)"
        if refcount != 0:
            try:
                usecount = refcount["__shared_owners_"] + 1
                weakcount = refcount["__shared_weak_owners_"]
                if usecount == 0:
                    state = "expired, weak %d" % weakcount
                else:
                    state = "count %d, weak %d" % (usecount, weakcount)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; gdb's field-access
                # errors are still caught.
                # Debug info for a class with virtual functions is emitted
                # in the same place as its key function. That means that
                # for std::shared_ptr, __shared_owners_ is emitted into
                # into libcxx.[so|a] itself, rather than into the shared_ptr
                # instantiation point. So if libcxx.so was built without
                # debug info, these fields will be missing.
                pass
        return "%s<%s> %s containing" % (typename, pointee_type, state)

    def __iter__(self):
        # Yield the stored pointer (if any) as the single child.
        if self.addr:
            yield "__ptr_", self.addr

    def children(self):
        return self
class StdVectorPrinter(object):
    """Print a std::vector."""
    class _VectorBoolIterator(object):
        """Class to iterate over the bool vector's children."""
        def __init__(self, begin, size, bits_per_word):
            self.item = begin
            self.size = size
            self.bits_per_word = bits_per_word
            self.count = 0
            self.offset = 0
        def __iter__(self):
            return self
        def __next__(self):
            """Retrieve the next element."""
            self.count += 1
            if self.count > self.size:
                raise StopIteration
            # vector<bool> packs elements as bits inside machine words.
            entry = self.item.dereference()
            if entry & (1 << self.offset):
                outbit = 1
            else:
                outbit = 0
            self.offset += 1
            if self.offset >= self.bits_per_word:
                self.item += 1
                self.offset = 0
            # NOTE: children are labelled starting at [1], not [0].
            return ("[%d]" % self.count, outbit)
        next = __next__  # Needed for GDB built against Python 2.7.
    class _VectorIterator(object):
        """Class to iterate over the non-bool vector's children."""
        def __init__(self, begin, end):
            self.item = begin
            self.end = end
            self.count = 0
        def __iter__(self):
            return self
        def __next__(self):
            self.count += 1
            if self.item == self.end:
                raise StopIteration
            entry = self.item.dereference()
            self.item += 1
            return ("[%d]" % self.count, entry)
        next = __next__  # Needed for GDB built against Python 2.7.
    def __init__(self, val):
        """Set val, length, capacity, and iterator for bool and normal vectors."""
        self.val = val
        self.typename = _remove_generics(_prettify_typename(val.type))
        begin = self.val["__begin_"]
        if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL:
            # The bit-packed specialization has a different layout.
            self.typename += "<bool>"
            self.length = self.val["__size_"]
            bits_per_word = self.val["__bits_per_word"]
            self.capacity = _value_of_pair_first(
                self.val["__cap_alloc_"]) * bits_per_word
            self.iterator = self._VectorBoolIterator(
                begin, self.length, bits_per_word)
        else:
            end = self.val["__end_"]
            # Contiguous storage: length/capacity are pointer differences.
            self.length = end - begin
            self.capacity = _get_base_subobject(
                self.val["__end_cap_"])["__value_"] - begin
            self.iterator = self._VectorIterator(begin, end)
    def to_string(self):
        return ("%s of length %d, capacity %d" %
                (self.typename, self.length, self.capacity))
    def children(self):
        return self.iterator
    def display_hint(self):
        return "array"
class StdBitsetPrinter(object):
    """Print a std::bitset."""
    def __init__(self, val):
        self.val = val
        self.n_words = int(self.val["__n_words"])
        self.bits_per_word = int(self.val["__bits_per_word"])
        self.bit_count = self.val.type.template_argument(0)
        # __first_ is a scalar when the bitset fits one word, else an array.
        if self.n_words == 1:
            self.values = [int(self.val["__first_"])]
        else:
            self.values = [int(self.val["__first_"][index])
                           for index in range(self.n_words)]
    def to_string(self):
        typename = _prettify_typename(self.val.type)
        return "%s" % typename
    def _list_it(self):
        # Only set bits are emitted, as ("[index]", 1) pairs.
        for bit in range(self.bit_count):
            word = bit // self.bits_per_word
            word_bit = bit % self.bits_per_word
            if self.values[word] & (1 << word_bit):
                yield ("[%d]" % bit, 1)
    def __iter__(self):
        return self._list_it()
    def children(self):
        return self
class StdDequePrinter(object):
    """Print a std::deque."""

    def __init__(self, val):
        self.val = val
        self.size = int(_value_of_pair_first(val["__size_"]))
        # __map_ is the array of block pointers; __start_ is the index of
        # the first live element inside the first block.
        self.start_ptr = self.val["__map_"]["__begin_"]
        self.first_block_start_index = int(self.val["__start_"])
        self.node_type = self.start_ptr.type
        self.block_size = self._calculate_block_size(
            val.type.template_argument(0))

    def _calculate_block_size(self, element_type):
        """Calculates the number of elements in a full block.

        Mirrors struct __deque_block_size in libc++. Uses floor division so
        the result stays an int: the original `4096 / size` is true division
        on Python 3 and yields a float, which breaks the range()/min()
        arithmetic in _list_it.
        """
        size = element_type.sizeof
        return 4096 // size if size < 256 else 16

    def _bucket_it(self, start_addr, start_index, end_index):
        """Yield (index, element) for one block of the deque's map."""
        for i in range(start_index, end_index):
            yield i, (start_addr.dereference() + i).dereference()

    def _list_it(self):
        """Primary iteration worker."""
        num_emitted = 0
        current_addr = self.start_ptr
        start_index = self.first_block_start_index
        while num_emitted < self.size:
            end_index = min(start_index + self.size -
                            num_emitted, self.block_size)
            for _, elem in self._bucket_it(current_addr, start_index, end_index):
                yield "", elem
            num_emitted += end_index - start_index
            # Advance to the next block pointer in the map; only the first
            # block starts mid-way, every later block starts at index 0.
            current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \
                .cast(self.node_type)
            start_index = 0

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def __iter__(self):
        return self._list_it()

    def children(self):
        return self

    def display_hint(self):
        return "array"
class StdListPrinter(object):
    """Print a std::list."""
    def __init__(self, val):
        self.val = val
        size_alloc_field = self.val["__size_alloc_"]
        self.size = int(_value_of_pair_first(size_alloc_field))
        dummy_node = self.val["__end_"]
        # The sentinel is a __list_node_base; value nodes are __list_node,
        # so rewrite the type name to obtain a castable node pointer type.
        self.nodetype = gdb.lookup_type(
            re.sub("__list_node_base", "__list_node",
                   str(dummy_node.type.strip_typedefs()))).pointer()
        self.first_node = dummy_node["__next_"]
    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename
    def _list_iter(self):
        # Walk exactly `size` nodes of the circular list, skipping the
        # sentinel.
        current_node = self.first_node
        for _ in range(self.size):
            yield "", current_node.cast(self.nodetype).dereference()["__value_"]
            current_node = current_node.dereference()["__next_"]
    def __iter__(self):
        return self._list_iter()
    def children(self):
        return self if self.nodetype else iter(())
    def display_hint(self):
        return "array"
class StdQueueOrStackPrinter(object):
    """Print a std::queue or std::stack."""

    def __init__(self, val):
        self.val = val
        # Both adaptors expose their underlying container as member "c".
        self.underlying = val["c"]

    def to_string(self):
        return "%s wrapping" % _remove_generics(
            _prettify_typename(self.val.type))

    def children(self):
        # A single unnamed child: the wrapped container.
        return iter((("", self.underlying),))

    def display_hint(self):
        return "array"
class StdPriorityQueuePrinter(object):
    """Print a std::priority_queue."""

    def __init__(self, val):
        self.val = val
        # The adaptor exposes its underlying container as member "c".
        self.underlying = val["c"]

    def to_string(self):
        # TODO(tamur): It would be nice to print the top element, but the
        # implementation defers to a generic underlying container, and the
        # libstdcxx printers do not print the top element either.
        return "%s wrapping" % _remove_generics(
            _prettify_typename(self.val.type))

    def children(self):
        # A single unnamed child: the wrapped container.
        return iter((("", self.underlying),))

    def display_hint(self):
        return "array"
class RBTreeUtils(object):
    """Utility class for std::(multi)map, and std::(multi)set and iterators."""
    def __init__(self, cast_type, root):
        # cast_type: node-pointer type used to read tree links.
        # root: the tree root, or None when printing a lone iterator.
        self.cast_type = cast_type
        self.root = root
    def left_child(self, node):
        result = node.cast(self.cast_type).dereference()["__left_"]
        return result
    def right_child(self, node):
        result = node.cast(self.cast_type).dereference()["__right_"]
        return result
    def parent(self, node):
        """Return the parent of node, if it exists."""
        # If this is the root, then from the algorithm's point of view, it has no
        # parent.
        if node == self.root:
            return None
        # We don't have enough information to tell if this is the end_node (which
        # doesn't have a __parent_ field), or the root (which doesn't have a parent
        # from the algorithm's point of view), so cast_type may not be correct for
        # this particular node. Use heuristics.
        # The end_node's left child is the root. Note that when printing interators
        # in isolation, the root is unknown.
        if self.left_child(node) == self.root:
            return None
        parent = node.cast(self.cast_type).dereference()["__parent_"]
        # If the value at the offset of __parent_ doesn't look like a valid pointer,
        # then assume that node is the end_node (and therefore has no parent).
        # End_node type has a pointer embedded, so should have pointer alignment.
        if addr_as_long(parent) % _void_pointer_type.alignof:
            return None
        # This is ugly, but the only other option is to dereference an invalid
        # pointer. 0x8000 is fairly arbitrary, but has had good results in
        # practice. If there was a way to tell if a pointer is invalid without
        # actually dereferencing it and spewing error messages, that would be ideal.
        if parent < 0x8000:
            return None
        return parent
    def is_left_child(self, node):
        parent = self.parent(node)
        return parent is not None and self.left_child(parent) == node
    def is_right_child(self, node):
        parent = self.parent(node)
        return parent is not None and self.right_child(parent) == node
class AbstractRBTreePrinter(object):
    """Abstract super class for std::(multi)map, and std::(multi)set."""
    def __init__(self, val):
        self.val = val
        tree = self.val["__tree_"]
        self.size = int(_value_of_pair_first(tree["__pair3_"]))
        dummy_root = tree["__pair1_"]
        root = _value_of_pair_first(dummy_root)["__left_"]
        # _init_cast_type is provided by the concrete subclass.
        cast_type = self._init_cast_type(val.type)
        self.util = RBTreeUtils(cast_type, root)
    def _get_key_value(self, node):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError
    def _traverse(self):
        """Traverses the binary search tree in order."""
        # Iterative in-order traversal without a stack: after finishing a
        # right subtree, climb back up until we leave via a left edge.
        current = self.util.root
        skip_left_child = False
        while True:
            if not skip_left_child and self.util.left_child(current):
                current = self.util.left_child(current)
                continue
            skip_left_child = False
            for key_value in self._get_key_value(current):
                yield "", key_value
            right_child = self.util.right_child(current)
            if right_child:
                current = right_child
                continue
            while self.util.is_right_child(current):
                current = self.util.parent(current)
            if self.util.is_left_child(current):
                current = self.util.parent(current)
                skip_left_child = True
                continue
            break
    def __iter__(self):
        return self._traverse()
    def children(self):
        return self if self.util.cast_type and self.size > 0 else iter(())
    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename
class StdMapPrinter(AbstractRBTreePrinter):
    """Print a std::map or std::multimap."""
    def _init_cast_type(self, val_type):
        # Recover the node-pointer type from the iterator's template args.
        map_it_type = gdb.lookup_type(
            str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
        tree_it_type = map_it_type.template_argument(0)
        node_ptr_type = tree_it_type.template_argument(1)
        return node_ptr_type
    def display_hint(self):
        return "map"
    def _get_key_value(self, node):
        # Map nodes store the pair inside "__value_"."__cc".
        key_value = node.cast(self.util.cast_type).dereference()[
            "__value_"]["__cc"]
        return [key_value["first"], key_value["second"]]
class StdSetPrinter(AbstractRBTreePrinter):
    """Print a std::set."""
    def _init_cast_type(self, val_type):
        # Recover the node-pointer type from the iterator's template args.
        set_it_type = gdb.lookup_type(
            str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
        node_ptr_type = set_it_type.template_argument(1)
        return node_ptr_type
    def display_hint(self):
        return "array"
    def _get_key_value(self, node):
        key_value = node.cast(self.util.cast_type).dereference()["__value_"]
        return [key_value]
class AbstractRBTreeIteratorPrinter(object):
    """Abstract super class for std::(multi)map, and std::(multi)set iterator."""
    def _initialize(self, val, typename):
        self.typename = typename
        self.val = val
        self.addr = self.val["__ptr_"]
        cast_type = self.val.type.template_argument(1)
        # No root is known when printing an iterator in isolation.
        self.util = RBTreeUtils(cast_type, None)
        if self.addr:
            self.node = self.addr.cast(cast_type).dereference()
    def _is_valid_node(self):
        # A node with no parent link, or one that is neither child of its
        # parent, is taken to be the end() sentinel.
        if not self.util.parent(self.addr):
            return False
        return self.util.is_left_child(self.addr) or \
            self.util.is_right_child(self.addr)
    def to_string(self):
        if not self.addr:
            return "%s is nullptr" % self.typename
        return "%s " % self.typename
    def _get_node_value(self, node):
        # Subclasses return the printable payload of a node.
        raise NotImplementedError
    def __iter__(self):
        addr_str = "[%s]" % str(self.addr)
        if not self._is_valid_node():
            yield addr_str, " end()"
        else:
            yield addr_str, self._get_node_value(self.node)
    def children(self):
        return self if self.addr else iter(())
class MapIteratorPrinter(AbstractRBTreeIteratorPrinter):
    """Print a std::(multi)map iterator."""
    def __init__(self, val):
        # Map iterators wrap a tree iterator in field "__i_".
        self._initialize(val["__i_"],
                         _remove_generics(_prettify_typename(val.type)))
    def _get_node_value(self, node):
        return node["__value_"]["__cc"]
class SetIteratorPrinter(AbstractRBTreeIteratorPrinter):
    """Print a std::(multi)set iterator."""
    def __init__(self, val):
        # Set iterators are tree iterators directly (no wrapper field).
        self._initialize(val, _remove_generics(_prettify_typename(val.type)))
    def _get_node_value(self, node):
        return node["__value_"]
class StdFposPrinter(object):
    """Print a std::fpos or std::streampos."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        pretty_name = _remove_generics(_prettify_typename(self.val.type))
        # __st_ is the mbstate_t conversion state; __off_ the byte offset.
        conversion_state = self.val["__st_"]
        return "%s with stream offset:%s with state: {count:%s value:%s}" % (
            pretty_name, self.val["__off_"], conversion_state["__count"],
            conversion_state["__value"]["__wch"])
class AbstractUnorderedCollectionPrinter(object):
    """Abstract super class for std::unordered_(multi)[set|map]."""
    def __init__(self, val):
        self.val = val
        self.table = val["__table_"]
        self.sentinel = self.table["__p1_"]
        self.size = int(_value_of_pair_first(self.table["__p2_"]))
        node_base_type = self.sentinel.type.template_argument(0)
        self.cast_type = node_base_type.template_argument(0)
    def _list_it(self, sentinel_ptr):
        # Follow the intrusive singly-linked list of nodes until null.
        next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"]
        while str(next_ptr.cast(_void_pointer_type)) != "0x0":
            next_val = next_ptr.cast(self.cast_type).dereference()
            for key_value in self._get_key_value(next_val):
                yield "", key_value
            next_ptr = next_val["__next_"]
    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename
    def _get_key_value(self, node):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError
    def children(self):
        return self if self.cast_type and self.size > 0 else iter(())
    def __iter__(self):
        return self._list_it(self.sentinel)
class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter):
    """Print a std::unordered_(multi)set."""
    def _get_key_value(self, node):
        # Sets yield the stored value directly.
        return [node["__value_"]]
    def display_hint(self):
        return "array"
class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter):
    """Print a std::unordered_(multi)map."""
    def _get_key_value(self, node):
        # Maps store the pair inside "__value_"."__cc".
        key_value = node["__value_"]["__cc"]
        return [key_value["first"], key_value["second"]]
    def display_hint(self):
        return "map"
class AbstractHashMapIteratorPrinter(object):
    """Abstract class for unordered collection iterators."""
    def _initialize(self, val, addr):
        # Subclasses set self.cast_type before calling this.
        self.val = val
        self.typename = _remove_generics(_prettify_typename(self.val.type))
        self.addr = addr
        if self.addr:
            self.node = self.addr.cast(self.cast_type).dereference()
    def _get_key_value(self):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError
    def to_string(self):
        if not self.addr:
            return "%s = end()" % self.typename
        return "%s " % self.typename
    def children(self):
        return self if self.addr else iter(())
    def __iter__(self):
        for key_value in self._get_key_value():
            yield "", key_value
class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter):
    """Print a std::(multi)set iterator."""
    def __init__(self, val):
        # Node-pointer type comes straight from the iterator's first arg.
        self.cast_type = val.type.template_argument(0)
        self._initialize(val, val["__node_"])
    def _get_key_value(self):
        return [self.node["__value_"]]
    def display_hint(self):
        return "array"
class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter):
    """Print a std::(multi)map iterator."""
    def __init__(self, val):
        # Map iterators wrap a hash iterator in "__i_"; unwrap both levels.
        self.cast_type = val.type.template_argument(0).template_argument(0)
        self._initialize(val, val["__i_"]["__node_"])
    def _get_key_value(self):
        key_value = self.node["__value_"]["__cc"]
        return [key_value["first"], key_value["second"]]
    def display_hint(self):
        return "map"
def _remove_std_prefix(typename):
match = re.match("^std::(.+)", typename)
return match.group(1) if match is not None else ""
class LibcxxPrettyPrinter(object):
    """PrettyPrinter object so gdb-commands like 'info pretty-printers' work."""
    def __init__(self, name):
        super(LibcxxPrettyPrinter, self).__init__()
        self.name = name
        self.enabled = True
        # Maps the un-prefixed, generics-free type name to its printer class.
        self.lookup = {
            "basic_string": StdStringPrinter,
            "string": StdStringPrinter,
            "string_view": StdStringViewPrinter,
            "tuple": StdTuplePrinter,
            "unique_ptr": StdUniquePtrPrinter,
            "shared_ptr": StdSharedPointerPrinter,
            "weak_ptr": StdSharedPointerPrinter,
            "bitset": StdBitsetPrinter,
            "deque": StdDequePrinter,
            "list": StdListPrinter,
            "queue": StdQueueOrStackPrinter,
            "stack": StdQueueOrStackPrinter,
            "priority_queue": StdPriorityQueuePrinter,
            "map": StdMapPrinter,
            "multimap": StdMapPrinter,
            "set": StdSetPrinter,
            "multiset": StdSetPrinter,
            "vector": StdVectorPrinter,
            "__map_iterator": MapIteratorPrinter,
            "__map_const_iterator": MapIteratorPrinter,
            "__tree_iterator": SetIteratorPrinter,
            "__tree_const_iterator": SetIteratorPrinter,
            "fpos": StdFposPrinter,
            "unordered_set": StdUnorderedSetPrinter,
            "unordered_multiset": StdUnorderedSetPrinter,
            "unordered_map": StdUnorderedMapPrinter,
            "unordered_multimap": StdUnorderedMapPrinter,
            "__hash_map_iterator": StdUnorderedMapIteratorPrinter,
            "__hash_map_const_iterator": StdUnorderedMapIteratorPrinter,
            "__hash_iterator": StdUnorderedSetIteratorPrinter,
            "__hash_const_iterator": StdUnorderedSetIteratorPrinter,
        }
        self.subprinters = []
        for name, subprinter in self.lookup.items():
            # Subprinters and names are used only for the rarely used command "info
            # pretty" (and related), so the name of the first data structure it prints
            # is a reasonable choice.
            if subprinter not in self.subprinters:
                subprinter.name = name
                self.subprinters.append(subprinter)
    def __call__(self, val):
        """Return the pretty printer for a val, if the type is supported."""
        # Do not handle any type that is not a struct/class.
        if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT:
            return None
        # Don't attempt types known to be inside libstdcxx.
        typename = val.type.name or val.type.tag or str(val.type)
        match = re.match("^std::(__.*?)::", typename)
        if match is not None and match.group(1) in ["__cxx1998",
                                                    "__debug",
                                                    "__7",
                                                    "__g"]:
            return None
        # Handle any using declarations or other typedefs.
        typename = _prettify_typename(val.type)
        if not typename:
            return None
        without_generics = _remove_generics(typename)
        lookup_name = _remove_std_prefix(without_generics)
        if lookup_name in self.lookup:
            return self.lookup[lookup_name](val)
        return None
_libcxx_printer_name = "libcxx_pretty_printer"
# These are called for every binary object file, which could be thousands in
# certain pathological cases. Limit our pretty printers to the progspace.
def _register_libcxx_printers(event):
    """Register the libc++ pretty-printers for a new objfile's progspace.

    Fixes a scoping bug: the original assigned ``_libcpp_big_endian`` to a
    function-local variable, so the module-level flag consulted by
    StdStringPrinter._get_short_size was never updated.
    """
    global _libcpp_big_endian
    progspace = event.new_objfile.progspace
    # It would be ideal to get the endianness at print time, but
    # gdb.execute clears gdb's internal wrap buffer, removing any values
    # already generated as part of a larger data structure, and there is
    # no python api to get the endianness. Mixed-endianness debugging
    # rare enough that this workaround should be adequate.
    _libcpp_big_endian = "big endian" in gdb.execute("show endian",
                                                     to_string=True)
    if not getattr(progspace, _libcxx_printer_name, False):
        print("Loading libc++ pretty-printers.")
        gdb.printing.register_pretty_printer(
            progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
        setattr(progspace, _libcxx_printer_name, True)
def _unregister_libcxx_printers(event):
    """Remove our printer from a progspace when its objfiles are cleared."""
    progspace = event.progspace
    if not getattr(progspace, _libcxx_printer_name, False):
        return
    for printer in progspace.pretty_printers:
        if getattr(printer, "name", "none") == _libcxx_printer_name:
            progspace.pretty_printers.remove(printer)
            setattr(progspace, _libcxx_printer_name, False)
            break
def register_libcxx_printer_loader():
    """Register event handlers to load libc++ pretty-printers."""
    # Registration happens lazily, per progspace, as objfiles come and go.
    gdb.events.new_objfile.connect(_register_libcxx_printers)
    gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)
| 33.886608 | 86 | 0.623608 |
acf77ca838ef637f901cdeaa02cfd18d04e92be8 | 83 | py | Python | 01-simulation/empty.py | ryanboldi/CS206 | a2d0d29eb8f9209b6f60f97085e1e03f418cd71a | [
"MIT"
] | 1 | 2021-04-19T08:47:49.000Z | 2021-04-19T08:47:49.000Z | 01-simulation/empty.py | ryanboldi/UVM-CS206 | a2d0d29eb8f9209b6f60f97085e1e03f418cd71a | [
"MIT"
] | null | null | null | 01-simulation/empty.py | ryanboldi/UVM-CS206 | a2d0d29eb8f9209b6f60f97085e1e03f418cd71a | [
"MIT"
] | null | null | null | import pyrosim
sim = pyrosim.Simulator()
sim.start()
sim.wait_to_finish()
print(1)
| 13.833333 | 25 | 0.759036 |
acf77d0e1b06293b305eefdd66ccb2fee33c93cd | 22,445 | py | Python | src/poetry/packages/locker.py | ombschervister/poetry | 66bd0dd974f1735db82e13998f0d5dae26a235d4 | [
"MIT"
] | null | null | null | src/poetry/packages/locker.py | ombschervister/poetry | 66bd0dd974f1735db82e13998f0d5dae26a235d4 | [
"MIT"
] | null | null | null | src/poetry/packages/locker.py | ombschervister/poetry | 66bd0dd974f1735db82e13998f0d5dae26a235d4 | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
import logging
import os
import re
from copy import deepcopy
from hashlib import sha256
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import cast
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.packages.package import Package
from poetry.core.packages.url_dependency import URLDependency
from poetry.core.packages.vcs_dependency import VCSDependency
from poetry.core.semver.helpers import parse_constraint
from poetry.core.semver.version import Version
from poetry.core.toml.file import TOMLFile
from poetry.core.version.markers import parse_marker
from poetry.core.version.requirements import InvalidRequirement
from tomlkit import array
from tomlkit import document
from tomlkit import inline_table
from tomlkit import item
from tomlkit import table
from tomlkit.exceptions import TOMLKitError
from tomlkit.items import Array
from tomlkit.items import Table
from poetry.packages import DependencyPackage
from poetry.utils.extras import get_extra_package_names
if TYPE_CHECKING:
from collections.abc import Iterable
from collections.abc import Iterator
from collections.abc import Sequence
from poetry.core.version.markers import BaseMarker
from tomlkit.toml_document import TOMLDocument
from poetry.repositories import Repository
logger = logging.getLogger(__name__)
class Locker:
_VERSION = "1.1"
_legacy_keys = ["dependencies", "source", "extras", "dev-dependencies"]
_relevant_keys = [*_legacy_keys, "group"]
    def __init__(self, lock: str | Path, local_config: dict[str, Any]) -> None:
        # Handle to the lock file on disk.
        self._lock = TOMLFile(lock)
        # Project configuration; hashed by _get_content_hash (defined later
        # in this class) to detect staleness.
        self._local_config = local_config
        # Parsed lock-file contents; loaded lazily by the lock_data property.
        self._lock_data: TOMLDocument | None = None
        # Hash compared against the lock file's "content-hash" in is_fresh.
        self._content_hash = self._get_content_hash()
    @property
    def lock(self) -> TOMLFile:
        # The lock file handle itself (not its parsed contents).
        return self._lock
    @property
    def lock_data(self) -> TOMLDocument:
        # Parse the lock file on first access and cache the result.
        if self._lock_data is None:
            self._lock_data = self._get_lock_data()
        return self._lock_data
def is_locked(self) -> bool:
"""
Checks whether the locker has been locked (lockfile found).
"""
if not self._lock.exists():
return False
return "package" in self.lock_data
def is_fresh(self) -> bool:
"""
Checks whether the lock file is still up to date with the current hash.
"""
lock = self._lock.read()
metadata = lock.get("metadata", {})
if "content-hash" in metadata:
fresh: bool = self._content_hash == metadata["content-hash"]
return fresh
return False
    def locked_repository(self) -> Repository:
        """
        Searches and returns a repository of locked packages.

        Reconstructs Package objects from the `[[package]]` entries of the
        lock file, including legacy lock-file formats (hash-only metadata,
        `requirements` markers, invalid PEP 508 extras strings).
        """
        from poetry.factory import Factory
        from poetry.repositories import Repository
        if not self.is_locked():
            return Repository()
        lock_data = self.lock_data
        packages = Repository()
        locked_packages = cast("list[dict[str, Any]]", lock_data["package"])
        if not locked_packages:
            return packages
        for info in locked_packages:
            source = info.get("source", {})
            source_type = source.get("type")
            url = source.get("url")
            if source_type in ["directory", "file"]:
                # Local paths are stored relative to the lock file; resolve
                # them back to absolute POSIX paths.
                url = self._lock.path.parent.joinpath(url).resolve().as_posix()
            # Arguments are (name, version, pretty_version, ...).
            package = Package(
                info["name"],
                info["version"],
                info["version"],
                source_type=source_type,
                source_url=url,
                source_reference=source.get("reference"),
                source_resolved_reference=source.get("resolved_reference"),
            )
            package.description = info.get("description", "")
            package.category = info.get("category", "main")
            package.optional = info["optional"]
            metadata = cast("dict[str, Any]", lock_data["metadata"])
            name = info["name"]
            if "hashes" in metadata:
                # Old lock so we create dummy files from the hashes
                hashes = cast("dict[str, Any]", metadata["hashes"])
                package.files = [{"name": h, "hash": h} for h in hashes[name]]
            else:
                files = metadata["files"][name]
                package.files = files
            package.python_versions = info["python-versions"]
            extras = info.get("extras", {})
            if extras:
                # NOTE: `name` and `extras` are rebound inside this loop,
                # shadowing the outer variables of the same names.
                for name, deps in extras.items():
                    package.extras[name] = []
                    for dep in deps:
                        try:
                            dependency = Dependency.create_from_pep_508(dep)
                        except InvalidRequirement:
                            # handle lock files with invalid PEP 508
                            m = re.match(r"^(.+?)(?:\[(.+?)])?(?:\s+\((.+)\))?$", dep)
                            if not m:
                                raise
                            dep_name = m.group(1)
                            extras = m.group(2) or ""
                            constraint = m.group(3) or "*"
                            dependency = Dependency(
                                dep_name, constraint, extras=extras.split(",")
                            )
                        package.extras[name].append(dependency)
            if "marker" in info:
                package.marker = parse_marker(info["marker"])
            else:
                # Compatibility for old locks
                if "requirements" in info:
                    # Build a throwaway dependency just to convert the legacy
                    # python/platform requirements into a PEP 508 marker.
                    dep = Dependency("foo", "0.0.0")
                    for name, value in info["requirements"].items():
                        if name == "python":
                            dep.python_versions = value
                        elif name == "platform":
                            dep.platform = value
                    split_dep = dep.to_pep_508(False).split(";")
                    if len(split_dep) > 1:
                        package.marker = parse_marker(split_dep[1].strip())
            for dep_name, constraint in info.get("dependencies", {}).items():
                root_dir = self._lock.path.parent
                if package.source_type == "directory":
                    # root dir should be the source of the package relative to the lock
                    # path
                    assert package.source_url is not None
                    root_dir = Path(package.source_url)
                if isinstance(constraint, list):
                    # Multiple constraints (e.g. per-marker) for one dependency.
                    for c in constraint:
                        package.add_dependency(
                            Factory.create_dependency(dep_name, c, root_dir=root_dir)
                        )
                    continue
                package.add_dependency(
                    Factory.create_dependency(dep_name, constraint, root_dir=root_dir)
                )
            if "develop" in info:
                package.develop = info["develop"]
            packages.add_package(package)
        return packages
    @staticmethod
    def __get_locked_package(
        dependency: Dependency,
        packages_by_name: dict[str, list[Package]],
        decided: dict[Package, Dependency] | None = None,
    ) -> Package | None:
        """
        Internal helper to identify corresponding locked package using dependency
        version constraints.

        Returns None when no locked package satisfies the dependency.
        """
        decided = decided or {}
        # Get the packages that are consistent with this dependency.
        packages = [
            package
            for package in packages_by_name.get(dependency.name, [])
            if package.python_constraint.allows_all(dependency.python_constraint)
            and dependency.constraint.allows(package.version)
        ]
        # If we've previously made a choice that is compatible with the current
        # requirement, stick with it.
        for package in packages:
            old_decision = decided.get(package)
            if (
                old_decision is not None
                and not old_decision.marker.intersect(dependency.marker).is_empty()
            ):
                return package
        # Otherwise take the first candidate (presumably highest version, as
        # callers sort groups in descending version order), or None.
        return next(iter(packages), None)
    @classmethod
    def __walk_dependencies(
        cls,
        dependencies: list[Dependency],
        packages_by_name: dict[str, list[Package]],
    ) -> dict[Package, Dependency]:
        """Breadth-first walk of the dependency graph over locked packages.

        Consumes `dependencies` as a work queue (the list is mutated) and
        returns a mapping of each reached locked package to the merged
        dependency that requires it.

        Raises:
            RuntimeError: if a requirement cannot be matched to any locked
                package.
        """
        nested_dependencies: dict[Package, Dependency] = {}
        # Track (dependency, marker) pairs to avoid re-processing.
        visited: set[tuple[Dependency, BaseMarker]] = set()
        while dependencies:
            requirement = dependencies.pop(0)
            if (requirement, requirement.marker) in visited:
                continue
            visited.add((requirement, requirement.marker))
            locked_package = cls.__get_locked_package(
                requirement, packages_by_name, nested_dependencies
            )
            if not locked_package:
                raise RuntimeError(f"Dependency walk failed at {requirement}")
            # create dependency from locked package to retain dependency metadata
            # if this is not done, we can end-up with incorrect nested dependencies
            constraint = requirement.constraint
            marker = requirement.marker
            extras = requirement.extras
            requirement = locked_package.to_dependency()
            requirement.marker = requirement.marker.intersect(marker)
            requirement.set_constraint(constraint)
            for require in locked_package.requires:
                # Skip extra-only requirements whose extras were not requested.
                if require.in_extras and extras.isdisjoint(require.in_extras):
                    continue
                require = deepcopy(require)
                require.marker = require.marker.intersect(
                    requirement.marker.without_extras()
                )
                if not require.marker.is_empty():
                    dependencies.append(require)
            key = locked_package
            if key not in nested_dependencies:
                nested_dependencies[key] = requirement
            else:
                # Same package reached via several paths: union the markers.
                nested_dependencies[key].marker = nested_dependencies[key].marker.union(
                    requirement.marker
                )
        return nested_dependencies
@classmethod
def get_project_dependencies(
cls,
project_requires: list[Dependency],
locked_packages: list[Package],
) -> Iterable[tuple[Package, Dependency]]:
# group packages entries by name, this is required because requirement might use
# different constraints.
packages_by_name: dict[str, list[Package]] = {}
for pkg in locked_packages:
if pkg.name not in packages_by_name:
packages_by_name[pkg.name] = []
packages_by_name[pkg.name].append(pkg)
# Put higher versions first so that we prefer them.
for packages in packages_by_name.values():
packages.sort(
key=lambda package: package.version,
reverse=True,
)
nested_dependencies = cls.__walk_dependencies(
dependencies=project_requires,
packages_by_name=packages_by_name,
)
return nested_dependencies.items()
    def get_project_dependency_packages(
        self,
        project_requires: list[Dependency],
        project_python_marker: BaseMarker | None = None,
        extras: bool | Sequence[str] | None = None,
    ) -> Iterator[DependencyPackage]:
        """Yield the locked (dependency, package) pairs needed by the project.

        `extras` may be True (all extras), a sequence of extra names, or
        None/empty (no extras).
        """
        # Apply the project python marker to all requirements.
        if project_python_marker is not None:
            marked_requires: list[Dependency] = []
            for require in project_requires:
                require = deepcopy(require)
                require.marker = require.marker.intersect(project_python_marker)
                marked_requires.append(require)
            project_requires = marked_requires
        repository = self.locked_repository()
        # Build a set of all packages required by our selected extras
        extra_package_names: set[str] | None = None
        if extras is not True:
            # None here means "all extras selected", so no filtering below.
            extra_package_names = set(
                get_extra_package_names(
                    repository.packages,
                    self.lock_data.get("extras", {}),
                    extras or (),
                )
            )
        # If a package is optional and we haven't opted in to it, do not select
        selected = []
        for dependency in project_requires:
            try:
                package = repository.find_packages(dependency=dependency)[0]
            except IndexError:
                # Requirement has no locked counterpart; skip it.
                continue
            if extra_package_names is not None and (
                package.optional and package.name not in extra_package_names
            ):
                # a package is locked as optional, but is not activated via extras
                continue
            selected.append(dependency)
        for package, dependency in self.get_project_dependencies(
            project_requires=selected,
            locked_packages=repository.packages,
        ):
            for extra in dependency.extras:
                package.requires_extras.append(extra)
            yield DependencyPackage(dependency=dependency, package=package)
    def set_lock_data(self, root: Package, packages: list[Package]) -> bool:
        """Serialise `packages` into the lock file, writing only on change.

        Returns True when the lock file was (re)written, False when the
        existing lock data was already identical.
        """
        # Map of package name -> list of file hash entries, kept in the
        # `metadata.files` section of the lock file.
        files: dict[str, Any] = table()
        package_specs = self._lock_packages(packages)
        # Retrieving hashes
        for package in package_specs:
            if package["name"] not in files:
                files[package["name"]] = []
            for f in package["files"]:
                file_metadata = inline_table()
                for k, v in sorted(f.items()):
                    file_metadata[k] = v
                files[package["name"]].append(file_metadata)
            if files[package["name"]]:
                # Render file lists one entry per line for readable diffs.
                package_files = item(files[package["name"]])
                assert isinstance(package_files, Array)
                files[package["name"]] = package_files.multiline(True)
            # File hashes live under metadata, not in the package entry.
            del package["files"]
        lock = document()
        lock["package"] = package_specs
        if root.extras:
            lock["extras"] = {
                extra: [dep.pretty_name for dep in deps]
                for extra, deps in sorted(root.extras.items())
            }
        lock["metadata"] = {
            "lock-version": self._VERSION,
            "python-versions": root.python_versions,
            "content-hash": self._content_hash,
            "files": files,
        }
        if not self.is_locked() or lock != self.lock_data:
            self._write_lock_data(lock)
            return True
        return False
def _write_lock_data(self, data: TOMLDocument) -> None:
self.lock.write(data)
# Checking lock file data consistency
if data != self.lock.read():
raise RuntimeError("Inconsistent lock file data.")
self._lock_data = None
def _get_content_hash(self) -> str:
"""
Returns the sha256 hash of the sorted content of the pyproject file.
"""
content = self._local_config
relevant_content = {}
for key in self._relevant_keys:
data = content.get(key)
if data is None and key not in self._legacy_keys:
continue
relevant_content[key] = data
return sha256(json.dumps(relevant_content, sort_keys=True).encode()).hexdigest()
def _get_lock_data(self) -> TOMLDocument:
if not self._lock.exists():
raise RuntimeError("No lockfile found. Unable to read locked packages")
try:
lock_data: TOMLDocument = self._lock.read()
except TOMLKitError as e:
raise RuntimeError(f"Unable to read the lock file ({e}).")
metadata = cast(Table, lock_data["metadata"])
lock_version = Version.parse(metadata.get("lock-version", "1.0"))
current_version = Version.parse(self._VERSION)
# We expect the locker to be able to read lock files
# from the same semantic versioning range
accepted_versions = parse_constraint(
f"^{Version.from_parts(current_version.major, 0)}"
)
lock_version_allowed = accepted_versions.allows(lock_version)
if lock_version_allowed and current_version < lock_version:
logger.warning(
"The lock file might not be compatible with the current version of"
" Poetry.\nUpgrade Poetry to ensure the lock file is read properly or,"
" alternatively, regenerate the lock file with the `poetry lock`"
" command."
)
elif not lock_version_allowed:
raise RuntimeError(
"The lock file is not compatible with the current version of Poetry.\n"
"Upgrade Poetry to be able to read the lock file or, alternatively, "
"regenerate the lock file with the `poetry lock` command."
)
return lock_data
def _lock_packages(self, packages: list[Package]) -> list[dict[str, Any]]:
locked = []
for package in sorted(packages, key=lambda x: (x.name, x.version)):
spec = self._dump_package(package)
locked.append(spec)
return locked
    def _dump_package(self, package: Package) -> dict[str, Any]:
        """Serialise a single package into its lock-file dict representation.

        Emits name/version/description/category/optional/python-versions,
        file hashes, per-dependency constraints (path, url, VCS or version),
        extras, and source information.
        """
        # dependency pretty name -> list of constraint tables (or, after
        # simplification below, plain version strings).
        dependencies: dict[str, list[Any]] = {}
        for dependency in sorted(
            package.requires,
            key=lambda d: d.name,
        ):
            if dependency.pretty_name not in dependencies:
                dependencies[dependency.pretty_name] = []
            constraint = inline_table()
            if dependency.is_directory():
                dependency = cast(DirectoryDependency, dependency)
                constraint["path"] = dependency.path.as_posix()
                if dependency.develop:
                    constraint["develop"] = True
            elif dependency.is_file():
                dependency = cast(FileDependency, dependency)
                constraint["path"] = dependency.path.as_posix()
            elif dependency.is_url():
                dependency = cast(URLDependency, dependency)
                constraint["url"] = dependency.url
            elif dependency.is_vcs():
                dependency = cast(VCSDependency, dependency)
                constraint[dependency.vcs] = dependency.source
                # Only one of branch/tag/rev is recorded, in that priority.
                if dependency.branch:
                    constraint["branch"] = dependency.branch
                elif dependency.tag:
                    constraint["tag"] = dependency.tag
                elif dependency.rev:
                    constraint["rev"] = dependency.rev
            else:
                constraint["version"] = str(dependency.pretty_constraint)
            if dependency.extras:
                constraint["extras"] = sorted(dependency.extras)
            if dependency.is_optional():
                constraint["optional"] = True
            if not dependency.marker.is_any():
                constraint["markers"] = str(dependency.marker)
            dependencies[dependency.pretty_name].append(constraint)
        # All the constraints should have the same type,
        # but we want to simplify them if it's possible
        for dependency_name, constraints in dependencies.items():
            if all(
                len(constraint) == 1 and "version" in constraint
                for constraint in constraints
            ):
                # Version-only constraints collapse to bare version strings.
                dependencies[dependency_name] = [
                    constraint["version"] for constraint in constraints
                ]
        data: dict[str, Any] = {
            "name": package.pretty_name,
            "version": package.pretty_version,
            "description": package.description or "",
            "category": package.category,
            "optional": package.optional,
            "python-versions": package.python_versions,
            "files": sorted(
                package.files,
                key=lambda x: x["file"],  # type: ignore[no-any-return]
            ),
        }
        if dependencies:
            data["dependencies"] = table()
            for k, constraints in dependencies.items():
                if len(constraints) == 1:
                    data["dependencies"][k] = constraints[0]
                else:
                    # Multiple constraints render as a multiline TOML array.
                    data["dependencies"][k] = array().multiline(True)
                    for constraint in constraints:
                        data["dependencies"][k].append(constraint)
        if package.extras:
            extras = {}
            for name, deps in package.extras.items():
                # TODO: This should use dep.to_pep_508() once this is fixed
                # https://github.com/python-poetry/poetry-core/pull/102
                extras[name] = [
                    dep.base_pep_508_name if not dep.constraint.is_any() else dep.name
                    for dep in deps
                ]
            data["extras"] = extras
        if package.source_url:
            url = package.source_url
            if package.source_type in ["file", "directory"]:
                # The lock file should only store paths relative to the root project
                url = Path(
                    os.path.relpath(
                        Path(url).as_posix(), self._lock.path.parent.as_posix()
                    )
                ).as_posix()
            data["source"] = {}
            if package.source_type:
                data["source"]["type"] = package.source_type
            data["source"]["url"] = url
            if package.source_reference:
                data["source"]["reference"] = package.source_reference
            if package.source_resolved_reference:
                data["source"]["resolved_reference"] = package.source_resolved_reference
            if package.source_type in ["directory", "git"]:
                data["develop"] = package.develop
        return data
class NullLocker(Locker):
    """A Locker that never persists lock data (locking disabled)."""
    def set_lock_data(self, root: Package, packages: list[Package]) -> bool:
        # Intentionally a no-op. Return False ("nothing was written") to
        # honour the declared bool return type; previously this returned
        # None, which is equally falsy, so callers are unaffected.
        return False
| 36.201613 | 88 | 0.574293 |
acf77dcaf9659ac459b5c4b8f6d95872b9ec8570 | 1,172 | gyp | Python | binding.gyp | Icemanbeta/node-uchardet | 0d6e59246c520b18f17e72932d99937712d13be0 | [
"MIT"
] | null | null | null | binding.gyp | Icemanbeta/node-uchardet | 0d6e59246c520b18f17e72932d99937712d13be0 | [
"MIT"
] | 2 | 2021-05-09T16:18:18.000Z | 2021-09-01T12:11:16.000Z | binding.gyp | Icemanbeta/node-uchardet | 0d6e59246c520b18f17e72932d99937712d13be0 | [
"MIT"
] | null | null | null | {
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'uchardet',
'defines': [
'NAPI_CPP_EXCEPTIONS',
'VERSION="<!(node -e "process.stdout.write(require(\'./package.json\').libuchardet)")"',
],
'dependencies': [
'deps/uchardet.gyp:libuchardet',
],
'include_dirs': [
"<!(node -e \"require('nan')\")",
"deps/uchardet/src",
"src",
],
'sources': [
'src/binding.cpp',
'src/factory.cpp',
'src/uchardet.cpp',
],
'cflags!': [
'-fno-exceptions'
],
'cflags_cc!': [
'-fno-exceptions'
],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
}
}]
]
},
# {
# 'target_name': 'uchardet-test',
# 'type': 'executable',
# 'dependencies': [
# 'deps/uchardet.gyp:libuchardet',
# ],
# 'include_dirs': [
# "deps/uchardet/src",
# "src",
# ],
# 'sources': [
# 'src/test.cpp',
# 'src/factory.cpp',
# ],
# }
]
}
| 20.928571 | 96 | 0.419795 |
acf77eb99749f184e14dd3a076216318b4828449 | 370 | py | Python | django_require_login/utils.py | laactech/django-login-required-all | 967513f9f41c2050a4a361814028e92648963d54 | [
"MIT",
"BSD-3-Clause"
] | 6 | 2019-11-10T17:08:06.000Z | 2021-09-23T17:34:20.000Z | django_require_login/utils.py | laactech/django-require-login | de1e09c51beb3121a73dd753ba3690aa6af62b7a | [
"MIT",
"BSD-3-Clause"
] | null | null | null | django_require_login/utils.py | laactech/django-require-login | de1e09c51beb3121a73dd753ba3690aa6af62b7a | [
"MIT",
"BSD-3-Clause"
] | null | null | null | def is_view_func_public(func):
"""
Returns whether a view is public or not (ie/ has the REQUIRE_LOGIN_IS_PUBLIC
attribute set)
"""
return getattr(func, "REQUIRE_LOGIN_IS_PUBLIC", False)
def set_view_func_public(func):
    """
    Set the REQUIRE_LOGIN_IS_PUBLIC attribute on a given function to True
    """
    setattr(func, "REQUIRE_LOGIN_IS_PUBLIC", True)
| 26.428571 | 80 | 0.713514 |
acf77eccde5b15774345693102eecafbeeea5c7a | 4,060 | py | Python | pixloc/run_RobotCar.py | jmorlana/pixloc | 90f7e968398252e8557b284803ee774cb8d80cd0 | [
"Apache-2.0"
] | 2 | 2021-12-14T13:02:08.000Z | 2022-01-19T13:27:02.000Z | pixloc/run_RobotCar.py | jmorlana/pixloc | 90f7e968398252e8557b284803ee774cb8d80cd0 | [
"Apache-2.0"
] | null | null | null | pixloc/run_RobotCar.py | jmorlana/pixloc | 90f7e968398252e8557b284803ee774cb8d80cd0 | [
"Apache-2.0"
] | null | null | null | import pickle
from pathlib import Path
from . import set_logging_debug, logger
from .localization import RetrievalLocalizer, PoseLocalizer
from .utils.data import Paths, create_argparser, parse_paths, parse_conf
from .utils.io import write_pose_results, concat_results
default_paths = Paths(
query_images='images/',
reference_images='images/',
reference_sfm='sfm_superpoint+superglue/',
query_list='{condition}_queries_with_intrinsics.txt',
global_descriptors='robotcar_ov-ref_tf-netvlad.h5',
retrieval_pairs='pairs-query-netvlad10-percam-perloc.txt',
results='pixloc_RobotCar_{condition}.txt',
)
experiment = 'pixloc_cmu'
default_confs = {
'from_retrieval': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 2,
'point_selection': 'all',
'normalize_descriptors': True,
'average_observations': False,
'filter_covisibility': False,
'do_pose_approximation': False,
},
},
'from_pose': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 5,
'min_points_opt': 100,
'point_selection': 'inliers',
'normalize_descriptors': True,
'average_observations': False,
'layer_indices': [0, 1],
},
},
}
CONDITIONS = ['dawn', 'dusk', 'night', 'night-rain', 'overcast-summer',
'overcast-winter', 'rain', 'snow', 'sun']
def generate_query_list(paths, condition):
    """Write a query list file with camera intrinsics for one condition.

    Reads per-camera pinhole intrinsics from the RobotCar dataset, lists all
    query images for `condition`, and writes one line per image:
    `relative_path MODEL w h f cx cy k` to `paths.query_list`.
    """
    # All RobotCar camera images are 1024x1024.
    h, w = 1024, 1024
    intrinsics_filename = 'intrinsics/{}_intrinsics.txt'
    cameras = {}
    for side in ['left', 'right', 'rear']:
        # Intrinsics files list fx, fy, cx, cy one per line (value in col 2).
        with open(paths.dataset / intrinsics_filename.format(side), 'r') as f:
            fx = f.readline().split()[1]
            fy = f.readline().split()[1]
            cx = f.readline().split()[1]
            cy = f.readline().split()[1]
        # SIMPLE_RADIAL assumes a single focal length.
        assert fx == fy
        params = ['SIMPLE_RADIAL', w, h, fx, cx, cy, 0.0]
        cameras[side] = [str(p) for p in params]
    queries = sorted((paths.query_images / condition).glob('**/*.jpg'))
    queries = [str(q.relative_to(paths.query_images)) for q in queries]
    # The parent directory name of each image identifies its camera side.
    out = [[q] + cameras[Path(q).parent.name] for q in queries]
    with open(paths.query_list, 'w') as f:
        f.write('\n'.join(map(' '.join, out)))
def main():
    """Run pixloc localization on RobotCar Seasons for selected conditions.

    Evaluates each requested weather/lighting condition, skipping ones with
    existing results, then concatenates all per-condition result files.
    """
    parser = create_argparser('RobotCar')
    parser.add_argument('--conditions', default=CONDITIONS, choices=CONDITIONS,
                        nargs='+')
    args = parser.parse_intermixed_args()
    set_logging_debug(args.verbose)
    paths = parse_paths(args, default_paths)
    conf = parse_conf(args, default_confs)
    logger.info('Will evaluate %s conditions.', len(args.conditions))
    all_results = []
    for condition in args.conditions:
        logger.info('Working on condition %s.', condition)
        # Substitute {condition} placeholders in the configured paths.
        paths_cond = paths.interpolate(condition=condition)
        all_results.append(paths_cond.results)
        if paths_cond.results.exists():
            # Results already computed for this condition; skip it.
            continue
        if not paths_cond.query_list.exists():
            generate_query_list(paths_cond, condition)
        if args.from_poses:
            localizer = PoseLocalizer(paths_cond, conf)
        else:
            localizer = RetrievalLocalizer(paths_cond, conf)
        poses, logs = localizer.run_batched(skip=args.skip)
        write_pose_results(poses, paths_cond.results, prepend_camera_name=True)
        # Keep the detailed per-query logs alongside the pose results.
        with open(f'{paths_cond.results}_logs.pkl', 'wb') as f:
            pickle.dump(logs, f)
    output_path = concat_results(
        all_results, args.conditions, paths.results, 'condition')
    logger.info(
        'Finished evaluating all conditions, you can now submit the file %s to'
        ' https://www.visuallocalization.net/submission/', output_path)
if __name__ == '__main__':
main()
| 33.278689 | 79 | 0.609852 |
acf77ee8fde96bcd61468787f62e00ea7941d2bc | 24,732 | py | Python | src/smif/data_layer/validate.py | willu47/smif | a5d2e476c157391fd16d4b4c335d1e0664d2a14c | [
"MIT"
] | null | null | null | src/smif/data_layer/validate.py | willu47/smif | a5d2e476c157391fd16d4b4c335d1e0664d2a14c | [
"MIT"
] | null | null | null | src/smif/data_layer/validate.py | willu47/smif | a5d2e476c157391fd16d4b4c335d1e0664d2a14c | [
"MIT"
] | 1 | 2021-03-17T17:50:58.000Z | 2021-03-17T17:50:58.000Z | # -*- coding: utf-8 -*-
"""Validate the correct format and presence of the config data
for the system-of-systems model
"""
import itertools
from smif.exception import (SmifDataError, SmifDataInputError,
SmifValidationError)
VALIDATION_ERRORS = []
def validate_sos_model_format(sos_model):
    """Fill default keys on a system-of-systems model configuration.

    Mutates and returns `sos_model`. Raises SmifDataError when keys outside
    the known schema are present. Note: a non-dict value is handed back
    unchanged without raising.
    """
    collected = []
    if not isinstance(sos_model, dict):
        msg = "Main config file should contain setup data, instead found: {}"
        collected.append(SmifValidationError(msg.format(sos_model)))
        return sos_model
    default_keys = {
        'name': '',
        'description': '',
        'sector_models': [],
        'scenarios': [],
        'narratives': [],
        'model_dependencies': [],
        'scenario_dependencies': []
    }
    # Backfill any missing keys with their defaults.
    for key, default in default_keys.items():
        sos_model.setdefault(key, default)
    # Reject keys that are not part of the schema.
    collected.extend(
        SmifValidationError(
            'Invalid key `%s` in sos_model configuration `%s`.'
            % (key, sos_model['name']))
        for key in sos_model
        if key not in default_keys
    )
    if collected:
        raise SmifDataError(collected)
    return sos_model
def validate_sos_model_config(sos_model, sector_models, scenarios):
    """Check expected values for data loaded from master config file

    Runs all validation checks on a sos_model configuration against the
    available sector model and scenario configurations, raising a single
    SmifDataError that aggregates every failure.
    """
    errors = []
    if not isinstance(sos_model, dict):
        msg = "Main config file should contain setup data, instead found: {}"
        err = SmifValidationError(msg.format(sos_model))
        errors.append(err)
        # NOTE(review): returns None here without raising, so the collected
        # error above is discarded — confirm this is intentional.
        return
    # check description
    errors.extend(_validate_description(sos_model))
    # check sector models
    errors.extend(_validate_sos_model_models(sos_model, sector_models))
    # check scenarios
    errors.extend(_validate_sos_model_scenarios(sos_model, scenarios))
    # check narratives
    errors.extend(_validate_sos_model_narratives(sos_model, sector_models))
    # check dependencies
    errors.extend(_validate_sos_model_deps(sos_model, sector_models, scenarios))
    if errors:
        raise SmifDataError(errors)
def _validate_sos_model_models(sos_model, sector_models):
errors = []
if not sos_model['sector_models']:
errors.append(
SmifDataInputError(
'sector_models',
'At least one sector model must be selected.',
'A system-of-systems model requires to have at least one system ' +
'enabled to build a valid configuration.'))
for sector_model in sos_model['sector_models']:
if sector_model not in [sector_model['name'] for sector_model in sector_models]:
errors.append(
SmifDataInputError(
'sector_models',
'%s must have a valid sector_model configuration.' % (sector_model),
'Smif refers to the sector_model-configurations to find ' +
'details about a selected sector_model.'))
return errors
def _validate_sos_model_scenarios(sos_model, scenarios):
errors = []
for scenario in sos_model['scenarios']:
if scenario not in [scenario['name'] for scenario in scenarios]:
errors.append(
SmifDataInputError(
'scenarios',
'%s must have a valid scenario configuration.' % (scenario),
'Smif refers to the scenario-configurations to find ' +
'details about a selected scenario.'))
return errors
def _validate_sos_model_narratives(sos_model, sector_models):
    """Check each narrative only provides parameters of enabled, existing
    models, and that every variant supplies exactly the declared data."""
    errors = []
    for narrative in sos_model['narratives']:
        # Check provides are valid
        for model_name in narrative['provides']:
            # A narrative can only provides for enabled models
            if model_name not in sos_model['sector_models']:
                errors.append(
                    SmifDataInputError(
                        'narratives',
                        ('Narrative `%s` provides data for model `%s` that is not enabled ' +
                         'in this system-of-systems model.') % (narrative['name'], model_name),
                        'A narrative can only provide for enabled models.'))
            else:
                # A narrative can only provides parameters that exist in the model
                try:
                    sector_model = _pick_sector_model(model_name, sector_models)
                except KeyError:
                    msg = 'Narrative `{}` provides data for model `{}` that is not found.'
                    errors.append(
                        SmifDataInputError(
                            'models',
                            msg.format(narrative['name'], model_name),
                            'A narrative can only provide for existing models.'))
                    # Fall back to an empty model so the parameter check below
                    # still runs (and flags every provided parameter).
                    sector_model = {'parameters': []}
                parameters = [
                    parameter['name'] for parameter in sector_model['parameters']
                ]
                for provide in narrative['provides'][model_name]:
                    msg = 'Narrative `{}` provides data for non-existing model parameter `{}`'
                    if provide not in parameters:
                        errors.append(
                            SmifDataInputError(
                                'narratives',
                                msg.format(narrative['name'], provide),
                                'A narrative can only provide existing model parameters.'
                            )
                        )
        # Check if all variants are valid
        for variant in narrative['variants']:
            # Each variant must supply data for exactly the parameters the
            # narrative declares across all models (order-insensitive).
            should_provide = list(itertools.chain(*narrative['provides'].values()))
            variant_provides = list(variant['data'].keys())
            if sorted(variant_provides) != sorted(should_provide):
                msg = 'Narrative `{}`, variant `{}` provides incorrect data.'
                errors.append(
                    SmifDataInputError(
                        'narratives',
                        msg.format(narrative['name'], variant['name']),
                        'A variant can only provide data for parameters that are specified ' +
                        'by the narrative.'))
    return errors
def _pick_sector_model(name, models):
for model in models:
if model['name'] == name:
return model
raise KeyError("Model '{}' not found in models".format(name))
def _validate_sos_model_deps(sos_model, sector_models, scenarios):
    """Validate both dependency lists of a sos_model configuration.

    Model dependencies connect sector models to sector models; scenario
    dependencies connect scenarios (sources) to sector models (sinks).
    """
    errors = []
    errors.extend(_validate_dependencies(
        sos_model, 'model_dependencies',
        sector_models, 'sector_models',
        sector_models, 'sector_models'
    ))
    errors.extend(_validate_dependencies(
        sos_model, 'scenario_dependencies',
        scenarios, 'scenarios',
        sector_models, 'sector_models'
    ))
    return errors
def _validate_description(configuration):
errors = []
if len(configuration['description']) > 255:
errors.append(
SmifDataInputError(
'description',
'Description must not contain more than 255 characters.',
'A description should briefly outline a `%s` configuration.'
% (configuration['name'])))
return errors
def _validate_dependencies(configuration, conf_key, source, source_key, sink, sink_key):
    """Run the per-dependency checks on each entry of configuration[conf_key].

    `source`/`sink` are the lists of configurations dependencies may refer
    to; `source_key`/`sink_key` name the sos_model keys listing enabled
    sources/sinks (used for messages and lookups).
    """
    errors = []
    for idx, dependency in enumerate(configuration[conf_key]):
        errors.extend(_validate_dependency_cycle(
            idx, dependency, conf_key))
        errors.extend(_validate_dependency_in_sos_model(
            idx, dependency, configuration, conf_key, source_key, sink_key))
        errors.extend(_validate_dependency(
            idx, dependency, conf_key, source, source_key, sink, sink_key))
    return errors
def _validate_dependency_cycle(idx, dependency, conf_key):
errors = []
# Circular dependencies are not allowed
is_current = 'timestep' not in dependency or dependency['timestep'] == 'CURRENT'
if dependency['source'] == dependency['sink'] and is_current:
errors.append(
SmifDataInputError(
conf_key,
'(Dependency %s) Circular dependencies are not allowed.' % (idx + 1),
'Smif does not support self-dependencies unless the dependency is on ' +
'output from a previous timestep.'))
return errors
def _validate_dependency_in_sos_model(idx, dependency, configuration, conf_key, source_key,
                                      sink_key):
    """Check a dependency's source/sink are enabled and its sink input is
    driven by only one source."""
    errors = []
    # Source / Sink must be enabled in sos_model config
    if dependency['source'] not in configuration[source_key]:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Source `%s` is not enabled.' %
                (idx + 1, dependency['source']),
                'Each dependency source must be enabled in the sos-model'))
    if dependency['sink'] not in configuration[sink_key]:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Sink `%s` is not enabled.' %
                (idx + 1, dependency['sink']),
                'Each dependency sink must be enabled in the sos-model'))
    # Sink can only have a single dependency
    # Count how many dependencies drive this (sink, sink_input) pair across
    # the whole list; more than one is an error reported for this entry.
    dep_sinks = [
        (dependency['sink'], dependency['sink_input'])
        for dependency in configuration[conf_key]
    ]
    if dep_sinks.count((dependency['sink'], dependency['sink_input'])) > 1:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Sink input `%s` is driven by multiple sources.'
                % (idx + 1, dependency['sink_input']),
                'A model input can only be driven by a single model output.'))
    return errors
def _validate_dependency(idx, dependency, conf_key, source, source_key, sink,
                         sink_key):
    """Check a dependency's source/sink configs exist, its output/input
    exist, and that the connected output and input have matching specs."""
    errors = []
    # Source and sink model configurations must exist
    source_model = [model for model in source if model['name'] == dependency['source']]
    sink_model = [model for model in sink if model['name'] == dependency['sink']]
    if not source_model:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Source `%s` does not exist.' %
                (idx + 1, dependency['source']),
                'Each dependency source must have a `%s` configuration.' %
                (source_key)))
    if not sink_model:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Sink `%s` does not exist.' %
                (idx + 1, dependency['sink']),
                'Each dependency sink must have a `%s` configuration.' %
                (sink_key)))
    if not sink_model or not source_model:
        # not worth doing further checks if source/sink does not exist
        return errors
    # Source_output and sink_input must exist
    # NOTE(review): if source_key is neither 'sector_models' nor 'scenarios',
    # source_model_outputs is never assigned and the code below would raise
    # NameError — confirm callers only pass these two values.
    if source_key == 'sector_models':
        source_model_outputs = [
            output for output in source_model[0]['outputs']
            if output['name'] == dependency['source_output']
        ]
    if source_key == 'scenarios':
        # Scenarios declare their outputs under 'provides'.
        source_model_outputs = [
            output for output in source_model[0]['provides']
            if output['name'] == dependency['source_output']
        ]
    sink_model_inputs = [
        input_ for input_ in sink_model[0]['inputs']
        if input_['name'] == dependency['sink_input']
    ]
    if not source_model_outputs:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Source output `%s` does not exist.' %
                (idx + 1, dependency['source_output']),
                'Each dependency source output must exist in the `%s` configuration.' %
                (source_key)))
    if not sink_model_inputs:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Sink input `%s` does not exist.' %
                (idx + 1, dependency['sink_input']),
                'Each dependency sink input must exist in the `%s` configuration.' %
                (sink_key)))
    if not source_model_outputs or not sink_model_inputs:
        # not worth doing further checks if source_output/sink_input does not exist
        return errors
    # Source_output and sink_input must have matching specs
    source_model_output = source_model_outputs[0]
    sink_model_input = sink_model_inputs[0]
    # Dimensions are compared order-insensitively.
    if sorted(source_model_output['dims']) != sorted(sink_model_input['dims']):
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Source `%s` has different dimensions than sink ' % (
                    idx + 1,
                    source_model_output['name']
                ) +
                '`%s` (%s != %s).' % (
                    sink_model_input['name'],
                    source_model_output['dims'],
                    sink_model_input['dims']
                ),
                'Dependencies must have matching dimensions.'))
    if source_model_output['dtype'] != sink_model_input['dtype']:
        errors.append(
            SmifDataInputError(
                conf_key,
                '(Dependency %s) Source `%s` has a different dtype than sink ' % (
                    idx + 1,
                    source_model_output['name'],
                ) +
                '`%s` (%s != %s).' % (
                    sink_model_input['name'],
                    source_model_output['dtype'],
                    sink_model_input['dtype']),
                'Dependencies must have matching data types.'))
    return errors
def validate_path_to_timesteps(timesteps):
    """Record an error unless `timesteps` is a string path to a timesteps file."""
    if isinstance(timesteps, str):
        return
    message = ("Expected 'timesteps' in main config to specify " +
               "a timesteps file, instead got {}.".format(timesteps))
    VALIDATION_ERRORS.append(SmifValidationError(message))
def validate_timesteps(timesteps, file_path):
    """Check that the loaded timesteps are a list of integer years.

    Any problems found are recorded in the module-level VALIDATION_ERRORS
    list (as SmifValidationError instances, like every other check here).
    """
    if not isinstance(timesteps, list):
        msg = "Loading {}: expected a list of timesteps.".format(file_path)
        VALIDATION_ERRORS.append(SmifValidationError(msg))
    else:
        msg = "Loading {}: timesteps should be integer years, instead got {}"
        for timestep in timesteps:
            if not isinstance(timestep, int):
                # Wrap in SmifValidationError for consistency with the rest of
                # this module (previously a bare string was appended here).
                VALIDATION_ERRORS.append(
                    SmifValidationError(msg.format(file_path, timestep)))
def validate_time_intervals(intervals, file_path):
    """Check a loaded list of time intervals, validating each entry."""
    if isinstance(intervals, list):
        for interval in intervals:
            validate_time_interval(interval)
    else:
        msg = "Loading {}: expected a list of time intervals.".format(file_path)
        VALIDATION_ERRORS.append(SmifValidationError(msg))
def validate_time_interval(interval):
    """Check a single time interval is a dict with id, start and end keys."""
    if not isinstance(interval, dict):
        msg = "Expected a time interval, instead got {}.".format(interval)
        VALIDATION_ERRORS.append(SmifValidationError(msg))
        return
    # Build the message template once; it is the same for every missing key.
    fmt = "Expected a value for '{}' in each " + \
          "time interval, only received {}"
    for key in ("id", "start", "end"):
        if key not in interval:
            VALIDATION_ERRORS.append(SmifValidationError(fmt.format(key, interval)))
def validate_sector_models_initial_config(sector_models):
    """Validate the list of sector models from the main configuration."""
    if not isinstance(sector_models, list):
        message = ("Expected 'sector_models' in main config to "
                   "specify a list of sector models to run, instead got {}."
                   ).format(sector_models)
        VALIDATION_ERRORS.append(SmifValidationError(message))
        return
    if not sector_models:
        VALIDATION_ERRORS.append(SmifValidationError(
            "No 'sector_models' specified in main config file."))
    # check each sector model
    for model_config in sector_models:
        validate_sector_model_initial_config(model_config)
def validate_sector_model_initial_config(sector_model_config):
    """Validate one sector model's initial configuration block."""
    if not isinstance(sector_model_config, dict):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a sector model config block, instead got {}".format(
                sector_model_config)))
        return
    template = ("Expected a value for '{}' in each "
                "sector model in main config file, only received {}")
    for required in ("name", "config_dir", "path", "classname"):
        if required not in sector_model_config:
            VALIDATION_ERRORS.append(SmifValidationError(
                template.format(required, sector_model_config)))
def validate_dependency_spec(input_spec, model_name):
    """Validate a sector model's list of dependency definitions."""
    if isinstance(input_spec, list):
        for dependency in input_spec:
            validate_dependency(dependency)
        return
    message = ("Expected a list of parameter definitions in '{}' model "
               "input specification, instead got {}"
               ).format(model_name, input_spec)
    VALIDATION_ERRORS.append(SmifValidationError(message))
def validate_dependency(dep):
    """Validate one model dependency specification (dict)."""
    if not isinstance(dep, dict):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a dependency specification, instead got {}".format(dep)))
        return
    for required in ("name", "spatial_resolution",
                     "temporal_resolution", "units"):
        if required not in dep:
            VALIDATION_ERRORS.append(SmifValidationError(
                "Expected a value for '{}' in each model dependency, "
                "only received {}".format(required, dep)))
def validate_scenario_data_config(scenario_data):
    """Validate the list of scenario datasets from the main configuration."""
    if not isinstance(scenario_data, list):
        VALIDATION_ERRORS.append(SmifValidationError(
            ("Expected a list of scenario datasets in main model config, "
             "instead got {}").format(scenario_data)))
        return
    for scenario in scenario_data:
        validate_scenario(scenario)
def validate_scenario(scenario):
    """Validate a single scenario specification (dict)."""
    if not isinstance(scenario, dict):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a scenario specification, instead got {}".format(scenario)))
        return
    for required in ("parameter", "spatial_resolution",
                     "temporal_resolution", "units", "file"):
        if required not in scenario:
            VALIDATION_ERRORS.append(SmifValidationError(
                "Expected a value for '{}' in each scenario, "
                "only received {}".format(required, scenario)))
def validate_scenario_data(data, file_path):
    """Validate a list of scenario observations loaded from *file_path*."""
    if not isinstance(data, list):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a list of scenario data in {}".format(file_path)))
        return
    for datum in data:
        validate_scenario_datum(datum, file_path)
def validate_scenario_datum(datum, file_path):
    """Validate one scenario observation (dict)."""
    if not isinstance(datum, dict):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a scenario data point, instead got {}".format(datum)))
        return
    for required in ("region", "interval", "year", "value"):
        if required not in datum:
            VALIDATION_ERRORS.append(SmifValidationError(
                ("Expected a value for '{}' in each data point in a scenario, "
                 "only received {}").format(required, datum)))
def validate_initial_conditions(data, file_path):
    """Validate a list of initial-condition observations from *file_path*."""
    if not isinstance(data, list):
        VALIDATION_ERRORS.append(SmifValidationError(
            "Expected a list of initial conditions in {}".format(file_path)))
        return
    for datum in data:
        validate_initial_condition(datum, file_path)
def validate_initial_condition(datum, file_path):
    """Check a single initial condition datum.

    Appends a SmifValidationError to the module-level VALIDATION_ERRORS
    list if *datum* is not a dict or is missing a required key.

    Parameters
    ----------
    datum : dict
        Expected to contain 'name' and 'build_date'
    file_path : str
        Path the datum was loaded from, used in error messages
    """
    if not isinstance(datum, dict):
        # Message grammar fixed: "a initial" -> "an initial"
        fmt = "Expected an initial condition data point, instead got {} from {}"
        VALIDATION_ERRORS.append(SmifValidationError(fmt.format(datum, file_path)))
        return
    required_keys = ["name", "build_date"]
    for key in required_keys:
        if key not in datum:
            fmt = "Expected a value for '{}' in each data point in an initial " + \
                "condition, only received {} from {}"
            VALIDATION_ERRORS.append(
                SmifValidationError(fmt.format(key, datum, file_path)))
def validate_planning_config(planning):
    """Validate the 'planning' section of the main configuration.

    Requires the three planning modes to be present; each mode that is
    enabled ('use' truthy) must also provide a non-empty 'files' list.
    """
    missing_fmt = ("No '{}' settings specified under 'planning' "
                   "in main config file.")
    for required in ("pre_specified", "rule_based", "optimisation"):
        if required not in planning:
            VALIDATION_ERRORS.append(
                SmifValidationError(missing_fmt.format(required)))
    # check each planning type
    for name, settings in planning.items():
        if "use" not in settings:
            VALIDATION_ERRORS.append(SmifValidationError(
                "No 'use' settings specified for '{}' 'planning'".format(name)))
            continue
        if not settings["use"]:
            continue
        files = settings.get("files")
        if not isinstance(files, list) or not files:
            VALIDATION_ERRORS.append(SmifValidationError(
                ("No 'files' provided for the '{}' "
                 "planning type in main config file.").format(name)))
def validate_region_sets_config(region_sets):
    """Validate region-set definitions from the main configuration."""
    template = ("Expected a value for '{}' in each "
                "region set in main config file, only received {}")
    # NOTE: key is the outer loop so error ordering matches previous behaviour
    for required in ("name", "file"):
        for region_set in region_sets:
            if required not in region_set:
                VALIDATION_ERRORS.append(SmifValidationError(
                    template.format(required, region_set)))
def validate_interval_sets_config(interval_sets):
    """Validate interval-set definitions from the main configuration."""
    template = ("Expected a value for '{}' in each "
                "interval set in main config file, only received {}")
    # NOTE: key is the outer loop so error ordering matches previous behaviour
    for required in ("name", "file"):
        for interval_set in interval_sets:
            if required not in interval_set:
                VALIDATION_ERRORS.append(SmifValidationError(
                    template.format(required, interval_set)))
def validate_interventions(data, path):
    """Validate a list of intervention definitions loaded from *path*.

    Each intervention must carry the required keys; apart from a few
    simple attributes, every value must be of the form
    ``{'value': x, 'units': y}``.
    """
    required_keys = ["name", "location", "capital_cost",
                     "operational_lifetime", "economic_lifetime"]
    # except for some keys which are allowed simple values,
    # expect each attribute to be of the form {value: x, units: y}
    simple_keys = ("name", "sector", "location")
    missing_fmt = ("Loading interventions from {}, required "
                   "a value for '{}' in each intervention, but only "
                   "received {}")
    units_fmt = ("Loading interventions from {3}, {0}.{1} was {2} but "
                 "should have specified units, "
                 "e.g. {{'value': {2}, 'units': 'm'}}")
    for intervention in data:
        for required in required_keys:
            if required not in intervention:
                VALIDATION_ERRORS.append(SmifValidationError(
                    missing_fmt.format(path, required, intervention)))
        for key, value in intervention.items():
            if key in simple_keys:
                continue
            has_units = (isinstance(value, dict)
                         and "value" in value
                         and "units" in value)
            if not has_units:
                VALIDATION_ERRORS.append(SmifValidationError(
                    units_fmt.format(intervention["name"], key, value, path)))
| 38.463453 | 95 | 0.600235 |
acf77f82d12df21f2e5f9b8290ccded28947148d | 1,103 | py | Python | WEEKS/CD_Sata-Structures/DS_ALGO/Algorithms/misc/challenge.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/DS_ALGO/Algorithms/misc/challenge.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/DS_ALGO/Algorithms/misc/challenge.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # Work out the time complexity of these solutions
"""
Formally:
1. Compute the Big-O for each line in isolation
2. if something is in a loop, multiply it's Big-O by the loop for the total.
3. If two things happen sequentially, add the Big-Os.
4. Drop leading multiplicative constants from each of the Big-Os.
5. From all of the Big-Os that are added, drop all but the biggest, dominating one.
"""
import math
# 1
def baz(n):
    """Sum of i*j for i in [0, n) and j in [0, floor(sqrt(n))).

    Time complexity: O(n * sqrt(n)) — the outer loop runs n times and the
    inner loop runs floor(sqrt(n)) times per outer iteration.
    """
    total = 0
    inner_bound = int(math.sqrt(n))  # hoisted: loop-invariant
    for i in range(n):
        for j in range(inner_bound):
            total += i * j
    return total
# 2
def frotz(n):
    """Sum of i*j for i in [0, n) and j in [0, 2n).

    Time complexity: O(2 * n^2) => O(n^2) — dropping the constant factor.
    """
    return sum(i * j for i in range(n) for j in range(2 * n))
# 3
def bar(x):
    """Complexity exercise: O(x), i.e. linear.

    The first loop runs a constant 1463 times (O(1) overall) and the nested
    pair runs x * 15 times (constant inner bound), so the whole function is
    O(x). Nothing is returned; the work only illustrates the analysis.
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for _ in range(1463):    # constant iteration count: O(1)
        pass                 # original body had no observable effect
    for _ in range(x):       # O(x)
        for _ in range(15):  # constant bound: O(1) per outer step
            total += 1
| 22.06 | 83 | 0.507706 |
acf77f89cdc657fcee6c1632fb4a78f591df34be | 8,988 | py | Python | clef20/authorship-verification/baseline.py | sabman/pan-code | fcb97c4f9605f94a49b7979c725f0675053c3bca | [
"MIT"
] | null | null | null | clef20/authorship-verification/baseline.py | sabman/pan-code | fcb97c4f9605f94a49b7979c725f0675053c3bca | [
"MIT"
] | null | null | null | clef20/authorship-verification/baseline.py | sabman/pan-code | fcb97c4f9605f94a49b7979c725f0675053c3bca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[20]:
# Script configuration (mirrors the commented-out argparse options below)
input_pairs="datasets/pan20-authorship-verification-training-small/prepare/input_pairs.jsonl"  # jsonl with calibration pairs
input_truth="datasets/pan20-authorship-verification-training-small/prepare/input_truth.jsonl"  # jsonl with ground truth for the pairs
test_pairs="datasets/pan20-authorship-verification-training-small/prepare/test_pairs.jsonl"  # jsonl with pairs to predict
seed=42  # random seed for numpy and stdlib random
output="out"  # output folder for predictions (recreated on each run)
vocab_size=3000  # max number of TFIDF features
ngram_size=4  # character n-gram size
num_iterations=0  # bootstrap iterations; 0 disables the imposters-style variant
dropout=0.5  # fraction of features kept per bootstrap iteration
# In[21]:
# Imports
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Naive, Distance-Based Baseline
## Introduction
This baseline offers a naive, yet fast solution to the
PAN2020 track on authorship verification. All documents
are represented using a bag-of-character-ngrams model,
that is TFIDF weighted. The cosine similarity between
each document pair in the calibration data set is
calculated. Finally, the resulting similarities are
optimized, and projected through a simple rescaling
operation, so that they can function as pseudo-probabi-
lities, indiciating the likelihood that a document-pair
is a same-author pair. Via a grid search, the optimal
verification threshold is determined, taking into account
that some difficult problems can be left unanswered.
Through setting `num_iterations` to an integer > 0,
a bootstrapped variant of this procedure can be used.
In this case, the similarity calculation is applied in
an iterative procedure to a randomly sampled subset of
the available features. The average similarity is then
used downstream. This imputation procedure is inspired
by the imposters approach.
## Dependencies
- Python 3.6+ (we recommend the Anaconda Python distribution)
- scikit-learn, numpy, scipy
- non-essential: tqdm, seaborn/matplotlib
- pan20_verif_evaluator.py
Example usage from the command line:
>>> python pan20-verif-baseline.py \
-input_pairs="datasets/pan20-authorship-verification-training-small/pairs.jsonl" \
-input_truth="datasets/pan20-authorship-verification-training-small/truth.jsonl" \
-test_pairs="datasets/pan20-authorship-verification-test/pairs.jsonl" \
-num_iterations=0 \
-output="out"
"""
import argparse
import json
import random
import os
import glob
import shutil
from itertools import combinations
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score, precision_score, recall_score
from scipy.spatial.distance import cosine
import matplotlib.pyplot as plt
from seaborn import kdeplot
from tqdm import tqdm
from pan20_verif_evaluator import evaluate_all
# In[22]:
# Helper functions
def cosine_sim(a, b):
    """Cosine similarity between vectors *a* and *b*: dot product over the
    product of their Euclidean norms."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
def rescale(value, orig_min, orig_max, new_min, new_max):
    """Linearly map *value* from [orig_min, orig_max] onto [new_min, new_max].

    Assumes ``orig_min <= value <= orig_max``. A zero-width original range
    is widened by 1e-6 to avoid division by zero.

    Parameters
    ----------
    value: float, default=None
        The value to be rescaled.
    orig_min: float, default=None
        The minimum of the original range.
    orig_max: float, default=None
        The maximum of the original range.
    new_min: float, default=None
        The minimum of the new range.
    new_max: float, default=None
        The maximum of the new range.

    Returns
    ----------
    new_value: float
        The rescaled value.
    """
    orig_span = orig_max - orig_min
    new_span = new_max - new_min
    if orig_span == 0:
        orig_span += 1e-6  # degenerate range: nudge to keep division defined
    scaled_value = float(value - orig_min) / float(orig_span)
    return new_min + (scaled_value * new_span)
def correct_scores(scores, p1, p2):
    """Project raw similarities onto [0, 1] so they act as probabilities.

    Scores at or below p1 are mapped into [0, 0.49] (different-author),
    scores strictly between p1 and p2 collapse to 0.5 (undecided), and
    scores at or above p2 are mapped into [0.51, 1] (same-author).
    """
    def _correct(score):
        if score <= p1:
            return rescale(score, 0, p1, 0, 0.49)
        if score < p2:  # p1 < score < p2: leave undecided
            return 0.5
        return rescale(score, p2, 1, 0.51, 1)
    return np.array([_correct(score) for score in scores])
# In[8]:
# Load data
# parser = argparse.ArgumentParser(description='Distance-based verification: PAN20 baseline')
# # data settings:
# parser.add_argument('-input_pairs', type=str, required=True,
# help='Path to the jsonl-file with the input pairs')
# parser.add_argument('-input_truth', type=str, required=True,
# help='Path to the ground truth-file for the input pairs')
# parser.add_argument('-test_pairs', type=str, required=True,
# help='Path to the jsonl-file with the test pairs')
# parser.add_argument('-output', type=str, required=True,
# help='Path to the output folder for the predictions.\
# (Will be overwritten if it exist already.)')
# # algorithmic settings:
# parser.add_argument('-seed', default=2020, type=int,
# help='Random seed')
# parser.add_argument('-vocab_size', default=3000, type=int,
# help='Maximum number of vocabulary items in feature space')
# parser.add_argument('-ngram_size', default=4, type=int,
# help='Size of the ngrams')
# parser.add_argument('-num_iterations', default=0, type=int,
# help='Number of iterations (`k`); zero by default')
# parser.add_argument('-dropout', default=.5, type=float,
# help='Proportion of features to keep in each iteration')
# args = parser.parse_args()
# print(args)
# Seed both numpy's and the stdlib's RNG so runs are reproducible
np.random.seed(seed)
random.seed(seed)
# Start from a clean output directory
try:
    shutil.rmtree(output)
except FileNotFoundError:
    pass
os.mkdir(output)
# Ground truth: problem id -> 1 (same author) / 0 (different authors)
gold = {}
for line in open(input_truth):
    d = json.loads(line.strip())
    gold[d['id']] = int(d['same'])
# truncation for development purposes
cutoff = 0
if cutoff:
    gold = dict(random.sample(gold.items(), cutoff))
print(len(gold))
# Collect the raw document texts for every pair that has ground truth
texts = []
for line in tqdm(open(input_pairs)):
    d = json.loads(line.strip())
    if d['id'] in gold:
        texts.extend(d['pair'])
# In[15]:
#
# NOTE(review): leftover notebook inspection cell — evaluates the keys of the
# last parsed pair dict and discards the result; it has no effect on the run.
dict.keys(d)
# In[24]:
print('-> constructing vectorizer')
# Bag of TFIDF-weighted character n-grams, capped at `vocab_size` features
vectorizer = TfidfVectorizer(max_features=vocab_size, analyzer='char',
                             ngram_range=(ngram_size, ngram_size))
vectorizer.fit(texts)
# # 🚽 break....
# In[25]:
# Bootstrapped variant: pre-draw `num_iterations` random feature subsets,
# each keeping a `dropout` fraction of the vocabulary (imposters-inspired)
if num_iterations:
    total_feats = len(vectorizer.get_feature_names())
    keep_feats = int(total_feats * dropout)
    rnd_feature_idxs = []
    for _ in range(num_iterations):
        rnd_feature_idxs.append(np.random.choice(total_feats,
                                                 keep_feats,
                                                 replace=False))
    rnd_feature_idxs = np.array(rnd_feature_idxs)
# In[27]:
print('-> calculating pairwise similarities')
# One cosine similarity (and one gold label) per calibration pair
similarities, labels = [], []
for line in tqdm(open(input_pairs)):
    d = json.loads(line.strip())
    if d['id'] in gold:
        x1, x2 = vectorizer.transform(d['pair']).toarray()
        if num_iterations:
            # average the similarity over the pre-drawn feature subsets
            similarities_ = []
            for i in range(num_iterations):
                similarities_.append(cosine_sim(x1[rnd_feature_idxs[i, :]],
                                                x2[rnd_feature_idxs[i, :]]))
            similarities.append(np.mean(similarities_))
        else:
            similarities.append(cosine_sim(x1, x2))
        labels.append(gold[d['id']])
similarities = np.array(similarities, dtype=np.float64)
labels = np.array(labels, dtype=np.float64)
# In[40]:
# Export (similarity, label) pairs for offline inspection.
# NOTE(review): import belongs at the top of the file with the others.
import pandas as pd
sim_lab = pd.DataFrame([similarities, labels]).transpose()
sim_lab.columns = ["similarity", "label"]
sim_lab.to_csv("similarities.csv")
# In[43]:
# let's plot
kdeplot(similarities, label='orig cos sim')
# Grid-search the two thresholds p1 < p2 that maximise the overall PAN20
# score after projecting similarities through correct_scores
print('-> grid search p1/p2:')
step_size = 0.01
thresholds = np.arange(0.01, 0.99, step_size)
combs = [(p1, p2) for (p1, p2) in combinations(thresholds, 2) if p1 < p2]
params = {}
for p1, p2 in tqdm(combs):
    corrected_scores = correct_scores(similarities, p1=p1, p2=p2)
    score = evaluate_all(pred_y=corrected_scores,
                         true_y=labels)
    params[(p1, p2)] = score['overall']
opt_p1, opt_p2 = max(params, key=params.get)
print('optimal p1/p2:', opt_p1, opt_p2)
plt.axvline(opt_p1, ls='--', c='darkgrey')
plt.axvline(opt_p2, ls='--', c='darkgrey')
# Re-apply the optimal thresholds and report the final calibration score
corrected_scores = correct_scores(similarities, p1=opt_p1, p2=opt_p2)
print('optimal score:', evaluate_all(pred_y=corrected_scores,
                                     true_y=labels))
kdeplot(corrected_scores, label='corr cos sim')
# Where the optimal thresholds land after the projection itself
corr_p1, corr_p2 = correct_scores([opt_p1, opt_p2], p1=opt_p1, p2=opt_p2)
plt.axvline(corr_p1, ls='--', c='darkgrey')
plt.axvline(corr_p2, ls='--', c='darkgrey')
plt.xlim([0, 1])
plt.legend()
plt.tight_layout()
plt.savefig('kde.pdf')
plt.clf()
# In[ ]:
| 29.181818 | 97 | 0.654651 |
acf7801e2a913725f641ccd4b9f21bb4d85f5aef | 897 | py | Python | Python/6/CreatePhoneNumber/test_create_phone_number.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | null | null | null | Python/6/CreatePhoneNumber/test_create_phone_number.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | 6 | 2020-02-21T17:01:59.000Z | 2021-05-04T07:04:41.000Z | Python/6/CreatePhoneNumber/test_create_phone_number.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest import main
from create_phone_number import create_phone_number
class TestCreatePhoneNumber(TestCase):
# Test class of create_phone_number
def test_create_phone_number(self):
test_patterns = [
([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], '(123) 456-7890'),
([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], '(111) 111-1111'),
([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], '(123) 456-7890'),
([0, 2, 3, 0, 5, 6, 0, 8, 9, 0], '(023) 056-0890'),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], '(000) 000-0000'),
# ([0, 9, 0, 9, 6, 7, 5, 8, 1, 3], '(090) 967-5811'), #-> Wrong Case
]
for nums, expected in test_patterns:
with self.subTest(nums=nums, expected=expected):
self.assertEqual(create_phone_number(n=nums), expected)
if __name__ == '__main__':
main(verbosity=2)
| 34.5 | 80 | 0.536232 |
acf7819e632a075ebac987850b694780db0630d2 | 1,686 | py | Python | setup.py | chasek23/django-chart-compare | 6ffa3a5ccdc4a92585990c96731605dd2d60a720 | [
"MIT"
] | null | null | null | setup.py | chasek23/django-chart-compare | 6ffa3a5ccdc4a92585990c96731605dd2d60a720 | [
"MIT"
] | 15 | 2020-02-12T01:23:20.000Z | 2022-03-11T23:56:43.000Z | setup.py | chasek23/django-chart-compare | 6ffa3a5ccdc4a92585990c96731605dd2d60a720 | [
"MIT"
] | null | null | null | import os
from setuptools import find_packages, setup
# from io import open
with open(os.path.join(os.path.dirname(__file__), 'readme.md'), encoding='utf-8') as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-chart-compare",
version="1.0.3",
packages=find_packages(),
include_package_data=True,
license="MIT License", # example license
description="Compare matrices in a django app",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/chasek23/django-chart-compare",
download_url='https://github.com/chasek23/xml2pandas/archive/0.0.0.tar.gz',
author="Chase Kelly",
author_email="chase@microsearch.net",
install_requires=[
'pandas',
'bokeh',
],
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
project_urls={
'Documentation': 'https://github.com/chasek23/django-chart-compare',
'Funding': 'https://microsearch.cloud/',
'Say Thanks!': 'http://chasekel.ly/',
'Source': 'https://github.com/chasek23/django-chart-compare',
'Tracker': 'https://github.com/chasek23/django-chart-compare/issues',
},
)
| 35.87234 | 93 | 0.646501 |
acf781f5e672ab24834149d6bfd2490a19d6750a | 7,232 | py | Python | docs/conf.py | DorianXGH/chipyard | 38034cfafbf3a389138a23cc6ca660440075fdba | [
"Apache-2.0",
"BSD-3-Clause"
] | 91 | 2016-11-01T00:23:38.000Z | 2019-06-26T16:59:17.000Z | docs/conf.py | DorianXGH/chipyard | 38034cfafbf3a389138a23cc6ca660440075fdba | [
"Apache-2.0",
"BSD-3-Clause"
] | 93 | 2016-11-29T15:53:02.000Z | 2019-07-02T16:33:58.000Z | docs/conf.py | DorianXGH/chipyard | 38034cfafbf3a389138a23cc6ca660440075fdba | [
"Apache-2.0",
"BSD-3-Clause"
] | 62 | 2016-10-26T17:52:42.000Z | 2019-05-31T22:01:22.000Z | # -*- coding: utf-8 -*-
#
# Chipyard documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 8 11:46:38 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import subprocess
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for the Chipyard docs
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages',
              'sphinx.ext.autosectionlabel']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chipyard'
copyright = u'2019, Berkeley Architecture Research'
author = u'Berkeley Architecture Research'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# READTHEDOCS is set by the RTD build environment
on_rtd = os.environ.get("READTHEDOCS") == "True"
if on_rtd:
    # dump the environment into the RTD build log for debugging
    for item, value in os.environ.items():
        print("[READTHEDOCS] {} = {}".format(item, value))
if on_rtd:
    # map the RTD version slug onto a displayed version string
    rtd_version = os.environ.get("READTHEDOCS_VERSION")
    if rtd_version == "latest":
        version = "main" # TODO: default to what "latest" points to
    elif rtd_version == "stable":
        # get the latest git tag (which is what rtd normally builds under "stable")
        # this works since rtd builds things within the repo
        process = subprocess.Popen(["git", "describe", "--exact-match", "--tags"], stdout=subprocess.PIPE)
        output = process.communicate()[0].decode("utf-8").strip()
        if process.returncode == 0:
            version = output
        else:
            version = "v?.?.?" # this should not occur as "stable" is always pointing to tagged version
    else:
        version = rtd_version # name of a branch
else:
    # local builds have no RTD metadata to derive a version from
    version = "v?.?.?"
# for now make these match
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'collapse_navigation': False,
    'logo_only': True,
    # 'display_version': True,
    # 'navigation_depth': 4,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'css/custom.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}
html_logo = '_static/images/chipyard-logo.png'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chipyarddoc'
# -- Misc Options ---------------------------------------------------------
# expose the computed version string to page templates
html_context = {
    "version": version
}
# add rst to end of each rst source file
# can put custom strings here that are generated from this file
rst_epilog = f"""
.. |overall_version| replace:: {version}
"""
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Chipyard.tex', u'Chipyard Documentation',
     u'Berkeley Architecture Research', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'chipyard', u'Chipyard Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Chipyard', u'Chipyard Documentation',
     author, 'Chipyard', 'One line description of project.',
     'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# Enables cross-references into the Python, BOOM and FireSim docs.
intersphinx_mapping = {'python' : ('https://docs.python.org/', None),
                       'boom' : ('https://docs.boom-core.org/en/latest/', None),
                       'firesim' : ('http://docs.fires.im/en/latest/', None) }
# resolve label conflict between documents
autosectionlabel_prefix_document = True
acf782454f10d470f97f033e134482f182916420 | 2,011 | py | Python | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBias_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBias_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBias_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# Set the HLT paths
import HLTrigger.HLTfilters.hltHighLevel_cfi
# Trigger filter: accept the event if ANY HLT path registered under the
# 'SiStripCalMinBias' eventSetup key fired (andOr = logical OR).
ALCARECOSiStripCalMinBiasHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    andOr = True, ## choose logical OR between Triggerbits
    ##     HLTPaths = [
    ##     #Minimum Bias
    ##     "HLT_MinBias*"
    ##     ],
    eventSetupPathsKey = 'SiStripCalMinBias',
    throw = False # tolerate triggers stated above, but not available
)
# Select only events where tracker had HV on (according to DCS bit information)
# AND respective partition is in the run (according to FED information)
import CalibTracker.SiStripCommon.SiStripDCSFilter_cfi
DCSStatusForSiStripCalMinBias = CalibTracker.SiStripCommon.SiStripDCSFilter_cfi.siStripDCSFilter.clone()
# Select only good tracks
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOSiStripCalMinBias = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()
ALCARECOSiStripCalMinBias.filter = True ##do not store empty events
ALCARECOSiStripCalMinBias.src = 'generalTracks'
# Basic track-quality requirements for the calibration sample
ALCARECOSiStripCalMinBias.applyBasicCuts = True
ALCARECOSiStripCalMinBias.ptMin = 0.8 ##GeV
ALCARECOSiStripCalMinBias.nHitMin = 6 ## at least 6 hits required
ALCARECOSiStripCalMinBias.chi2nMax = 10.
# Disable all muon-/jet-/decay-specific selections: plain minimum-bias tracks
ALCARECOSiStripCalMinBias.GlobalSelector.applyIsolationtest = False
ALCARECOSiStripCalMinBias.GlobalSelector.applyGlobalMuonFilter = False
ALCARECOSiStripCalMinBias.GlobalSelector.applyJetCountFilter = False
ALCARECOSiStripCalMinBias.TwoBodyDecaySelector.applyMassrangeFilter = False
ALCARECOSiStripCalMinBias.TwoBodyDecaySelector.applyChargeFilter = False
ALCARECOSiStripCalMinBias.TwoBodyDecaySelector.applyAcoplanarityFilter = False
ALCARECOSiStripCalMinBias.TwoBodyDecaySelector.applyMissingETFilter = False
# Sequence #
# Full selection chain: HLT bits -> DCS status -> track selection
seqALCARECOSiStripCalMinBias = cms.Sequence(ALCARECOSiStripCalMinBiasHLT*DCSStatusForSiStripCalMinBias*ALCARECOSiStripCalMinBias)
acf7824ecf9a097513a8552b1a94fed3037b076f | 5,335 | py | Python | test/test_appscale_reset_pwd.py | NeolithEra/appscale-tools | bf02462645ba08975e0e4cfcc25f3ff4e0ec1545 | [
"Apache-2.0"
] | 21 | 2015-04-22T02:46:22.000Z | 2021-11-14T20:05:18.000Z | test/test_appscale_reset_pwd.py | NeolithEra/appscale-tools | bf02462645ba08975e0e4cfcc25f3ff4e0ec1545 | [
"Apache-2.0"
] | 244 | 2015-01-02T22:43:58.000Z | 2020-01-29T17:52:43.000Z | test/test_appscale_reset_pwd.py | NeolithEra/appscale-tools | bf02462645ba08975e0e4cfcc25f3ff4e0ec1545 | [
"Apache-2.0"
] | 32 | 2015-03-12T14:49:49.000Z | 2021-06-08T09:43:36.000Z | #!/usr/bin/env python
# General-purpose Python library imports
import getpass
import json
import os
import sys
import time
import unittest
# Third party libraries
from flexmock import flexmock
import SOAPpy
# AppScale import, the library that we're testing here
from appscale.tools.appscale_logger import AppScaleLogger
from appscale.tools.appscale_tools import AppScaleTools
from appscale.tools.local_state import LocalState
from appscale.tools.parse_args import ParseArgs
class TestAppScaleResetPassword(unittest.TestCase):
  def setUp(self):
    """Common fixtures: a fake keyname/function name, silenced logger output,
    and no-op sleeps so retry loops run instantly."""
    self.keyname = "boobazblargfoo"
    self.function = "appscale-reset-pwd"
    # mock out any writing to stdout
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()
    AppScaleLogger.should_receive('success').and_return()
    AppScaleLogger.should_receive('warn').and_return()
    # mock out all sleeping
    flexmock(time)
    time.should_receive('sleep').and_return()
def test_reset_password_for_user_that_exists(self):
# put in a mock for reading the secret file
builtins = flexmock(sys.modules['__builtin__'])
builtins.should_call('open') # set the fall-through
secret_key_location = LocalState.get_secret_key_location(self.keyname)
fake_secret = flexmock(name="fake_secret")
fake_secret.should_receive('read').and_return('the secret')
builtins.should_receive('open').with_args(secret_key_location, 'r') \
.and_return(fake_secret)
# mock out reading the username and new password from the user
builtins.should_receive('raw_input').and_return('boo@foo.goo')
flexmock(getpass)
getpass.should_receive('getpass').and_return('the password')
# mock out finding the login node's IP address from the json file
flexmock(os.path)
os.path.should_call('exists') # set the fall-through
os.path.should_receive('exists').with_args(
LocalState.get_locations_json_location(self.keyname)).and_return(True)
fake_nodes_json = flexmock(name="fake_secret")
fake_nodes_json.should_receive('read').and_return(
json.dumps({"node_info": [{
'public_ip': 'public1',
'private_ip': 'private1',
'roles': ['load_balancer', 'db_master']
}]}))
builtins.should_receive('open').with_args(
LocalState.get_locations_json_location(self.keyname), 'r') \
.and_return(fake_nodes_json)
# mock out grabbing the userappserver ip from an appcontroller
fake_appcontroller = flexmock(name='fake_appcontroller')
fake_appcontroller.should_receive('status').with_args('the secret') \
.and_return('nothing interesting here') \
.and_return('Database is at not-up-yet') \
.and_return('Database is at public1')
fake_appcontroller.should_receive('reset_password').with_args(
'boo@foo.goo', str, 'the secret').and_return('true')
flexmock(SOAPpy)
SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
.and_return(fake_appcontroller)
argv = [
"--keyname", self.keyname
]
options = ParseArgs(argv, self.function).args
AppScaleTools.reset_password(options)
def test_reset_password_for_user_that_doesnt_exist(self):
# put in a mock for reading the secret file
builtins = flexmock(sys.modules['__builtin__'])
builtins.should_call('open') # set the fall-through
secret_key_location = LocalState.get_secret_key_location(self.keyname)
fake_secret = flexmock(name="fake_secret")
fake_secret.should_receive('read').and_return('the secret')
builtins.should_receive('open').with_args(secret_key_location, 'r') \
.and_return(fake_secret)
# mock out reading the username and new password from the user
builtins.should_receive('raw_input').and_return('boo@foo.goo')
flexmock(getpass)
getpass.should_receive('getpass').and_return('the password')
# mock out finding the login node's IP address from the json file
flexmock(os.path)
os.path.should_call('exists') # set the fall-through
os.path.should_receive('exists').with_args(
LocalState.get_locations_json_location(self.keyname)).and_return(True)
fake_nodes_json = flexmock(name="fake_secret")
fake_nodes_json.should_receive('read').and_return(
json.dumps({"node_info": [{
'public_ip': 'public1',
'private_ip': 'private1',
'roles': ['load_balancer', 'db_master']
}]}))
builtins.should_receive('open').with_args(
LocalState.get_locations_json_location(self.keyname), 'r') \
.and_return(fake_nodes_json)
# mock out grabbing the userappserver ip from an appcontroller
fake_appcontroller = flexmock(name='fake_appcontroller')
fake_appcontroller.should_receive('status').with_args('the secret') \
.and_return('nothing interesting here') \
.and_return('Database is at not-up-yet') \
.and_return('Database is at public1')
fake_appcontroller.should_receive('reset_password').with_args(
'boo@foo.goo', str, 'the secret').and_return('false')
flexmock(SOAPpy)
SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
.and_return(fake_appcontroller)
argv = [
"--keyname", self.keyname
]
options = ParseArgs(argv, self.function).args
self.assertRaises(SystemExit, AppScaleTools.reset_password, options)
| 36.541096 | 76 | 0.723711 |
acf78303930edc8368f5660815de2a6521d3f7f1 | 5,111 | py | Python | ashic/commands/fit.py | wmalab/ASHIC | f9dbee64ef13c7f10c25bc266209fb7fc430d39e | [
"MIT"
] | 5 | 2021-01-28T21:51:55.000Z | 2022-03-04T17:35:59.000Z | ashic/commands/fit.py | wmalab/ASHIC | f9dbee64ef13c7f10c25bc266209fb7fc430d39e | [
"MIT"
] | 4 | 2020-09-01T06:23:49.000Z | 2022-01-11T03:58:04.000Z | ashic/commands/fit.py | wmalab/ASHIC | f9dbee64ef13c7f10c25bc266209fb7fc430d39e | [
"MIT"
] | 1 | 2020-12-06T07:03:50.000Z | 2020-12-06T07:03:50.000Z | import numpy as np
from ashic.model.zipoisson import ZeroInflatedPoisson
from ashic.model.zipoissonhuman import ZeroInflatedPoissonHuman
from ashic.model.poisson import Poisson
from ashic.utils import init_counts, join_matrix
from ashic.optimization.mds import MDS
def initialx(data, alpha=-3.0, beta=1.0, seed=0, max_iter=5000, smooth=False, h=0, diag=0):
    """Compute an initial 3D structure by MDS embedding of completed counts.

    The allele-certain sub-matrices (aa/ab/ba/bb) are joined and used as
    Poisson lambdas to fill in the ambiguous contacts (ax/bx/xx); the
    resulting full matrix is then embedded with a two-chromosome MDS.
    """
    embedder = MDS(alpha=alpha, beta=beta, random_state=seed, max_iter=max_iter,
                   smooth=smooth, h=h, diag=diag, numchr=2)
    certain_counts = join_matrix(data['aa'], data['ab'], data['ba'], data['bb'])
    complete_counts = init_counts(certain_counts, data['ax'], data['bx'], data['xx'])
    return embedder.fit(complete_counts)
def create_model(params, modeltype='zip', seed=0, merge=None):
    """Build the inference model selected by *modeltype*.

    :param params: parameter dict handed to the model constructor
    :param modeltype: 'zip' for zero-inflated Poisson, 'poisson' for plain Poisson
    :param seed: seed for the model's internal RandomState
    :param merge: merge parameter, only forwarded to the 'zip' model
    :raises ValueError: for any other modeltype
    """
    rng = np.random.RandomState(seed=seed)
    if modeltype == 'zip':
        return ZeroInflatedPoisson(params, merge=merge, normalize=False,
                                   random_state=rng)
    if modeltype == 'poisson':
        return Poisson(params, normalize=False, random_state=rng)
    raise ValueError("Model type should be zip or poisson.")
def create_model_human(params, modeltype='ziphuman', seed=0, merge=None, loci=None, diag=0, mask=None):
    """Build the human-data inference model selected by *modeltype*.

    :param modeltype: 'ziphuman' for the human zero-inflated Poisson model,
        'poisson' for the plain Poisson model
    :raises ValueError: for any other modeltype
    """
    rng = np.random.RandomState(seed=seed)
    if modeltype == 'ziphuman':
        return ZeroInflatedPoissonHuman(params, merge=merge, normalize=False,
                                        loci=loci, diag=diag, mask=mask,
                                        random_state=rng)
    if modeltype == 'poisson':
        return Poisson(params, normalize=False, loci=loci, diag=diag, mask=mask,
                       random_state=rng)
    raise ValueError("Model type not implemented.")
def fit(data, outdir, modeltype, n=None, numruns=5, maxiter=20, tol=1e-2, alpha=-3.0, beta=1.0, tail=None, **kwargs):
    """Run EM fitting `numruns` times with different seeds and save the best run.

    For each seed an initial structure is computed with initialx(), a model is
    built with create_model(), and EM is run; the run with the highest final
    log-likelihood wins. Results (summary json, model json, expected counts
    json) are written under *outdir*.

    NOTE(review): this function references several names that are not defined
    in this module's visible imports: `os`, `json`, `sim`, `SimulationProgress`,
    `emfit`, `encodejson` and `params` — as written it would raise NameError.
    Presumably they were imported/defined in a fuller version of this file;
    verify before use.
    """
    best_loglikelihood = -np.inf
    best_model = None
    best_converge = False
    best_expected = None
    best_sim = None
    best_rseed = None
    if n is None:
        # infer the number of loci from the allele-certain aa matrix
        n = data['aa'].shape[0]
    for rseed in range(numruns):
        # initial parameters for this restart: MDS structure + count model params
        init = {
            'n': n,
            'x': initialx(data, alpha=alpha, beta=beta, seed=rseed, **kwargs),
            'alpha': alpha,
            'beta': beta
        }
        # `tail` (if given) marks the last `tail` loci to be merged
        if tail is None:
            merge = None
        elif 1 <= tail < n:
            merge = n - tail
        else:
            raise ValueError("tail should between 1 and {}.".format(n - 1))
        model = create_model(init, modeltype=modeltype, seed=rseed, merge=merge)
        # NOTE(review): `sim` and `SimulationProgress` are undefined here — TODO confirm
        simprogress = SimulationProgress(model, outdir=os.path.join(outdir, 'seed'+str(rseed)),
                                         simobj=sim, seed=rseed, maxiter=maxiter, tol=tol)
        model, converge, loglikelihood, expected = emfit(model, sim.obs, maxiter=maxiter, tol=tol,
                                                         callback=simprogress.callback)
        with open(simprogress.logfile, 'a') as fh:
            fh.write("# converge={}\n".format(converge))
        # choose the model with maximum loglikelihood in all runs
        if loglikelihood > best_loglikelihood:
            best_loglikelihood = loglikelihood
            best_model = model
            best_converge = converge
            best_expected = expected
            best_sim = simprogress
            best_rseed = rseed
    # save best result
    with open(os.path.join(outdir, 'result.json'), 'w') as fh:
        retdict = {
            'loglikelihood': best_loglikelihood,
            'converge': best_converge,
            'seed': best_rseed,
            'relative_error': best_sim.errors
        }
        json.dump(retdict, fh, indent=4, sort_keys=True)
    best_model.dumpjson(os.path.join(outdir, 'result_model.json'),
                        indent=4, sort_keys=True)
    with open(os.path.join(outdir, 'result_expected.json'), 'w') as fh:
        # only the unmasked (row, col) entries of the expected matrices are kept
        row, col = np.where(best_model.mask)
        values = {}
        if isinstance(best_model, Poisson):
            # Poisson EM returns only the expected counts T per sub-matrix
            values['T'] = {
                'aa': best_expected[0],
                'ab': best_expected[1],
                'ba': best_expected[2],
                'bb': best_expected[3]
            }
        elif isinstance(best_model, ZeroInflatedPoisson):
            # ZIP EM returns (Z, T): dropout indicators and expected counts
            values['Z'] = {
                'aa': best_expected[0][0],
                'ab': best_expected[0][1],
                'ba': best_expected[0][2],
                'bb': best_expected[0][3],
            }
            values['T'] = {
                'aa': best_expected[1][0],
                'ab': best_expected[1][1],
                'ba': best_expected[1][2],
                'bb': best_expected[1][3],
            }
        else:
            raise ValueError("model should be zip or poisson.")
        encodejson(values)
        expectdict = {
            # NOTE(review): `params` is undefined in this scope — likely should be n or init
            'n': params['n'],
            'row': row.flatten().tolist(),
            'col': col.flatten().tolist(),
            'values': values
        }
        json.dump(expectdict, fh, indent=4, sort_keys=True)
acf783bc0d037269c9a58c1f342c6bc1fbfae3db | 434 | py | Python | example/main_00.py | janothan/Evaluation-Framework | e53847bc352f657953933e1d7c97b68ac890c852 | [
"Apache-2.0"
] | 5 | 2020-02-12T13:11:14.000Z | 2021-01-28T12:45:22.000Z | example/main_00.py | charyeezy/Evaluation-Framework | ddfd4ea654a3d7d2abd58f062ec98a8a736f8f51 | [
"Apache-2.0"
] | 9 | 2019-07-29T17:45:30.000Z | 2022-03-17T12:24:47.000Z | example/main_00.py | charyeezy/Evaluation-Framework | ddfd4ea654a3d7d2abd58f062ec98a8a736f8f51 | [
"Apache-2.0"
] | 7 | 2020-02-12T13:22:49.000Z | 2021-11-29T01:08:50.000Z | from evaluation_framework.manager import FrameworkManager
if __name__ == "__main__":
    # Example entry point: run the evaluation framework on a vector file.
    evaluation_manager = FrameworkManager()
    # evaluation_manager.evaluate('uniform_classification_regression.txt', parallel=True, debugging_mode = False)
    evaluation_manager.evaluate(
        "uniform_classification_regression.txt",
        # restrict the run to these two tasks
        tasks=["Classification", "Regression"],
        parallel=True,
        debugging_mode=False,
    )
| 36.166667 | 114 | 0.737327 |
acf783df5ccf90e74a1a22f6c86146d5c578262a | 178 | py | Python | info_module/text.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | info_module/text.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | info_module/text.py | joejcollins/lieutenant-dean | eea536a146fb89b2feca244d5c4cf68e662cf2f2 | [
"MIT"
] | null | null | null | """ Text manipulations. """
def reverse_string(string_to_reverse):
    """Return *string_to_reverse* with its characters in reverse order."""
    return "".join(reversed(string_to_reverse))
| 22.25 | 45 | 0.696629 |
acf783f3de7f3424e9fdfa1996d74ad8b0c2bdec | 66 | py | Python | floggy/admin/components/auth/queries.py | gladioluz/Floggy | bc80cd068429ba1763d61b2baef2cefe321cda72 | [
"MIT"
] | null | null | null | floggy/admin/components/auth/queries.py | gladioluz/Floggy | bc80cd068429ba1763d61b2baef2cefe321cda72 | [
"MIT"
] | null | null | null | floggy/admin/components/auth/queries.py | gladioluz/Floggy | bc80cd068429ba1763d61b2baef2cefe321cda72 | [
"MIT"
] | null | null | null | from floggy import db
from floggy.models.Content import Content
| 13.2 | 41 | 0.818182 |
acf7846841b4f9361fb2d7889cadd6c84918d17f | 1,118 | py | Python | sprites/meta.py | DeadZombie14/chillMagicCarPygame | 756bb6d27939bed3c2834222d03096e90f05a788 | [
"MIT"
] | null | null | null | sprites/meta.py | DeadZombie14/chillMagicCarPygame | 756bb6d27939bed3c2834222d03096e90f05a788 | [
"MIT"
] | null | null | null | sprites/meta.py | DeadZombie14/chillMagicCarPygame | 756bb6d27939bed3c2834222d03096e90f05a788 | [
"MIT"
] | null | null | null | import pygame
from utilidades.colores import *
class Meta(pygame.sprite.Sprite):
    """Goal-flag sprite, tinted according to its style (1=red, 2=green,
    3=blue, 4=black; any other value leaves the image untinted)."""

    def __init__(self, screen, screenSize, coord, estilo=1):
        pygame.sprite.Sprite.__init__(self)
        flag = pygame.image.load('assets/img/r_flag.png')
        flag = pygame.transform.scale(flag, [32, 32])
        self.estilo = estilo

        def tint(surface, color):
            # overwrite the RGB channels of the surface in place
            pixels = pygame.surfarray.pixels3d(surface)
            for channel in range(3):
                pixels[:, :, channel] = color[channel]

        tinted = flag.copy()
        palette = {1: red, 2: green, 3: blue, 4: black}
        if estilo in palette:
            tint(tinted, palette[estilo])
        self.image = tinted
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = coord[0], coord[1]

    def getEstilo(self):
        """Return the style code this flag was created with."""
        return self.estilo
acf784715b3582b191ef4fbe39fa128bd0720df1 | 8,701 | py | Python | foolscap/test/test_reconnector.py | daira/foolscap | d34838ec99023eabef178110a74ca0c97b69603a | [
"MIT"
] | 1 | 2018-01-22T17:17:32.000Z | 2018-01-22T17:17:32.000Z | foolscap/test/test_reconnector.py | daira/foolscap | d34838ec99023eabef178110a74ca0c97b69603a | [
"MIT"
] | null | null | null | foolscap/test/test_reconnector.py | daira/foolscap | d34838ec99023eabef178110a74ca0c97b69603a | [
"MIT"
] | null | null | null | # -*- test-case-name: foolscap.test.test_reconnector -*-
from twisted.trial import unittest
from foolscap.api import UnauthenticatedTub, eventually, flushEventualQueue
from foolscap.test.common import HelperTarget
from twisted.internet import defer, reactor, error
from foolscap import negotiate
class AlwaysFailNegotiation(negotiate.Negotiation):
    """Negotiation subclass that rejects every inbound offer, used to force
    repeated connection failures in the Reconnector tests."""
    def evaluateHello(self, offer):
        raise negotiate.NegotiationError("I always fail")
class Reconnector(unittest.TestCase):
    """Exercises Tub.connectTo()'s reconnection behaviour: initial connect,
    retry after failure, retry after negotiation errors, reconnect after a
    lost connection, and stopConnecting()."""

    def setUp(self):
        # two live Tubs, each listening on an ephemeral localhost port
        self.services = [UnauthenticatedTub(), UnauthenticatedTub()]
        self.tubA, self.tubB = self.services
        for s in self.services:
            s.startService()
            l = s.listenOn("tcp:0:interface=127.0.0.1")
            s.setLocation("127.0.0.1:%d" % l.getPortnum())

    def tearDown(self):
        d = defer.DeferredList([s.stopService() for s in self.services])
        d.addCallback(flushEventualQueue)
        return d

    def test_try(self):
        """Connect, force a disconnect, and check that it reconnects once."""
        self.count = 0
        self.attached = False
        self.done = defer.Deferred()
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target)
        rc = self.tubA.connectTo(url, self._got_ref, "arg", kw="kwarg")
        # at least make sure the stopConnecting method is present, even if we
        # don't have a real test for it yet
        self.failUnless(rc.stopConnecting)
        return self.done

    def _got_ref(self, rref, arg, kw):
        # callback for test_try: fires on every (re)connection
        self.failUnlessEqual(self.attached, False)
        self.attached = True
        self.failUnlessEqual(arg, "arg")
        self.failUnlessEqual(kw, "kwarg")
        self.count += 1
        rref.notifyOnDisconnect(self._disconnected, self.count)
        if self.count < 2:
            # forcibly disconnect it
            eventually(rref.tracker.broker.transport.loseConnection)
        else:
            self.done.callback("done")

    def _disconnected(self, count):
        # disconnect notification for test_try
        self.failUnlessEqual(self.attached, True)
        self.failUnlessEqual(count, self.count)
        self.attached = False

    def _connected(self, ref, notifiers, accumulate):
        # generic connection callback: record the reference and fire the
        # next waiting Deferred, if any
        accumulate.append(ref)
        if notifiers:
            notifiers.pop(0).callback(ref)

    def stall(self, timeout, res=None):
        """Return a Deferred that fires with *res* after *timeout* seconds."""
        d = defer.Deferred()
        reactor.callLater(timeout, d.callback, res)
        return d

    def test_retry(self):
        """Connection attempts against a dead Tub succeed once a replacement
        Tub starts listening on the same port."""
        tubC = UnauthenticatedTub()
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        portb = self.tubB.getListeners()[0].getPortnum()
        d1 = defer.Deferred()
        notifiers = [d1]
        self.services.remove(self.tubB)
        d = self.tubB.stopService()
        def _start_connecting(res):
            # this will fail, since tubB is not listening anymore
            self.rc = self.tubA.connectTo(url, self._connected,
                                          notifiers, connects)
            # give it a few tries, then start tubC listening on the same port
            # that tubB used to, which should allow the connection to
            # complete (since they're both UnauthenticatedTubs)
            return self.stall(2)
        d.addCallback(_start_connecting)
        def _start_tubC(res):
            self.failUnlessEqual(len(connects), 0)
            self.services.append(tubC)
            tubC.startService()
            tubC.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            tubC.setLocation("127.0.0.1:%d" % portb)
            url2 = tubC.registerReference(target, "target")
            assert url2 == url
            return d1
        d.addCallback(_start_tubC)
        def _connected(res):
            self.failUnlessEqual(len(connects), 1)
            self.rc.stopConnecting()
        d.addCallback(_connected)
        return d

    def test_negotiate_fails_and_retry(self):
        """Retries continue through negotiation failures and succeed once the
        broken Listener is replaced."""
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        l = self.tubB.getListeners()[0]
        l.negotiationClass = AlwaysFailNegotiation
        portb = l.getPortnum()
        d1 = defer.Deferred()
        notifiers = [d1]
        self.rc = self.tubA.connectTo(url, self._connected,
                                      notifiers, connects)
        d = self.stall(2)
        def _failed_a_few_times(res):
            # the reconnector should have failed once or twice, since the
            # negotiation would always fail.
            self.failUnlessEqual(len(connects), 0)
            # Now we fix tubB. We only touched the Listener, so re-doing the
            # listenOn should clear it.
            return self.tubB.stopListeningOn(l)
        d.addCallback(_failed_a_few_times)
        def _stopped(res):
            self.tubB.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            # the next time the reconnector tries, it should succeed
            return d1
        d.addCallback(_stopped)
        def _connected(res):
            self.failUnlessEqual(len(connects), 1)
            self.rc.stopConnecting()
        d.addCallback(_connected)
        return d

    def test_lose_and_retry(self):
        """After a successful connection drops, the Reconnector reconnects to
        a new Tub on the original port."""
        tubC = UnauthenticatedTub()
        connects = []
        d1 = defer.Deferred()
        d2 = defer.Deferred()
        notifiers = [d1, d2]
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        portb = self.tubB.getListeners()[0].getPortnum()
        self.rc = self.tubA.connectTo(url, self._connected,
                                      notifiers, connects)
        def _connected_first(res):
            # we are now connected to tubB. Shut it down to force a
            # disconnect.
            self.services.remove(self.tubB)
            d = self.tubB.stopService()
            return d
        d1.addCallback(_connected_first)
        def _wait(res):
            # wait a few seconds to give the Reconnector a chance to try and
            # fail a few times
            return self.stall(2)
        d1.addCallback(_wait)
        def _start_tubC(res):
            # now start tubC listening on the same port that tubB used to,
            # which should allow the connection to complete (since they're
            # both UnauthenticatedTubs)
            self.services.append(tubC)
            tubC.startService()
            tubC.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            tubC.setLocation("127.0.0.1:%d" % portb)
            url2 = tubC.registerReference(target, "target")
            assert url2 == url
            # this will fire when the second connection has been made
            return d2
        d1.addCallback(_start_tubC)
        def _connected(res):
            self.failUnlessEqual(len(connects), 2)
            self.rc.stopConnecting()
        d1.addCallback(_connected)
        return d1

    def test_stop_trying(self):
        """stopConnecting() cancels a Reconnector that keeps failing, and
        getLastFailure()/getDelayUntilNextAttempt()/reset() behave sanely."""
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        d1 = defer.Deferred()
        self.services.remove(self.tubB)
        d = self.tubB.stopService()
        def _start_connecting(res):
            # this will fail, since tubB is not listening anymore
            self.rc = self.tubA.connectTo(url, self._connected, d1, connects)
            self.rc.verbose = True # get better code coverage
            # give it a few tries, then tell it to stop trying
            return self.stall(2)
        d.addCallback(_start_connecting)
        def _stop_trying(res):
            self.failUnlessEqual(len(connects), 0)
            f = self.rc.getLastFailure()
            self.failUnless(f.check(error.ConnectionRefusedError))
            delay = self.rc.getDelayUntilNextAttempt()
            self.failUnless(delay > 0, delay)
            self.failUnless(delay < 60, delay)
            self.rc.reset()
            delay = self.rc.getDelayUntilNextAttempt()
            self.failUnless(delay < 2)
            # this stopConnecting occurs while the reconnector's timer is
            # active
            self.rc.stopConnecting()
            self.failUnlessEqual(self.rc.getDelayUntilNextAttempt(), None)
        d.addCallback(_stop_trying)
        # if it keeps trying, we'll see a dirty reactor
        return d
# another test: determine the target url early, but don't actually register
# the reference yet. Start the reconnector, let it fail once, then register
# the reference and make sure the retry succeeds. This will distinguish
# between connection/negotiation failures and object-lookup failures, both of
# which ought to be handled by Reconnector. I suspect the object-lookup
# failures are not yet.
# test that Tub shutdown really stops all Reconnectors
| 40.096774 | 77 | 0.612688 |
acf7865400331156763247e020d76ab80b124bcc | 15,358 | py | Python | heat/engine/resources/openstack/sahara/sahara_templates.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/openstack/sahara/sahara_templates.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/openstack/sahara/sahara_templates.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
# NOTE(pshchelo): copied from sahara/utils/api_validator.py
SAHARA_NAME_REGEX = (r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]"
r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
r"[A-Za-z0-9\-]*[A-Za-z0-9])$")
class SaharaNodeGroupTemplate(resource.Resource):
    """Heat resource wrapping a Sahara Node Group Template.

    Translates the Heat property schema below into a
    node_group_templates.create() call against the Sahara API, and deletes
    the template on resource removal.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Heat property names, paired one-to-one with the Sahara API field names.
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
        VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
        SECURITY_GROUPS, AUTO_SECURITY_GROUP,
        AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
        NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
        'volumes_per_node', 'volumes_size', 'volume_type',
        'security_groups', 'auto_security_group',
        'availability_zone', 'volumes_availability_zone',
        'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ]
        ),
        VOLUMES_PER_NODE: properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
        ),
        VOLUMES_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ]
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        AUTO_SECURITY_GROUP: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
        ),
        VOLUMES_AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
        ),
        NODE_PROCESSES: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        FLOATING_IP_POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
        ),
        NODE_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters (see SAHARA_NAME_REGEX).
    physical_resource_name_limit = 50

    def _ngt_name(self):
        """Return the user-provided name, or a generated physical name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the node group template in Sahara and record its id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        node_processes = self.properties[self.NODE_PROCESSES]
        description = self.properties[self.DESCRIPTION]
        flavor_id = self.client_plugin("nova").get_flavor_id(
            self.properties[self.FLAVOR])
        volumes_per_node = self.properties[self.VOLUMES_PER_NODE]
        volumes_size = self.properties[self.VOLUMES_SIZE]
        volume_type = self.properties[self.VOLUME_TYPE]
        floating_ip_pool = self.properties[self.FLOATING_IP_POOL]
        security_groups = self.properties[self.SECURITY_GROUPS]
        auto_security_group = self.properties[self.AUTO_SECURITY_GROUP]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]
        vol_availability_zone = self.properties[self.VOLUMES_AVAILABILITY_ZONE]
        image_id = self.properties[self.IMAGE_ID]
        # under Neutron the pool must be resolved from name/UUID to a
        # network id before it is handed to Sahara
        if floating_ip_pool and self.is_using_neutron():
            floating_ip_pool = self.client_plugin(
                'neutron').find_neutron_resource(
                self.properties, self.FLOATING_IP_POOL, 'network')
        node_configs = self.properties[self.NODE_CONFIGS]

        node_group_template = self.client().node_group_templates.create(
            self._ngt_name(),
            plugin_name, hadoop_version, flavor_id,
            description=description,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volume_type=volume_type,
            node_processes=node_processes,
            floating_ip_pool=floating_ip_pool,
            node_configs=node_configs,
            security_groups=security_groups,
            auto_security_group=auto_security_group,
            availability_zone=availability_zone,
            volumes_availability_zone=vol_availability_zone,
            image_id=image_id
        )
        LOG.info(_LI("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def handle_delete(self):
        """Delete the template from Sahara, tolerating 'already gone'."""
        if not self.resource_id:
            return

        try:
            self.client().node_group_templates.delete(
                self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        LOG.info(_LI("Node Group Template '%s' has been deleted."),
                 self._ngt_name())

    def validate(self):
        """Additionally check that the floating IP pool actually exists."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            if self.is_using_neutron():
                try:
                    self.client_plugin('neutron').find_neutron_resource(
                        self.properties, self.FLOATING_IP_POOL, 'network')
                except Exception as ex:
                    if (self.client_plugin('neutron').is_not_found(ex)
                            or self.client_plugin('neutron').is_no_unique(ex)):
                        # NOTE(review): ex.message is Python 2 style — verify
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise
            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise
class SaharaClusterTemplate(resource.Resource):
    """Heat resource wrapping a Sahara Cluster Template.

    Translates the Heat property schema below into a
    cluster_templates.create() call against the Sahara API, and deletes the
    template on resource removal.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Heat property names, paired one-to-one with the Sahara API field names.
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
        ANTI_AFFINITY, MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID,
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'description',
        'anti_affinity', 'neutron_management_network',
        'cluster_configs', 'node_groups', 'default_image_id',
    )

    # keys of each entry in the NODE_GROUPS list property
    _NODE_GROUP_KEYS = (
        NG_NAME, COUNT, NG_TEMPLATE_ID,
    ) = (
        'name', 'count', 'node_group_template_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Sahara Group Template.'),
            default="",
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
        ),
        ANTI_AFFINITY: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        CLUSTER_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
        ),
        NODE_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the Node group.'),
                        required=True
                    ),
                    COUNT: properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[
                            constraints.Range(min=1)
                        ]
                    ),
                    NG_TEMPLATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("ID of the Node Group Template."),
                        required=True
                    ),
                }
            ),
        ),
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters (see SAHARA_NAME_REGEX).
    physical_resource_name_limit = 50

    def _cluster_template_name(self):
        """Return the user-provided name, or a generated physical name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the cluster template in Sahara and record its id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        description = self.properties[self.DESCRIPTION]
        image_id = self.properties[self.IMAGE_ID]
        net_id = self.properties[self.MANAGEMENT_NETWORK]
        if net_id:
            # resolve the network name/UUID via the backend actually in use
            if self.is_using_neutron():
                net_id = self.client_plugin('neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                net_id = self.client_plugin('nova').get_nova_network_id(
                    net_id)
        anti_affinity = self.properties[self.ANTI_AFFINITY]
        cluster_configs = self.properties[self.CLUSTER_CONFIGS]
        node_groups = self.properties[self.NODE_GROUPS]
        cluster_template = self.client().cluster_templates.create(
            self._cluster_template_name(),
            plugin_name, hadoop_version,
            description=description,
            default_image_id=image_id,
            anti_affinity=anti_affinity,
            net_id=net_id,
            cluster_configs=cluster_configs,
            node_groups=node_groups
        )
        LOG.info(_LI("Cluster Template '%s' has been created"),
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def handle_delete(self):
        """Delete the template from Sahara, tolerating 'already gone'."""
        if not self.resource_id:
            return

        try:
            self.client().cluster_templates.delete(
                self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        LOG.info(_LI("Cluster Template '%s' has been deleted."),
                 self._cluster_template_name())

    def validate(self):
        """Require a management network when the deployment uses Neutron."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    return {
        'OS::Sahara::NodeGroupTemplate': SaharaNodeGroupTemplate,
        'OS::Sahara::ClusterTemplate': SaharaClusterTemplate,
    }
| 36.918269 | 79 | 0.583084 |
acf787417709d7779aadd9b003bb56a2409c70fb | 10,504 | py | Python | bcs-ui/backend/templatesets/legacy_apps/instance/drivers/base.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/templatesets/legacy_apps/instance/drivers/base.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/templatesets/legacy_apps/instance/drivers/base.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import math
from backend.components import paas_cc
from backend.container_service.clusters.models import NodeStatus
from backend.templatesets.legacy_apps.instance.constants import EventType, InsState
from backend.templatesets.legacy_apps.instance.models import (
InstanceConfig,
InstanceEvent,
MetricConfig,
VersionInstance,
)
from backend.utils.exceptions import APIError, ConfigError, Rollback
logger = logging.getLogger(__name__)
class ClusterNotReady(Exception):
    """Raised when a target cluster has no node in the Normal state."""
class BCSRollback(Rollback):
    """Rollback variant raised when a BCS resource fails to instantiate."""
class SchedulerBase(object):
    """Base driver that pushes rendered template resources to a BCS scheduler.

    ``configuration`` maps a namespace id to ``{resource_type: [spec, ...]}``;
    each spec carries the rendered ``config`` plus a ``context`` (cluster id,
    namespace name, instance ids).  Concrete subclasses supply the
    ``handler_<res>`` / ``handler_update_<res>`` / ``rollback_<res>`` methods
    that are looked up dynamically in ``instantiation_ns``.
    """

    # Creation order per resource type: lower numbers are created first so
    # dependencies (secrets/configmaps/services) exist before the workloads
    # that use them.  Unknown types sort last (math.inf fallback below).
    INIT_ORDERING = {
        "secret": 1,
        "configmap": 2,
        "service": 3,
        "metric": 4,
        "ingress": 5,
        "deployment": 10,
        "application": 11,
        # k8s resource types
        "K8sIngress": 100,
        "K8sSecret": 101,
        "K8sConfigMap": 102,
        "K8sService": 103,
        "K8sDaemonSet": 104,
        "K8sJob": 105,
        "K8sStatefulSet": 106,
        "K8sDeployment": 107,
    }
    def __init__(self, access_token, project_id, configuration, kind, is_rollback):
        """Store credentials, the per-namespace configuration and the kind."""
        self.access_token = access_token
        self.project_id = project_id
        self.configuration = configuration
        self.plugin_client = SchedulerPluginCC(access_token, project_id)
        # Per-namespace stack of created resources, used by handler_rollback.
        self.rollback_stack = {}
        self.kind = kind
        # Rollback disabled for all operations since 2018-08-22; the
        # ``is_rollback`` argument is intentionally ignored.
        self.is_rollback = False
    def instantiation_ns(self, ns_id, config, is_update):
        """Instantiate (or update) every resource of a single namespace.

        Resources are processed in INIT_ORDERING order; a failing resource
        is logged, its DB state flagged, and a BCSRollback is raised so the
        caller can record the failure (and optionally roll back).
        """
        # Creation must follow the dependency ordering defined above.
        config = sorted(config.items(), key=lambda x: self.INIT_ORDERING.get(x[0], math.inf))
        for res, specs in config:
            if is_update:
                handler = getattr(self, "handler_update_%s" % res.lower(), None)
            else:
                handler = getattr(self, "handler_%s" % res.lower(), None)
            # plugin_handler = getattr(self.plugin_client, 'handler_%s' % res, None)
            if not handler:
                raise NotImplementedError("%s not have handler" % res)
            # if not plugin_handler:
            #     raise NotImplementedError('plugin %s not have handler' % res)
            for spec in specs:
                # Only the fields actually needed are pulled from the context.
                cluster_id = spec["context"]["SYS_CLUSTER_ID"]
                ns = spec["context"]["SYS_NAMESPACE"]
                self.rollback_stack.setdefault(ns_id, [])
                # application/deployment come last: if they fail they were
                # never created, so they need no rollback entry.
                if res not in ["application", "deployment"]:
                    self.rollback_stack[ns_id].append([res, ns, cluster_id, spec["config"]])
                # Pick the DB row whose state we will update afterwards.
                if res == "metric":
                    queryset = MetricConfig.objects.filter(pk=spec["instance_config_id"])
                else:
                    queryset = InstanceConfig.objects.filter(pk=spec["instance_config_id"])
                # Extra fields to persist together with the state change.
                is_update_save_kwargs = spec.get("is_update_save_kwargs", False)
                if is_update_save_kwargs:
                    save_kwargs = spec.get("save_kwargs", {})
                else:
                    save_kwargs = {}
                # ref = queryset.first()
                # # 已经成功的,且不是更新操作, 不需要再下发
                # if ref.ins_state == InsState.INS_SUCCESS.value and not is_update:
                #     continue
                try:
                    handler(ns, cluster_id, spec["config"])
                    if is_update:
                        ins_state = InsState.UPDATE_SUCCESS.value
                        # queryset.update(ins_state=InsState.UPDATE_SUCCESS.value, is_bcs_success=True)
                    else:
                        ins_state = InsState.INS_SUCCESS.value
                        # queryset.update(ins_state=InsState.INS_SUCCESS.value, is_bcs_success=True)
                    save_kwargs["ins_state"] = ins_state
                    save_kwargs["is_bcs_success"] = True
                    queryset.update(**save_kwargs)
                except Rollback as error:
                    # Capture the error payload carried by the exception.
                    result = error.args[0]
                    InstanceEvent.log(
                        spec["instance_config_id"], res, EventType.REQ_FAILED.value, result, spec["context"]
                    )
                    # Flag the resource's DB state as failed.
                    if is_update:
                        ins_state = InsState.UPDATE_FAILED.value
                        # queryset.update(ins_state=InsState.UPDATE_FAILED.value, is_bcs_success=False)
                    else:
                        ins_state = InsState.INS_FAILED.value
                        # queryset.update(ins_state=InsState.INS_FAILED.value, is_bcs_success=False)
                    save_kwargs["ins_state"] = ins_state
                    save_kwargs["is_bcs_success"] = False
                    queryset.update(**save_kwargs)
                    # Re-raise to the caller so it can roll back; the error
                    # message is surfaced to the frontend.
                    result["res_type"] = res
                    raise BCSRollback(result)
    def cluster_ready(self, cluster_id):
        """Check cluster health: at least one node must be in Normal state."""
        result = paas_cc.get_node_list(self.access_token, self.project_id, cluster_id)
        if result.get("code") != 0:
            raise ClusterNotReady("获取状态失败,请联系蓝鲸管理员解决")
        data = result["data"]["results"] or []
        normal_nodes = [i for i in data if i["status"] == NodeStatus.Normal]
        if len(normal_nodes) == 0:
            raise ClusterNotReady("没有可用节点,请添加或启用节点")
    def instantiation(self, is_update=False):
        """Instantiate every namespace's resources.

        Returns ``{"success": [...], "failed": [...]}`` where each entry
        describes one namespace (ids, names, failing resource type and
        error message, if any).
        """
        instantiation_result = {"success": [], "failed": []}
        for ns_id, config in self.configuration.items():
            cluster_id = [i for i in config.values()][0][0]["context"]["SYS_CLUSTER_ID"]
            ns_name = [i for i in config.values()][0][0]["context"]["SYS_NAMESPACE"]
            try:
                # Pre-flight check on every target cluster before touching any.
                self.cluster_ready(cluster_id)
            except ClusterNotReady as error:
                logger.warning("bcs_instantiation failed, cluster not ready %s", error)
                raise APIError("初始化失败,%s绑定的集群(%s) %s" % (ns_name, cluster_id, error))
        for ns_id, config in self.configuration.items():
            instance_id = [i for i in config.values()][0][0]["context"]["SYS_INSTANCE_ID"]
            ns_name = [i for i in config.values()][0][0]["context"]["SYS_NAMESPACE"]
            ns = {"ns_id": ns_id, "ns_name": ns_name, "instance_id": instance_id, "res_type": "", "err_msg": ""}
            bcs_success = True
            try:
                self.instantiation_ns(ns_id, config, is_update)
            except Rollback as error:
                if self.is_rollback and (not is_update):
                    self.handler_rollback(ns_id)
                ns["res_type"] = error.args[0]["res_type"]
                ns["err_msg"] = error.args[0].get("message", "")
                bcs_success = False
                logger.warning("bcs_api: error, %s, add failed to result %s", ns, instantiation_result)
            except ConfigError as error:
                if self.is_rollback and (not is_update):
                    self.handler_rollback(ns_id)
                bcs_success = False
                ns["err_msg"] = str(error)
                # show_err_msg marks the message as safe to display verbatim.
                ns["show_err_msg"] = True
                logger.exception("bcs_api: %s, instantiation error, %s", ns, error)
                logger.warning("bcs_api: exception, %s, add failed to result %s", ns, instantiation_result)
            except Exception as error:
                if self.is_rollback and (not is_update):
                    self.handler_rollback(ns_id)
                bcs_success = False
                ns["err_msg"] = str(error)
                logger.exception("bcs_api: %s, instantiation error, %s", ns, error)
                logger.warning("bcs_api: exception, %s, add failed to result %s", ns, instantiation_result)
            # Persist the overall outcome on the VersionInstance row.
            try:
                VersionInstance.objects.filter(pk=instance_id).update(is_bcs_success=bcs_success)
                # InstanceConfig.objects.filter(instance_id=instance_id).update(
                #     is_bcs_success=bcs_success)
                # MetricConfig.objects.filter(instance_id=instance_id).update(
                #     is_bcs_success=bcs_success)
            except Exception:
                logging.exception("save is_bcs_success error")
            if bcs_success is False:
                instantiation_result["failed"].append(ns)
            else:
                instantiation_result["success"].append(ns)
        logger.info("bcs_api: instantiation_result, %s", instantiation_result)
        return instantiation_result
    def handler_rollback(self, ns_id):
        """Undo the resources recorded for ``ns_id`` (all but the last entry,
        which is the resource that failed and was never created)."""
        roll_back_list = self.rollback_stack[ns_id]
        roll_back_list = roll_back_list[:-1]
        for s in roll_back_list:
            handler = getattr(self, "rollback_%s" % s[0].lower(), None)
            if handler:
                logger.warning("try to rollback, %s, %s", s, s[1:])
                handler(*s[1:])
            else:
                # NOTE(review): uses the root ``logging`` module here, unlike
                # the module-level ``logger`` used everywhere else.
                logging.warning("have not rollback handler, %s, %s, will ignore", s, s[1:])
class SchedulerPlugin(object):
    """Carries the credentials shared by scheduler plugin implementations."""

    def __init__(self, access_token, project_id):
        """Remember the access token and project id for later plugin calls."""
        self.access_token, self.project_id = access_token, project_id
class SchedulerPluginCC(SchedulerPlugin):
    """CC plugin: every resource hook is currently a no-op placeholder."""

    def handler_application(self, ns, cluster_id, spec):
        """Hook for application resources (no-op)."""

    def handler_deployment(self, ns, cluster_id, spec):
        """Hook for deployment resources (no-op)."""

    def handler_service(self, ns, cluster_id, spec):
        """Hook for service resources (no-op)."""

    def handler_lb(self, ns, cluster_id, spec):
        """Hook for load-balancer resources (no-op)."""

    def handler_configmap(self, ns, cluster_id, spec):
        """Hook for configmap resources (no-op)."""

    def handler_secret(self, ns, cluster_id, spec):
        """Hook for secret resources (no-op)."""
| 41.354331 | 115 | 0.582635 |
acf788d313d962ec03ae88dcaed6b87ec5b773a3 | 2,505 | py | Python | src/http-server.py | MIAOMIAOUP/GHS | 5d8ac87b27707b1e446356a319c8eb13feb904cf | [
"Apache-2.0"
] | null | null | null | src/http-server.py | MIAOMIAOUP/GHS | 5d8ac87b27707b1e446356a319c8eb13feb904cf | [
"Apache-2.0"
] | null | null | null | src/http-server.py | MIAOMIAOUP/GHS | 5d8ac87b27707b1e446356a319c8eb13feb904cf | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import re
import socket
from multiprocessing import Process
# 设置静态文件根目录
HTML_ROOT_DIR = "./view"
#
# HTML_ROOT_DIR = "D:\迅雷下载完成"
PORT = 8000
class HTTPServer(object):
    """Minimal multi-process HTTP server for static files under HTML_ROOT_DIR."""

    def __init__(self):
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without "address already in use" errors.
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def start(self):
        """Accept connections forever; each client is served by a child process."""
        self.server_socket.listen(128)
        while True:
            client_socket, client_address = self.server_socket.accept()
            print("[%s, %s]用户连接上了" % client_address)
            handle_client_process = Process(target=self.handle_client, args=(client_socket,))
            handle_client_process.start()
            # The child process holds its own duplicate of the socket; close ours.
            client_socket.close()

    def handle_client(self, client_socket):
        """Read one HTTP request and answer it with the mapped static file."""
        request_data = client_socket.recv(1024)
        print("request data:", request_data)
        request_lines = request_data.splitlines()
        for line in request_lines:
            print(line)
        # Bug fix: an empty request used to crash with IndexError on [0].
        if not request_lines:
            client_socket.close()
            return
        # Parse the request line, e.g. 'GET / HTTP/1.1'.
        request_start_line = request_lines[0]
        print("*" * 10)
        print(request_start_line.decode("utf-8"))
        # Bug fix: a malformed request line made re.match return None and
        # the original .group(1) raised AttributeError.
        match = re.match(r"\w+ +(/[^ ]*) ", request_start_line.decode("utf-8"))
        if match is None:
            client_socket.close()
            return
        file_name = match.group(1)
        if "/" == file_name:
            file_name = "/index.html"
        # Read the requested file; "with" closes it even if read() fails.
        try:
            with open(HTML_ROOT_DIR + file_name, "rb") as file:
                file_data = file.read()
        except IOError:
            response_start_line = "HTTP/1.1 404 Not Found\r\n"
            response_headers = "Server: My server\r\n"
            response_body = "The file is not found!"
        else:
            response_start_line = "HTTP/1.1 200 OK\r\n"
            response_headers = "Server: My server\r\n"
            # NOTE(review): assumes the served file is UTF-8 text; binary
            # files will raise UnicodeDecodeError — confirm intended scope.
            response_body = file_data.decode("utf-8")
        response = response_start_line + response_headers + "\r\n" + response_body
        print("response data:", response)
        client_socket.send(bytes(response, "utf-8"))
        client_socket.close()

    def bind(self, port):
        """Bind the listening socket to ``port`` on all interfaces."""
        self.server_socket.bind(("", port))
def main():
    """Create the server, bind it to the configured port and serve forever."""
    server = HTTPServer()
    server.bind(PORT)
    server.start()
if __name__ == "__main__":
main()
| 27.833333 | 93 | 0.59481 |
acf788e1c62e10de2f74fd6de69491476eb5aed7 | 2,314 | py | Python | examples/multiprocess.py | yeeliu01/pyrfa | 536c94f1bcff232415495cbe04b8897ad91e0c76 | [
"MIT"
] | 33 | 2016-11-29T08:18:28.000Z | 2021-11-11T15:40:19.000Z | examples/multiprocess.py | yeeliu01/pyrfa | 536c94f1bcff232415495cbe04b8897ad91e0c76 | [
"MIT"
] | 41 | 2016-09-20T10:15:11.000Z | 2021-10-20T01:14:22.000Z | examples/multiprocess.py | devcartel/thomsonreuters | 536c94f1bcff232415495cbe04b8897ad91e0c76 | [
"MIT"
] | 9 | 2016-10-19T00:09:22.000Z | 2020-08-03T03:02:15.000Z | #!/usr/bin/python
#
# Interactive (server) publisher for market price domain
#
import time
import multiprocessing
import random
import pyrfa
def publish():
    """Provider side: publish initial images for EUR=/JPY=, then stream updates.

    Runs forever (one update per second) until the process is terminated by
    the parent.  Errors are swallowed so the child exits quietly.
    """
    try:
        p = pyrfa.Pyrfa()
        p.createConfigDb("./pyrfa.cfg")
        p.acquireSession("Session4")
        p.createOMMProvider()
        p.login()
        p.dictionaryRequest()
        # Bug fix: the second assignment used to overwrite the first tuple,
        # so the EUR= image was never published.  Concatenate instead,
        # mirroring the ``UPDATES +=`` pattern below.
        IMAGES = {'RIC':'EUR=', 'RDNDISPLAY':200, 'RDN_EXCHID':155, 'BID':0.988, 'ASK':0.999, 'DIVPAYDATE':'20110623'},
        IMAGES += {'RIC':'JPY=', 'RDNDISPLAY':200, 'RDN_EXCHID':'NAS', 'OFFCL_CODE':'isin1234XYZ', 'BID':4.23, 'DIVPAYDATE':'20110623', 'OPEN_TIME':'09:00:01.000'},
        p.marketPriceSubmit(IMAGES)
        vol = 1000
        while True:
            time.sleep(1)
            vol += 1
            # One round() is enough; the original rounded the same value twice.
            price = round(4 + random.random(), 3)
            UPDATES = {'RIC':'EUR=', 'ACVOL_1':vol, 'TRDPRC_1':price},
            UPDATES += {'RIC':'JPY=', 'BID_NET_CH':0.0041, 'BID':0.988, 'ASK':0.999,'ASK_TIME':'now'},
            p.marketPriceSubmit(UPDATES)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # from the parent's terminate() still propagate.
        pass
def subscribe():
    """Consumer side: subscribe to EUR=/JPY= and print every update field."""
    p = None
    try:
        try:
            p = pyrfa.Pyrfa()
            p.createConfigDb("./pyrfa.cfg")
            p.acquireSession("Session3")
            p.createOMMConsumer()
            p.login()
            p.directoryRequest()
            p.dictionaryRequest()
            p.marketPriceRequest("EUR=,JPY=")
            while True:
                time.sleep(0.01)
                for u in p.dispatchEventQueue():
                    print("\n" + u['SERVICE'] + " - " + u['RIC'])
                    for k, v in u.items():
                        # Floats get %g formatting, everything else %s.
                        print("%15s %g" % (k, v) if isinstance(v, float) else "%15s %s" % (k, v))
        finally:
            # Bug fix: the original referenced ``p`` unconditionally here and
            # raised NameError when Pyrfa() itself failed.
            if p is not None:
                p.marketPriceCloseAllRequest()
    except Exception:
        # Narrowed from a bare ``except:`` so termination signals propagate.
        pass
if __name__ == "__main__":
    try:
        try:
            # Run publisher and subscriber as separate OS processes.
            t_publish = multiprocessing.Process(target=publish)
            t_publish.start()
            # wait 10s until the service is up
            time.sleep(10)
            t_subscribe = multiprocessing.Process(target=subscribe)
            t_subscribe.start()
            # Keep the parent alive while the children stream data.
            while True:
                time.sleep(0.01)
        finally:
            # On any exit (including Ctrl-C): give children a moment, then
            # terminate whatever is still running.
            time.sleep(1)
            for process in multiprocessing.active_children():
                process.terminate()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt —
        # apparently intentional so Ctrl-C exits without a traceback.
        pass
| 31.27027 | 163 | 0.521608 |
acf78a270f8fcd018a66d6b0b09dd76fa8f6ab99 | 1,281 | py | Python | _zadania/syntax_comments_b.py | DJG-coderslab/2021-12-elearning-pythonana | e072d6cbd231c4b858e92a59af768a069b74ba67 | [
"MIT"
] | null | null | null | _zadania/syntax_comments_b.py | DJG-coderslab/2021-12-elearning-pythonana | e072d6cbd231c4b858e92a59af768a069b74ba67 | [
"MIT"
] | null | null | null | _zadania/syntax_comments_b.py | DJG-coderslab/2021-12-elearning-pythonana | e072d6cbd231c4b858e92a59af768a069b74ba67 | [
"MIT"
] | null | null | null | """
* Assignment: Syntax Comments Multiline
* Complexity: easy
* Lines of code: 3 lines
* Time: 2 min
English:
1. Add multiline comment
a. first line: This is a first line of a multiline comment
b. second line: This is a second line of a multiline comment
b. third line: This is a third line of a multiline comment
2. Run doctests - all must succeed
Polish:
1. Dodaj komentarz wieloliniowy
a. pierwsza linia: This is a first line of a multiline comment
b. druga linia: This is a second line of a multiline comment
c. trzecia linia: This is a third line of a multiline comment
2. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> result = open(__file__).read()
>>> assert '# This is a first line of a multiline comment' in result, \
'Please write proper multiline comment'
>>> assert '# This is a second line of a multiline comment' in result, \
'Please write proper multiline comment'
>>> assert '# This is a third line of a multiline comment' in result, \
'Please write proper multiline comment'
"""
# This is a first line of a multiline comment
# This is a second line of a multiline comment
# This is a third line of a multiline comment
| 36.6 | 76 | 0.691647 |
acf78aa1cfcbcef6669a2bcc5907b0882258f4cd | 861 | py | Python | ewatercycle_experiment_launcher/api/opendap.py | eWaterCycle/experiment-launcher | dd003084b6ac79ca243018f391e636a5facb528a | [
"Apache-2.0"
] | 2 | 2019-11-19T09:43:13.000Z | 2020-05-05T17:00:07.000Z | ewatercycle_experiment_launcher/api/opendap.py | eWaterCycle/experiment-launcher | dd003084b6ac79ca243018f391e636a5facb528a | [
"Apache-2.0"
] | 26 | 2018-04-03T11:10:16.000Z | 2021-09-21T13:03:07.000Z | ewatercycle_experiment_launcher/api/opendap.py | eWaterCycle/experiment-launcher | dd003084b6ac79ca243018f391e636a5facb528a | [
"Apache-2.0"
] | null | null | null | import textwrap
from nbformat import NotebookNode
from nbformat.v4 import new_markdown_cell, new_code_cell, new_notebook
from ewatercycle_experiment_launcher.generate import PY3_META
from ewatercycle_experiment_launcher.process import process_notebook
def post(body):
    """Generate a notebook for the request's OPeNDAP URL and launch it."""
    generated = notebook(body['opendap'])
    return process_notebook(body['notebook'], generated)
def notebook(opendap) -> NotebookNode:
    """Build the generated notebook: a welcome note plus xarray starter cells."""
    welcome = textwrap.dedent("""
    # Welcome
    This notebook was generated by the eWaterCycle experiment launcher.
    """)
    code_sources = [
        "import xarray as xr",
        "ds = xr.open_dataset('{0}')".format(opendap),
        "print(ds)",
    ]
    cells = [new_markdown_cell(welcome)]
    cells.extend(new_code_cell(source) for source in code_sources)
    return new_notebook(cells=cells, metadata=PY3_META)
| 29.689655 | 72 | 0.724739 |
acf78b4e72369dea14edd9597100f670944c7558 | 1,775 | py | Python | third/dlib/find_candidate_object_locations.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | third/dlib/find_candidate_object_locations.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | third/dlib/find_candidate_object_locations.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | #!/usr/bin/python
#
# This example shows how to use find_candidate_object_locations(). The
# function takes an input image and generates a set of candidate rectangles
# which are expected to bound any objects in the image.
# It is based on the paper:
# Segmentation as Selective Search for Object Recognition by Koen E. A. van de Sande, et al.
#
# Typically, you would use this as part of an object detection pipeline.
# find_candidate_object_locations() nominates boxes that might contain an
# object and you then run some expensive classifier on each one and throw away
# the false alarms. Since find_candidate_object_locations() will only generate
# a few thousand rectangles it is much faster than scanning all possible
# rectangles inside an image.
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
# sudo apt-get install cmake
#
# Also note that this example requires Numpy which can be installed
# via the command:
# pip install numpy
import dlib
# Example input image shipped with the dlib examples.
image_file = './faces/2009_004587.jpg'
img = dlib.load_rgb_image(image_file)
# Locations of candidate objects will be saved into rects
rects = []
# Selective-search style proposal generation; min_size filters tiny boxes.
dlib.find_candidate_object_locations(img, rects, min_size=500)
print("number of rectangles found {}".format(len(rects)))
for k, d in enumerate(rects):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
| 37.765957 | 95 | 0.733521 |
acf78cc3bc95edc55432f7364f1e460a48725e2d | 943 | py | Python | django_cockroachdb/utils.py | otan-cockroach/django-cockroachdb | 3a69bcc3f283be2eb0366e749e273bc047415453 | [
"Apache-2.0"
] | null | null | null | django_cockroachdb/utils.py | otan-cockroach/django-cockroachdb | 3a69bcc3f283be2eb0366e749e273bc047415453 | [
"Apache-2.0"
] | null | null | null | django_cockroachdb/utils.py | otan-cockroach/django-cockroachdb | 3a69bcc3f283be2eb0366e749e273bc047415453 | [
"Apache-2.0"
] | null | null | null | import django
from django.core.exceptions import ImproperlyConfigured
from django.utils.timezone import get_fixed_timezone, utc
from django.utils.version import get_version_tuple
def utc_tzinfo_factory(offset):
    """Return a tzinfo for ``offset``: Django's UTC for zero, a
    fixed-offset timezone otherwise."""
    return utc if offset == 0 else get_fixed_timezone(offset)
def check_django_compatability():
    """
    Verify that this version of django-cockroachdb is compatible with the
    installed version of Django. For example, any django-cockroachdb 2.2.x is
    compatible with Django 2.2.y.
    """
    # Package version is read lazily from the package's own __init__.
    from . import __version__
    # Compare (major, minor) only; the patch level may differ.
    if django.VERSION[:2] != get_version_tuple(__version__)[:2]:
        raise ImproperlyConfigured(
            'You must use the latest version of django-cockroachdb {A}.{B}.x '
            'with Django {A}.{B}.y (found django-cockroachdb {C}).'.format(
                A=django.VERSION[0],
                B=django.VERSION[1],
                C=__version__,
            )
        )
| 32.517241 | 78 | 0.662778 |
acf78d3baff76f160b6e4edcb215269a959cd733 | 602 | py | Python | setup.py | nashif/junit2html | 9a0613d8972e134105791edccad622ebea87e85a | [
"MIT"
] | null | null | null | setup.py | nashif/junit2html | 9a0613d8972e134105791edccad622ebea87e85a | [
"MIT"
] | null | null | null | setup.py | nashif/junit2html | 9a0613d8972e134105791edccad622ebea87e85a | [
"MIT"
] | null | null | null | from distutils.core import setup
# Data file patterns (CSS for the HTML report) bundled with the package.
files = ["*.css"]
setup(
    name="junit2html",
    version="0.1.0",
    description="Generate HTML reports from Junit results",
    author="Ian Norton",
    author_email="inorton@gmail.com",
    url="https://gitlab.com/inorton/junit2html",
    packages=["junit2htmlreport"],
    package_data={"junit2htmlreport": files},
    # NOTE(review): entry_points is a setuptools feature; plain
    # distutils.core.setup ignores unknown keywords, so confirm the
    # console script actually gets installed.
    entry_points={'console_scripts': ['junit2html=junit2htmlreport.runner:start']},
    platforms=["any"],
    license="License :: OSI Approved :: MIT License",
    # Typo fixed: "Genearate" -> "Generate".
    long_description="Generate a single file HTML report from a Junit XML file"
)
| 30.1 | 83 | 0.694352 |
acf78d43ea7fab5851dc63cdc664b5202f9e31dd | 159 | py | Python | todo/admin.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | todo/admin.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | todo/admin.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import ToDoList
# Register ToDoList with the default ModelAdmin so it appears in the admin UI.
admin.site.register(ToDoList)
| 17.666667 | 39 | 0.773585 |
acf78dc9bcbc74e029c2651123ee5124f9f70928 | 3,607 | py | Python | sem5/task8.py | shevdan/Algorithms-DataStructures | b6b7868c7dd0d6a4a33f4b09c60eb2a78af3270d | [
"MIT"
] | null | null | null | sem5/task8.py | shevdan/Algorithms-DataStructures | b6b7868c7dd0d6a4a33f4b09c60eb2a78af3270d | [
"MIT"
] | null | null | null | sem5/task8.py | shevdan/Algorithms-DataStructures | b6b7868c7dd0d6a4a33f4b09c60eb2a78af3270d | [
"MIT"
] | null | null | null |
class TreeNode:
    """A single node of a binary tree: a value and two optional children."""

    def __init__(self, val, left=None, right=None) -> None:
        self.val, self.left, self.right = val, left, right
class BST:
    """A duplicate-free binary search tree over comparable values.

    Nodes are TreeNode instances (or any object exposing ``val``, ``left``
    and ``right``); ``root`` is None for an empty tree.
    """

    def __init__(self) -> None:
        self.root = None

    def max_sum(self):
        """Return the largest subtree sum, or 0 for an empty tree.

        Note: the running maximum starts at 0, so a tree whose every
        subtree sums negative still reports 0.
        """
        best = [0]

        def recurse(node):
            # Returns the sum of the subtree rooted at ``node`` while
            # tracking the best subtree sum seen so far.
            if node is None:
                return 0
            subtotal = node.val + recurse(node.left) + recurse(node.right)
            best[0] = max(best[0], subtotal)
            return subtotal

        recurse(self.root)
        return best[0]

    def is_empty(self):
        """True when the tree has no nodes."""
        return self.root is None

    def inorder(self):
        """Return an iterator over the values in ascending (inorder) order."""
        values = []

        def recurse(node):
            if node is not None:
                recurse(node.left)
                values.append(node.val)
                recurse(node.right)

        recurse(self.root)
        return iter(values)

    def search(self, val):
        """Recursively find the node holding ``val`` (None when absent)."""
        def recurse(node):
            if node is None or node.val == val:
                return node
            if val > node.val:
                return recurse(node.right)
            return recurse(node.left)

        return recurse(self.root)

    def iterative_search(self, val):
        """Iteratively find the node holding ``val`` (None when absent).

        Bug fix: the original re-tested ``node.val`` after stepping to a
        child, raising AttributeError once the walk fell off the tree.
        """
        node = self.root
        while node is not None and node.val != val:
            node = node.right if val > node.val else node.left
        return node

    def node_min(self, node):
        """Leftmost (minimum) node of the subtree rooted at ``node``."""
        while node.left is not None:
            node = node.left
        return node

    def node_max(self, node):
        """Rightmost (maximum) node of the subtree rooted at ``node``."""
        while node.right is not None:
            node = node.right
        return node

    def bst_min(self):
        """Node holding the smallest value, or None for an empty tree."""
        if self.root is None:
            return self.root
        return self.node_min(self.root)

    def bst_max(self):
        """Node holding the largest value, or None for an empty tree.

        Bug fix: this was a second ``bst_min`` definition that shadowed
        the real minimum while actually computing the maximum.
        """
        if self.root is None:
            return self.root
        return self.node_max(self.root)

    def insert(self, val):
        """Insert ``val``; duplicates and None are silently ignored."""
        if val is None:
            return
        if self.root is None:
            self.root = TreeNode(val)
            return

        def recurse(node, val):
            if node.val == val:
                return
            if val > node.val:
                if node.right:
                    return recurse(node.right, val)
                node.right = TreeNode(val)
                return
            if node.left:
                return recurse(node.left, val)
            node.left = TreeNode(val)

        recurse(self.root, val)

    def from_lst(self, lst):
        """Insert every item of ``lst`` in order."""
        for itm in lst:
            self.insert(itm)
def solve(root):
    """Rewrite the BST in place as its 'greater sum tree': each node's value
    becomes the sum of all values >= it.  Returns the (mutated) root."""
    running_total = 0

    def visit(node):
        # Reverse inorder walk: largest values first, accumulating the total.
        nonlocal running_total
        if node is not None:
            visit(node.right)
            running_total += node.val
            node.val = running_total
            visit(node.left)

    visit(root)
    return root
if __name__ == "__main__":
bst = BST()
bst.insert(1)
bst.insert(2)
bst.insert(5)
bst.insert(3)
bst.insert(4)
solve(bst.root)
for elm in bst.inorder():
print(elm) | 22.685535 | 59 | 0.47796 |
acf78e910c7fd78322b1cafd8c47300cd4807161 | 397 | py | Python | newsripper/newsripper/wsgi.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | null | null | null | newsripper/newsripper/wsgi.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | 1 | 2020-05-12T17:20:09.000Z | 2020-05-12T17:20:09.000Z | newsripper/newsripper/wsgi.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | null | null | null | """
WSGI config for newsripper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newsripper.settings")
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
acf78ed4427fa9440cac30d3401fc078400b74d1 | 3,655 | py | Python | examples/protocols/mqtt/tcp/mqtt_tcp_example_test.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | examples/protocols/mqtt/tcp/mqtt_tcp_example_test.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | examples/protocols/mqtt/tcp/mqtt_tcp_example_test.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | import os
import re
import socket
import struct
import sys
import time
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
msgid = -1
def get_my_ip():
    """Return the local IPv4 address used for outbound internet traffic.

    "Connecting" a UDP socket to a public address sends no packet; it only
    makes the OS pick the outbound interface, whose address we read back.
    """
    # ``with`` closes the socket even when connect() raises (the original
    # leaked the descriptor on that path).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s1:
        s1.connect(('8.8.8.8', 80))
        return s1.getsockname()[0]
def mqqt_server_sketch(my_ip, port):
    """One-shot fake MQTT broker: accept a single client, acknowledge its
    CONNECT and one QoS1 PUBLISH, and store the publish packet id in the
    module-global ``msgid``."""
    global msgid
    print('Starting the server on {}'.format(my_ip))
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(60)
        s.bind((my_ip, port))
        s.listen(1)
        q,addr = s.accept()
        q.settimeout(30)
        print('connection accepted')
    except Exception:
        print('Local server on {}:{} listening/accepting failure: {}'
              'Possibly check permissions or firewall settings'
              'to accept connections on this address'.format(my_ip, port, sys.exc_info()[0]))
        raise
    data = q.recv(1024)
    # check if received initial empty message
    print('received from client {}'.format(data))
    # Hard-coded MQTT CONNACK bytes (connection accepted).
    data = bytearray([0x20, 0x02, 0x00, 0x00])
    q.send(data)
    # try to receive qos1
    data = q.recv(1024)
    # The PUBLISH packet id is read as a big-endian uint16 at offset 15;
    # assumes the fixed topic length used by the example firmware.
    msgid = struct.unpack('>H', data[15:17])[0]
    print('received from client {}, msgid: {}'.format(data, msgid))
    # PUBACK echoing the received packet id back to the client.
    data = bytearray([0x40, 0x02, data[15], data[16]])
    q.send(data)
    time.sleep(5)
    s.close()
    print('server closed')
@ttfw_idf.idf_example_test(env_tag='Example_EthKitV1')
def test_examples_protocol_mqtt_qos1(env, extra_data):
    global msgid
    # NOTE(review): the string below sits after ``global`` so it is not a
    # real docstring — it is a no-op literal documenting the test steps.
    """
    steps: (QoS1: Happy flow)
    1. start the broker broker (with correctly sending ACK)
    2. DUT client connects to a broker and publishes qos1 message
    3. Test evaluates that qos1 message is queued and removed from queued after ACK received
    4. Test the broker received the same message id evaluated in step 3
    """
    dut1 = env.get_dut('mqtt_tcp', 'examples/protocols/mqtt/tcp', dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'mqtt_tcp.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('mqtt_tcp_bin_size', '{}KB'.format(bin_size // 1024))
    # 1. start mqtt broker sketch (in a thread; it sets the global msgid)
    host_ip = get_my_ip()
    thread1 = Thread(target=mqqt_server_sketch, args=(host_ip,1883))
    thread1.start()
    # 2. start the dut test and wait till client gets IP address
    dut1.start_app()
    # waiting for getting the IP address
    try:
        ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # Point the device at our fake broker, then wait for the sketch to finish.
    print('writing to device: {}'.format('mqtt://' + host_ip + '\n'))
    dut1.write('mqtt://' + host_ip + '\n')
    thread1.join()
    print('Message id received from server: {}'.format(msgid))
    # 3. check the message id was enqueued and then deleted
    msgid_enqueued = dut1.expect(re.compile(r'outbox: ENQUEUE msgid=([0-9]+)'), timeout=30)
    msgid_deleted = dut1.expect(re.compile(r'outbox: DELETED msgid=([0-9]+)'), timeout=30)
    # 4. check the msgid of received data are the same as that of enqueued and deleted from outbox
    if (msgid_enqueued[0] == str(msgid) and msgid_deleted[0] == str(msgid)):
        print('PASS: Received correct msg id')
    else:
        print('Failure!')
        raise ValueError('Mismatch of msgid: received: {}, enqueued {}, deleted {}'.format(msgid, msgid_enqueued, msgid_deleted))
if __name__ == '__main__':
    # Allow running this test case directly, outside the CI runner.
    test_examples_protocol_mqtt_qos1()
| 36.188119 | 129 | 0.661833 |
acf78f353df330b293d9c137c8733ed3a0d670aa | 1,582 | py | Python | crio/setup.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | crio/setup.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | crio/setup.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
# Directory containing this setup.py; all file reads are relative to it.
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'crio', '__about__.py')) as f:
    exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
CHECKS_BASE_REQ = 'datadog-checks-base'
setup(
    name='datadog-crio',
    version=ABOUT['__version__'],
    description='The Crio check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent crio check container runtime kubernetes',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',
    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',
    # License
    license='BSD-3-Clause',
    # See https://pypi.org/classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.8',
    ],
    # The package we're going to ship
    packages=['datadog_checks.crio'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    # Extra files to ship with the wheel package
    include_package_data=True,
)
| 30.423077 | 74 | 0.678255 |
acf79089d25f99560f52bc73dea1b984b19e9dc1 | 17,780 | py | Python | networkx/algorithms/link_analysis/pagerank_alg.py | amcandio/networkx | 143623135bc93fa69741387e4d3ed87ecbfbb419 | [
"BSD-3-Clause"
] | 1 | 2022-01-22T08:18:01.000Z | 2022-01-22T08:18:01.000Z | networkx/algorithms/link_analysis/pagerank_alg.py | amcandio/networkx | 143623135bc93fa69741387e4d3ed87ecbfbb419 | [
"BSD-3-Clause"
] | null | null | null | networkx/algorithms/link_analysis/pagerank_alg.py | amcandio/networkx | 143623135bc93fa69741387e4d3ed87ecbfbb419 | [
"BSD-3-Clause"
] | null | null | null | """PageRank analysis of graph structure. """
from warnings import warn
import networkx as nx
__all__ = ["pagerank", "pagerank_numpy", "pagerank_scipy", "google_matrix"]
def pagerank(
    G,
    alpha=0.85,
    personalization=None,
    max_iter=100,
    tol=1.0e-6,
    nstart=None,
    weight="weight",
    dangling=None,
):
    """Return the PageRank of the nodes in the graph.

    PageRank ranks the nodes of ``G`` according to the structure of the
    incoming links; it was originally designed to rank web pages.

    Parameters
    ----------
    G : graph
        A NetworkX graph.  Undirected graphs are treated as directed
        graphs with a pair of opposite edges for each undirected edge.
    alpha : float, optional
        Damping parameter for PageRank, default 0.85.
    personalization : dict, optional
        The "personalization vector": maps a subset of graph nodes to
        weights.  At least one weight must be non-zero; nodes missing
        from the dict get weight zero.  Defaults to a uniform
        distribution over all nodes.
    max_iter : integer, optional
        Maximum number of iterations in the power-method solver.
    tol : float, optional
        Error tolerance used to check convergence (the iteration stops
        once the l1 change is below ``len(G) * tol``).
    nstart : dictionary, optional
        Starting PageRank value for each node.
    weight : key, optional
        Edge data key to use as weight.  If None, all weights are 1.
    dangling : dict, optional
        The out-edge distribution assigned to "dangling" nodes (nodes
        with no out-edges): keys are target nodes, values are edge
        weights.  Defaults to the personalization vector.  Must yield an
        irreducible transition matrix (see notes under google_matrix).

    Returns
    -------
    pagerank : dictionary
        Dictionary of nodes with PageRank as value.

    Raises
    ------
    PowerIterationFailedConvergence
        If the power iteration fails to converge within ``max_iter``
        iterations.

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank(G, alpha=0.9)

    See Also
    --------
    pagerank_numpy, pagerank_scipy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
    """
    # Delegate to the sparse (scipy) implementation; every option is
    # forwarded by keyword so the call stays correct even if the
    # delegate's signature gains new parameters.
    return pagerank_scipy(
        G,
        alpha=alpha,
        personalization=personalization,
        max_iter=max_iter,
        tol=tol,
        nstart=nstart,
        weight=weight,
        dangling=dangling,
    )
def _pagerank_python(
    G,
    alpha=0.85,
    personalization=None,
    max_iter=100,
    tol=1.0e-6,
    nstart=None,
    weight="weight",
    dangling=None,
):
    """Pure-Python power-iteration implementation of PageRank."""
    if len(G) == 0:
        return {}
    D = G if G.is_directed() else G.to_directed()
    # Right-stochastic copy: each node's out-edge weights sum to one.
    W = nx.stochastic_graph(D, weight=weight)
    n_nodes = W.number_of_nodes()
    # Starting vector: uniform unless an explicit nstart is supplied.
    if nstart is None:
        rank = dict.fromkeys(W, 1.0 / n_nodes)
    else:
        total = float(sum(nstart.values()))
        rank = {node: val / total for node, val in nstart.items()}
    # Personalization vector, normalized to sum to one.
    if personalization is None:
        pers = dict.fromkeys(W, 1.0 / n_nodes)
    else:
        total = float(sum(personalization.values()))
        pers = {node: val / total for node, val in personalization.items()}
    # Dangling-node distribution defaults to the personalization vector.
    if dangling is None:
        dangling_dist = pers
    else:
        total = float(sum(dangling.values()))
        dangling_dist = {node: val / total for node, val in dangling.items()}
    dangling_nodes = [
        node for node in W if W.out_degree(node, weight=weight) == 0.0
    ]
    # Power iteration: up to max_iter sweeps over all nodes.
    for _ in range(max_iter):
        prev = rank
        rank = dict.fromkeys(prev.keys(), 0)
        danglesum = alpha * sum(prev[node] for node in dangling_nodes)
        for node in rank:
            # Left multiply, rank^T = prev^T * W, one out-edge at a time.
            for _, nbr, wt in W.edges(node, data=weight):
                rank[nbr] += alpha * prev[node] * wt
            rank[node] += danglesum * dangling_dist.get(node, 0) + (
                1.0 - alpha
            ) * pers.get(node, 0)
        # Converged when the l1 change drops below len(G) * tol.
        if sum(abs(rank[node] - prev[node]) for node in rank) < n_nodes * tol:
            return rank
    raise nx.PowerIterationFailedConvergence(max_iter)
def google_matrix(
    G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None
):
    """Returns the Google matrix of the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph. Undirected graphs will be converted to a directed
        graph with two directed edges for each undirected edge.
    alpha : float
        The damping factor.
    personalization : dict, optional
        The "personalization vector": a dictionary mapping a subset of graph
        nodes to weights. At least one value must be non-zero; nodes missing
        from the dict get weight zero. Defaults to a uniform distribution.
    nodelist : list, optional
        The rows and columns are ordered according to the nodes in nodelist.
        If nodelist is None, then the ordering is produced by G.nodes().
    weight : key, optional
        Edge data key to use as weight. If None weights are set to 1.
    dangling : dict, optional
        The outedges to be assigned to any "dangling" nodes (nodes without
        any outedges). The dict key is the node the outedge points to and the
        dict value is the weight of that outedge. By default, dangling nodes
        are given outedges according to the personalization vector (uniform
        if not specified). This must be selected to result in an irreducible
        transition matrix (see notes below).

    Returns
    -------
    A : NumPy matrix
        Google matrix of the graph.

    Notes
    -----
    The matrix returned represents the transition matrix of the Markov chain
    used in PageRank. For PageRank to converge to a unique solution (a unique
    stationary distribution), the transition matrix must be irreducible: a
    path must exist between every pair of nodes, or "rank sinks" appear.

    This implementation works with Multi(Di)Graphs. For multigraphs the
    weight between two nodes is set to be the sum of all edge weights
    between those nodes.

    See Also
    --------
    pagerank, pagerank_numpy, pagerank_scipy
    """
    import numpy as np
    # TODO: Remove this warning in version 3.0
    import warnings
    # np.matrix is being phased out; the return type becomes np.ndarray in 3.0.
    warnings.warn(
        "google_matrix will return an np.ndarray instead of a np.matrix in\n"
        "NetworkX version 3.0.",
        FutureWarning,
        stacklevel=2,
    )
    if nodelist is None:
        nodelist = list(G)
    A = nx.to_numpy_array(G, nodelist=nodelist, weight=weight)
    N = len(G)
    if N == 0:
        # Empty graph: nothing to normalize, return the empty matrix as-is.
        # TODO: Remove np.asmatrix wrapper in version 3.0
        return np.asmatrix(A)
    # Personalization vector
    if personalization is None:
        p = np.repeat(1.0 / N, N)
    else:
        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)
        if p.sum() == 0:
            raise ZeroDivisionError
        p /= p.sum()
    # Dangling nodes
    if dangling is None:
        # Use the personalization vector if no dangling vector is specified.
        dangling_weights = p
    else:
        # Convert the dangling dictionary into an array in nodelist order
        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
        dangling_weights /= dangling_weights.sum()
    dangling_nodes = np.where(A.sum(axis=1) == 0)[0]
    # Assign dangling_weights to any dangling nodes (nodes with no out links);
    # this in-place write must precede the row normalization below.
    A[dangling_nodes] = dangling_weights
    A /= A.sum(axis=1)[:, np.newaxis]  # Normalize rows to sum to 1
    # TODO: Remove np.asmatrix wrapper in version 3.0
    return np.asmatrix(alpha * A + (1 - alpha) * p)
def pagerank_numpy(G, alpha=0.85, personalization=None, weight="weight", dangling=None):
    """Return the PageRank of the nodes in ``G`` via a dense eigensolver.

    Deprecated: use :func:`pagerank` instead; scheduled for removal in
    NetworkX 3.0.  The dominant left eigenvector of the Google matrix is
    computed exactly with NumPy's LAPACK interface, which is fastest and
    most accurate for small graphs.

    Parameters
    ----------
    G : graph
        A NetworkX graph.  Undirected graphs are treated as directed
        graphs with two opposite edges per undirected edge.
    alpha : float, optional
        Damping parameter for PageRank, default 0.85.
    personalization : dict, optional
        Personalization vector mapping a subset of nodes to weights; at
        least one weight must be non-zero.  Missing nodes get weight
        zero.  Defaults to a uniform distribution.
    weight : key, optional
        Edge data key to use as weight.  If None, all weights are 1.
    dangling : dict, optional
        Out-edge distribution for dangling nodes; defaults to the
        personalization vector (see google_matrix).

    Returns
    -------
    pagerank : dictionary
        Dictionary of nodes with PageRank as value.

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank_numpy(G, alpha=0.9)

    See Also
    --------
    pagerank, pagerank_scipy, google_matrix
    """
    msg = "networkx.pagerank_numpy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead."
    warn(msg, DeprecationWarning, stacklevel=2)
    import numpy as np

    if len(G) == 0:
        return {}
    google = google_matrix(
        G, alpha, personalization=personalization, weight=weight, dangling=dangling
    )
    # Exact solution: the dominant left eigenvector of the Google matrix
    # (computed via numpy's LAPACK solver on the transpose).
    eigvals, eigvecs = np.linalg.eig(google.T)
    dominant = np.array(eigvecs[:, np.argmax(eigvals)]).flatten().real
    norm = float(dominant.sum())
    return dict(zip(G, map(float, dominant / norm)))
def pagerank_scipy(
    G,
    alpha=0.85,
    personalization=None,
    max_iter=100,
    tol=1.0e-6,
    nstart=None,
    weight="weight",
    dangling=None,
):
    """Return the PageRank of the nodes in ``G`` via sparse power iteration.

    Deprecated: use :func:`pagerank` instead; scheduled for removal in
    NetworkX 3.0.  The computation runs power iteration on a SciPy
    sparse-matrix representation of the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph.  Undirected graphs are treated as directed
        graphs with two opposite edges per undirected edge.
    alpha : float, optional
        Damping parameter for PageRank, default 0.85.
    personalization : dict, optional
        Personalization vector mapping a subset of nodes to weights; at
        least one weight must be non-zero.  Missing nodes get weight
        zero.  Defaults to a uniform distribution.
    max_iter : integer, optional
        Maximum number of power-iteration steps.
    tol : float, optional
        l1 error tolerance used to check convergence, scaled by ``len(G)``.
    nstart : dictionary, optional
        Starting PageRank value for each node.
    weight : key, optional
        Edge data key to use as weight.  If None, all weights are 1.
    dangling : dict, optional
        Out-edge distribution for dangling nodes; defaults to the
        personalization vector (see google_matrix).

    Returns
    -------
    pagerank : dictionary
        Dictionary of nodes with PageRank as value.

    Raises
    ------
    PowerIterationFailedConvergence
        If the power iteration fails to converge within ``max_iter``
        iterations.

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank_scipy(G, alpha=0.9)

    See Also
    --------
    pagerank, pagerank_numpy, google_matrix
    """
    msg = "networkx.pagerank_scipy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead."
    warn(msg, DeprecationWarning, stacklevel=2)
    import numpy as np
    import scipy as sp
    import scipy.sparse  # call as sp.sparse

    node_count = len(G)
    if node_count == 0:
        return {}
    nodes = list(G)
    adj = nx.to_scipy_sparse_array(G, nodelist=nodes, weight=weight, dtype=float)
    out_strength = adj.sum(axis=1)
    # Invert the non-zero row sums so `diag @ adj` is row-stochastic.
    out_strength[out_strength != 0] = 1.0 / out_strength[out_strength != 0]
    # TODO: csr_array
    diag = sp.sparse.csr_array(sp.sparse.spdiags(out_strength.T, 0, *adj.shape))
    adj = diag @ adj
    # Starting vector: uniform unless an explicit nstart is supplied.
    if nstart is None:
        rank = np.repeat(1.0 / node_count, node_count)
    else:
        rank = np.array([nstart.get(node, 0) for node in nodes], dtype=float)
        rank = rank / rank.sum()
    # Personalization vector, normalized to sum to one.
    if personalization is None:
        pers = np.repeat(1.0 / node_count, node_count)
    else:
        pers = np.array([personalization.get(node, 0) for node in nodes], dtype=float)
        if pers.sum() == 0:
            raise ZeroDivisionError
        pers = pers / pers.sum()
    # Dangling-node distribution defaults to the personalization vector.
    if dangling is None:
        dangling_dist = pers
    else:
        # Convert the dangling dictionary into an array in nodelist order.
        dangling_dist = np.array([dangling.get(node, 0) for node in nodes], dtype=float)
        dangling_dist /= dangling_dist.sum()
    is_dangling = np.where(out_strength == 0)[0]
    # Power iteration: make up to max_iter iterations.
    for _ in range(max_iter):
        prev = rank
        rank = alpha * (rank @ adj + sum(rank[is_dangling]) * dangling_dist) + (
            1 - alpha
        ) * pers
        # Converged when the l1 change drops below len(G) * tol.
        if np.absolute(rank - prev).sum() < node_count * tol:
            return dict(zip(nodes, map(float, rank)))
    raise nx.PowerIterationFailedConvergence(max_iter)
| 34.794521 | 117 | 0.663555 |
acf79248d19c262b9849b7ac1697e292419bcec5 | 5,978 | py | Python | sfdata_annexa_clean/log/cin_log.py | kws/sfdata-annexa-clean | 267db90558f00364fb44911c2bc87a39187f6c0f | [
"MIT"
] | 7 | 2020-01-30T18:18:08.000Z | 2020-03-31T07:19:10.000Z | sfdata_annexa_clean/log/cin_log.py | kws/sfdata-annexa-clean | 267db90558f00364fb44911c2bc87a39187f6c0f | [
"MIT"
] | 1 | 2020-01-30T18:21:53.000Z | 2020-01-30T18:21:53.000Z | sfdata_annexa_clean/log/cin_log.py | kws/sfdata-annexa-clean | 267db90558f00364fb44911c2bc87a39187f6c0f | [
"MIT"
] | 2 | 2019-12-22T13:21:10.000Z | 2020-01-31T11:13:07.000Z | from lxml import etree
import pandas as pd
import re
# Function to pull all the files data into a unique dataframe
# We recommend including all of the events into the cin log: it is the default list included below in build_cinrecord
# You can edit if you only need certain events
def build_cinrecord(files, include_cincensus,
                    tag_list=('CINreferralDate', 'CINclosureDate', 'DateOfInitialCPC',
                              'AssessmentActualStartDate', 'AssessmentAuthorisationDate',
                              'S47ActualStartDate', 'CPPstartDate', 'CPPendDate')):
    """Pull all CIN Census files' event data into a single dataframe.

    Fixes vs previous version: the default ``tag_list`` is now an immutable
    tuple (the old list was a mutable default argument), the truthiness
    check replaces ``== True``, and the function returns early instead of
    nesting the whole body in an ``if``.

    :param files: iterable of CIN Census XML file paths/objects for lxml.
    :param include_cincensus: if falsy, skip processing and return None.
    :param tag_list: event tags to extract; the default includes every
        recommended event for the CIN log.
    :return: deduplicated dataframe of events, or None when
        ``include_cincensus`` is falsy.
    """
    if not include_cincensus:
        return None
    data_list = []
    for i, file in enumerate(files):
        # Upload files and set root
        tree = etree.parse(file)
        root = tree.getroot()
        NS = get_namespace(root)
        children = root.find('Children', NS)
        # Get data
        print('Extracting data from file {} out of {} from CIN Census'.format(i+1, len(files)))
        file_data = buildchildren(children, tag_list, NS)
        data_list.append(file_data)
    cinrecord = pd.concat(data_list, sort=False)
    # Remove duplicates of LAchildID, Date and Type - keep the row with the
    # fewest null values (sort ascending by null count, then drop later dups).
    cinrecord['null_values'] = cinrecord.isnull().sum(axis=1)
    cinrecord = cinrecord.sort_values('null_values')
    cinrecord.drop_duplicates(subset=['LAchildID', 'Date', 'Type'], keep='first', inplace=True)
    cinrecord.drop(labels='null_values', axis=1, inplace=True)
    return cinrecord
# Functions to build dataframes containing information of the child within each file
def buildchildren(children, tag_list, NS):
    """Concatenate the per-child event dataframes of every child element.

    ``buildchild`` may return None for children missing required modules;
    ``pd.concat`` silently ignores None entries.
    """
    frames = [buildchild(child, tag_list, NS) for child in children]
    return pd.concat(frames, sort=False)
def buildchild(child, tag_list, NS):
    '''
    Create a dataframe with one row per event (tags in tag_list) found in
    the child's CINdetails, annotated with the child's identifiers and
    characteristics.
    Returns None when the child lacks any of ChildIdentifiers,
    ChildCharacteristics or CINdetails.
    '''
    df_list = []
    # Only process children that carry all three required modules.
    if 'ChildIdentifiers' in get_childrentags(child) and \
    'ChildCharacteristics' in get_childrentags(child) and \
    'CINdetails' in get_childrentags(child):
        for group in child:
            if group.tag.endswith('ChildIdentifiers'):
                childidentifiers = get_ChildIdentifiers(group)
            if group.tag.endswith('ChildCharacteristics'):
                childcharacteristics = get_ChildCharacteristics(group, NS)
            if group.tag.endswith('CINdetails'):
                # One single-row dataframe per occurrence of each event tag.
                for tag in tag_list:
                    event_list = group.findall('.//{}'.format(tag), NS)
                    for event in event_list:
                        event_group = get_group(event, NS)
                        df = pd.DataFrame(event_group)
                        df_list.append(df)
        # NOTE(review): if CINdetails contains none of the tags, df_list is
        # empty and pd.concat raises ValueError — confirm inputs always carry
        # at least one event.
        child_data = pd.concat(df_list, sort=False)
        # Broadcast identifier/characteristic columns onto every event row.
        for key, value in childidentifiers.items() :
            child_data[key] = value
        for key, value in childcharacteristics.items() :
            child_data[key] = value
        return(child_data)
    return None
# Functions to store the information at child level
def get_ChildIdentifiers(element, NS=None):
    """Map each ChildIdentifiers sub-element's local tag name to its text.

    Namespace prefixes are stripped via etree.QName; a repeated tag keeps
    the last occurrence's value (same as the original loop).
    """
    return {etree.QName(item).localname: item.text for item in element}
def get_ChildCharacteristics(element, NS):
    """Extract the child's Ethnicity text and joined Disabilities list.

    Bug fix: the previous version assigned ``column``/``value`` only inside
    the two recognized branches but stored them for *every* sub-element, so
    an unrecognized tag either duplicated the previous entry or raised
    NameError on the first iteration. Unrecognized tags are now skipped.

    :param element: the ChildCharacteristics lxml element.
    :param NS: namespace mapping used by get_list's findall.
    :return: dict mapping local tag names to their extracted values.
    """
    childcharacteristics = {}
    for group in element:
        if group.tag.endswith('Ethnicity'):
            # Simple text value.
            childcharacteristics[etree.QName(group).localname] = group.text
        elif group.tag.endswith('Disabilities'):
            # Comma-joined list of all Disability entries.
            childcharacteristics[etree.QName(group).localname] = get_list(
                group, 'Disability', NS)
    return childcharacteristics
# Functions to get information at element level
def get_list(element, tag, NS):
    '''
    Collect the text of every 'tag' niece (children of the element's
    siblings sharing that tag) and return them as one comma-joined string
    with all spaces removed.
    '''
    matches = element.getparent().findall('.//{}'.format(tag), NS)
    joined = ','.join(match.text.strip() for match in matches)
    return joined.replace(' ', '')
def get_group(element, NS):
    '''
    Build a dict describing one event: its date (the element's text), its
    type (the tag's local name), the values of its leaf siblings, and — for
    Assessments / ChildProtectionPlans — the nested AssessmentFactors or
    CPPreviewDate lists.
    '''
    group = {}
    # Load our reference element
    group['Date'] = element.text
    group['Type'] = etree.QName(element).localname
    # Get the other elements on the same level (siblings)
    siblings = element.getparent().getchildren()
    for sibling in siblings:
        if len(sibling.getchildren())==0: # if siblings don't have children, just get their value
            column = etree.QName(sibling).localname
            value = sibling.text
            # Wrapped in a list so pd.DataFrame(group) builds a one-row frame.
            group[column] = [value]
    # If we're in the Assessment or ChildProtectionPlans modules, we need to get down one level
    # to collect all AssessmentFactors and CPPreviewDate
    if element.getparent().tag.endswith('Assessments'):
        group['Factors'] = get_list(element, 'AssessmentFactors', NS)
    if element.getparent().tag.endswith('ChildProtectionPlans'):
        group['CPPreview'] = get_list(element, 'CPPreviewDate', NS)
    return group
def get_childrentags(element):
    '''
    Return the local (namespace-stripped) tag names of the element's
    direct children.
    '''
    return [etree.QName(child).localname for child in element.getchildren()]
# Function to dentify namespace
def get_namespace(root):
    """Extract the default namespace mapping from the root element's tag.

    lxml/ElementTree spell namespaced tags as '{namespace}localname'; the
    namespace (greedy, first '{' to last '}') is returned under the None
    key, or None when the tag carries no namespace.
    """
    match = re.search(r'{(.*)}.*', root.tag)
    return {None: match.group(1) if match else None}
acf79344004453db2dac844a4abbdf63fde3d55c | 27,826 | py | Python | tensor2tensor/models/image_transformer_2d.py | micmelesse/tensor2tensor | 93d34d69092f86b203f0f0a8230fcd9ecbe9086f | [
"Apache-2.0"
] | 1 | 2018-02-16T15:07:27.000Z | 2018-02-16T15:07:27.000Z | tensor2tensor/models/image_transformer_2d.py | micmelesse/tensor2tensor | 93d34d69092f86b203f0f0a8230fcd9ecbe9086f | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/image_transformer_2d.py | micmelesse/tensor2tensor | 93d34d69092f86b203f0f0a8230fcd9ecbe9086f | [
"Apache-2.0"
] | 2 | 2018-08-24T11:13:17.000Z | 2018-11-01T07:00:33.000Z | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""image generation with transformer (attention).
encoder: [Self-Attention, Feed-forward] x n
decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class Imagetransformer2d(t2t_model.T2TModel):
  """Decoder-only image generation with 2D attention, optionally class-conditional."""

  def body(self, features):
    """Decode the target image, adding the class-label embedding when conditional."""
    hparams = copy.copy(self._hparams)
    class_labels = features["inputs"]
    tgt = features["targets"]
    tgt_shape = common_layers.shape_list(tgt)

    # Log an example image except when reusing variables or predicting.
    summarize = not tf.get_variable_scope().reuse and (
        hparams.mode != tf.estimator.ModeKeys.PREDICT)
    if summarize:
      tf.summary.image("targets", tgt, max_outputs=1)

    dec_in, n_rows, n_cols = cia.prepare_decoder(tgt, hparams)
    if not hparams.unconditional:
      # Broadcast-add the embedded class label over every position.
      dec_in += tf.reshape(
          class_labels, [tgt_shape[0], 1, 1, hparams.hidden_size])

    dec_out = cia.transformer_decoder_layers(
        dec_in,
        None,
        hparams.num_decoder_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")
    return cia.create_output(dec_out, n_rows, n_cols, tgt, hparams)
@registry.register_model
class Img2imgTransformer(t2t_model.T2TModel):
  """Encoder-decoder transformer mapping an input image to an output image."""

  def body(self, features):
    """Encode the source image, then decode targets attending over it."""
    hparams = copy.copy(self._hparams)
    tgt_images = features["targets"]
    src_images = features["inputs"]

    # Log example images except when reusing variables or predicting.
    summarize = not tf.get_variable_scope().reuse and (
        hparams.mode != tf.estimator.ModeKeys.PREDICT)
    if summarize:
      tf.summary.image("inputs", src_images, max_outputs=1)
      tf.summary.image("targets", tgt_images, max_outputs=1)

    # Encoder stack over the source image.
    enc_in = cia.prepare_encoder(src_images, hparams)
    enc_out = cia.transformer_encoder_layers(
        enc_in,
        hparams.num_encoder_layers,
        hparams,
        attention_type=hparams.enc_attention_type,
        name="encoder")

    # Decoder stack over the target image with encoder-decoder attention.
    dec_in, n_rows, n_cols = cia.prepare_decoder(tgt_images, hparams)
    dec_out = cia.transformer_decoder_layers(
        dec_in,
        enc_out,
        hparams.num_decoder_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")
    return cia.create_output(dec_out, n_rows, n_cols, tgt_images, hparams)
@registry.register_model
class Img2imgTransformerBlockParallel(t2t_model.T2TModel):
  """Image-to-image transformer predicting blocks of the output in parallel.

  Each decoder position predicts `hparams.block_size` consecutive output
  positions at once; inference then "guesses" a block and keeps the prefix
  that the model would have produced sequentially (guess-and-check).
  """
  def body(self, features):
    """Encoder/decoder stack followed by a block-expansion feed-forward.

    Returns a tensor of shape [batch, rows, cols, block_size, hidden_size]:
    one hidden vector per predicted block offset.
    """
    assert self._hparams.block_size > 0
    assert not common_layers.is_xla_compiled()
    hparams = copy.copy(self._hparams)
    targets = features["targets"]
    inputs = features["inputs"]
    # Log example images except when reusing variables or predicting.
    if not (tf.get_variable_scope().reuse or
            hparams.mode == tf.estimator.ModeKeys.PREDICT):
      tf.summary.image("inputs", inputs, max_outputs=1)
      tf.summary.image("targets", targets, max_outputs=1)
    encoder_input = cia.prepare_encoder(inputs, hparams)
    encoder_output = cia.transformer_encoder_layers(
        encoder_input,
        hparams.num_encoder_layers,
        hparams,
        attention_type=hparams.enc_attention_type,
        name="encoder")
    decoder_input, rows, cols = cia.prepare_decoder(
        targets, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_decoder_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")
    assert not isinstance(decoder_output, tuple)
    assert len(decoder_output.shape) == 4
    relu_dropout_broadcast_dims = (
        common_layers.comma_separated_string_to_integer_list(
            getattr(self._hparams, "relu_dropout_broadcast_dims", "")))
    # Project each decoder position to block_size hidden vectors at once.
    with tf.variable_scope("block_size_%d" % self._hparams.block_size):
      tf.logging.info("Using block_size %d", self._hparams.block_size)
      block_output = common_layers.dense_relu_dense(
          decoder_output,
          self._hparams.block_size * self._hparams.filter_size,
          self._hparams.block_size * self._hparams.hidden_size,
          dropout=self._hparams.relu_dropout,
          dropout_broadcast_dims=relu_dropout_broadcast_dims)
    batch_size, rows, cols = common_layers.shape_list(decoder_output)[:3]
    # Broadcast the decoder output over the new block axis for postprocess.
    decoder_output = tf.reshape(decoder_output, [
        batch_size,
        rows,
        cols,
        1,
        self._hparams.hidden_size
    ])
    block_output = tf.reshape(block_output, [
        batch_size,
        rows,
        cols,
        self._hparams.block_size,
        self._hparams.hidden_size
    ])
    block_output = common_layers.layer_postprocess(
        decoder_output, block_output, self._hparams)
    return block_output
  def top(self, body_output, features):
    """Map block hidden vectors to 256-way (per-byte) logits.

    In train/eval, a single block offset ("block_index", random during
    training) is selected before projection; in predict, logits for every
    offset are kept and reshaped to [batch, length, block_size, 1, 256].
    """
    assert self._hparams.block_size > 0
    train_or_eval = (
        self._hparams.mode == tf.estimator.ModeKeys.TRAIN or
        self._hparams.mode == tf.estimator.ModeKeys.EVAL)
    if train_or_eval:
      if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
        # Train on one uniformly random block offset per step.
        features["block_index"] = tf.random_uniform(
            shape=[], minval=0, maxval=self._hparams.block_size, dtype=tf.int64)
      else:
        features["block_index"] = 0
      body_output = body_output[:, :, :, features["block_index"], :]
    decoded_image = tf.layers.dense(
        body_output, 256, use_bias=True, activation=None, name="output_conv")
    assert len(features["targets"].shape) == 4
    targets_shape = common_layers.shape_list(features["targets"])
    if train_or_eval:
      output = tf.reshape(decoded_image, targets_shape + [256])
    else:
      output = tf.reshape(decoded_image, [
          targets_shape[0], -1, self._hparams.block_size, 1, 256])
      output = output[:, :targets_shape[1], :, :, :]
    return output
  def loss(self, logits, features):
    """Cross-entropy against targets shifted left by the chosen block offset.

    Targets are stacked for every offset 0..block_size-1 (raster-scan left
    shift), then the offset selected in `top` is picked so the logits and
    targets line up.  During training, per-offset losses are also exported
    as TensorBoard scalars.
    """
    assert self._hparams.block_size > 0
    if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
      return 0.0
    def shift_left_2d(x, k):
      # Drop the first k positions and zero-pad on the right.
      return tf.pad(x, [[0, 0], [0, k]])[:, k:]
    def shift_left_4d_raster_scan(x, k):
      # Shift in flattened raster-scan order, preserving the 4D shape.
      batch_size = common_layers.shape_list(x)[0]
      return tf.reshape(
          shift_left_2d(tf.reshape(x, [batch_size, -1]), k), tf.shape(x))
    targets = features["targets"]
    assert len(targets.shape) == 4
    targets = tf.stack([
        shift_left_4d_raster_scan(targets, i)
        for i in range(self._hparams.block_size)
    ], axis=4)
    if (self._hparams.mode == tf.estimator.ModeKeys.TRAIN or
        self._hparams.mode == tf.estimator.ModeKeys.EVAL):
      assert "block_index" in features
      targets = targets[:, :, :, :, features["block_index"]]
    features["targets"] = targets
    loss = super(Img2imgTransformerBlockParallel, self).loss(logits, features)
    if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
      k = features["block_index"]
      loss_num, loss_den = loss
      loss_val = loss_num / loss_den
      for i in range(self._hparams.block_size):
        # Hack: if you report a loss of NaN, TensorBoard will plot a point at
        # the previous value without a connecting line. This is used here to
        # separate out the training losses by block index.
        one_or_nan = tf.cond(tf.equal(k, i), lambda: 1.0, lambda: float("nan"))
        tf.summary.scalar(
            "block_index_%d" % i, one_or_nan * loss_val, family="losses")
    return loss
  def _greedy_infer(self, features, decode_length, use_tpu=False):
    """Greedy inference entry point; TPU is unsupported for this model."""
    assert not use_tpu
    return self._slow_greedy_infer_guess_and_check(features, decode_length)
  def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
    """Beam search is not implemented for block-parallel decoding."""
    raise NotImplementedError
  def _slow_greedy_infer_guess_and_check(self, features, decode_length):
    """Guess-and-check greedy decoding.

    Each step predicts a whole block, then keeps only the longest prefix
    that agrees with what sequential decoding would have produced —
    either exact agreement within the model's top-k (guess_and_check_top_k)
    or numeric agreement within guess_and_check_epsilon; exactly one of the
    two criteria must be enabled.  Only batch_size 1 is supported.
    """
    assert self._hparams.block_size > 0
    assert self._hparams.force_full_predict
    assert self._hparams.sampling_method == "argmax"
    assert self._decode_hparams.batch_size == 1
    assert self._decode_hparams.block_size > 0
    assert self._decode_hparams.block_size <= self._hparams.block_size
    assert (
        (self._decode_hparams.guess_and_check_top_k > 0) +
        (self._decode_hparams.guess_and_check_epsilon >= 0) == 1)
    inputs_old = features["inputs"]
    assert "targets" not in features
    assert len(features["inputs"].shape) in [3, 4]
    if len(features["inputs"].shape) < 4:
      features["inputs"] = tf.expand_dims(features["inputs"], 2)
    block_size = self._decode_hparams.block_size
    decode_length += tf.shape(features["inputs"])[1]
    def while_exit_cond(result, length):  # pylint: disable=unused-argument
      return length < decode_length
    def infer_step(result, length):
      """Inference step: sample a block, keep the verified prefix."""
      def print_info(samples, result, length, new_length):
        # Host-side logging of per-step progress via tf.py_func.
        tf.logging.info(
            "length=%s new_length=%s length_diff=%s samples-result=%s",
            length,
            new_length,
            new_length - length,
            np.array_str(
                samples[0, -block_size-1:-1, 0, 0] -
                result[0, -block_size:, 0, 0]
            ).replace("\n", ""),
        )
      features["targets"] = tf.pad(result, [[0, 0], [0, 1], [0, 0], [0, 0]])
      samples, logits, losses = self.sample(features)  # pylint: disable=unused-variable
      # Criterion 1: each kept symbol must be within the model's top-k.
      _, top_k_indices = tf.nn.top_k(
          logits[:, :-1, :1, :, :],
          k=self._decode_hparams.guess_and_check_top_k)
      in_top_k = tf.reduce_any(
          tf.equal(tf.to_int64(top_k_indices), tf.expand_dims(result, 4)),
          axis=4)
      # Criterion 2: each kept symbol must be numerically close to the sample.
      within_epsilon = tf.less_equal(
          tf.abs(result - samples[:, :-1, :1, :]),
          self._decode_hparams.guess_and_check_epsilon)
      if self._decode_hparams.guess_and_check_top_k:
        tf.logging.info(
            "Using guess_and_check_top_k=%s",
            self._decode_hparams.guess_and_check_top_k)
        correct = in_top_k
      else:
        tf.logging.info(
            "Using guess_and_check_epsilon=%s",
            self._decode_hparams.guess_and_check_epsilon)
        correct = within_epsilon
      # new_length = length of the longest all-correct prefix.
      correct_cumsum = tf.cumsum(tf.to_int32(correct), axis=1)
      perfect_cumsum = 1 + tf.range(tf.shape(correct)[1])
      for axis in [0, 2, 3]:
        perfect_cumsum = tf.expand_dims(perfect_cumsum, axis=axis)
      new_length = tf.reduce_sum(
          tf.to_int32(tf.equal(correct_cumsum, perfect_cumsum)), axis=1)
      new_length = tf.squeeze(new_length, axis=[0, 1, 2])
      new_length = tf.minimum(new_length, decode_length)
      # Keep the verified prefix and append the freshly guessed block.
      new_result = tf.concat([
          result[:, :new_length, :, :],
          tf.reshape(
              samples[:, new_length, :block_size, :], [1, block_size, 1, 1])
      ], axis=1)
      with tf.control_dependencies([
          tf.py_func(print_info, [samples, result, length, new_length], [])
      ]):
        new_result = tf.identity(new_result)
      return new_result, new_length
    result = tf.zeros((1, 0, 1, 1), dtype=tf.int64)
    length = tf.squeeze(tf.zeros(1, dtype=tf.int32))
    result, length = tf.while_loop(
        while_exit_cond,
        infer_step,
        [result, length],
        shape_invariants=[
            tf.TensorShape([1, None, 1, 1]),
            tf.TensorShape([]),
        ],
        back_prop=False,
        parallel_iterations=1)
    result = result[:, :length, :, :]
    features["inputs"] = inputs_old
    return {
        "outputs": result,
        "scores": None,
    }
@registry.register_hparams
def image_transformer2d_base():
  """Base hyperparameter set shared by the 2D image transformer configs."""
  hp = common_hparams.basic_params1()
  hp.hidden_size = 512
  hp.batch_size = 1
  hp.max_length = 256
  hp.dropout = 0.0
  hp.clip_grad_norm = 0.  # i.e. no gradient clipping
  hp.optimizer_adam_epsilon = 1e-9
  hp.learning_rate_decay_scheme = "noam"
  hp.learning_rate = 0.1
  hp.learning_rate_warmup_steps = 4000
  hp.initializer_gain = 0.2
  hp.initializer = "uniform_unit_scaling"
  hp.weight_decay = 0.0
  hp.optimizer_adam_beta1 = 0.9
  hp.optimizer_adam_beta2 = 0.98
  hp.label_smoothing = 0.0
  hp.modality["targets"] = modalities.ModalityType.IDENTITY
  hp.norm_type = "layer"
  hp.layer_prepostprocess_dropout = 0.0
  hp.add_hparam("filter_size", 512)  # Add new ones like this.
  # Attention-related flags.
  hp.add_hparam("num_heads", 8)
  hp.add_hparam("attention_key_channels", 0)
  hp.add_hparam("attention_value_channels", 0)
  hp.add_hparam("ffn_layer", "conv_hidden_relu")
  # Every hyperparameter whose name ends in "dropout" is automatically
  # forced to 0.0 outside of training mode.
  hp.add_hparam("attention_dropout", 0.0)
  hp.add_hparam("relu_dropout", 0.0)
  hp.add_hparam("pos", "timing")  # timing, none
  hp.add_hparam("nbr_decoder_problems", 1)
  hp.add_hparam("num_output_layers", 3)
  hp.add_hparam("block_size", 1)
  # Image-size flags; the image is assumed square (height == width).
  hp.add_hparam("img_len", 32)
  hp.add_hparam("num_channels", 3)
  # Local attention parameters.
  hp.add_hparam("local_and_global_att", False)
  hp.add_hparam("block_length", 256)
  hp.add_hparam("block_width", 128)
  # Local 2D attention parameters.
  hp.add_hparam("query_shape", (16, 16))
  hp.add_hparam("memory_flange", (16, 32))
  hp.add_hparam("num_encoder_layers", 4)
  hp.add_hparam("num_decoder_layers", 8)
  # Attention-type parameters.
  hp.add_hparam("enc_attention_type", cia.AttentionType.GLOBAL)
  hp.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_2D)
  hp.add_hparam("block_raster_scan", False)
  # Multipos attention parameters.
  hp.add_hparam("q_filter_width", 1)
  hp.add_hparam("kv_filter_width", 1)
  hp.add_hparam("unconditional", False)  # unconditional generation
  # Relative embedding parameters.
  hp.add_hparam("shared_rel", False)
  return hp
@registry.register_hparams
def imagetransformer2d_base():
  """2D base config with local-2D decoder attention and raster scan."""
  hp = image_transformer2d_base()
  hp.dec_attention_type = cia.AttentionType.LOCAL_2D
  hp.block_raster_scan = True
  return hp
@registry.register_hparams
def imagetransformer2d_base_8l_8_16():
  """8 decoder layers with an (8, 16) memory flange."""
  hp = image_transformer2d_base()
  hp.num_decoder_layers = 8
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  return hp
@registry.register_hparams
def imagetransformer2d_base_8l_8_16_ls():
  """Same as 8l_8_16 but with label smoothing of 0.05."""
  hp = image_transformer2d_base()
  hp.num_decoder_layers = 8
  hp.label_smoothing = 0.05
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  return hp
@registry.register_hparams
def imagetransformer2d_base_8l_8_16_big():
  """8l_8_16 variant with a larger (1024) filter size."""
  hp = image_transformer2d_base()
  hp.filter_size = 1024
  hp.num_decoder_layers = 8
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  return hp
@registry.register_hparams
def imagetransformer2d_base_12l_8_16_big():
  """12-layer big variant that decodes with random sampling."""
  hp = image_transformer2d_base()
  hp.filter_size = 1024
  hp.num_decoder_layers = 12
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  hp.sampling_method = "random"
  hp.beam_size = 1
  return hp
@registry.register_hparams
def imagetransformer2d_base_8l_8_32_big():
  """hparams for 8 layer big 2d model for cifar 10."""
  hparams = image_transformer2d_base()
  hparams.num_heads = 16
  hparams.hidden_size = 1024
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.batch_size = 1
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.query_shape = (8, 16)
  hparams.memory_flange = (0, 32)
  # Plain bool rather than the original `int(False)`: the flag is declared
  # as a bool default in image_transformer2d_base and only its truthiness
  # matters, so behavior is unchanged and the type stays consistent.
  hparams.unconditional = False
  return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
  """Big 1d model for unconditional generation on imagenet.

  The original body contained dead stores: ``batch_size`` was assigned
  twice (both times to 1) and ``max_length`` was set to 3075 and then
  immediately overwritten with 14000. Only the effective values are kept.
  """
  hparams = image_transformer2d_base()
  hparams.unconditional = True
  hparams.hidden_size = 512
  hparams.batch_size = 1
  hparams.img_len = 64
  hparams.num_heads = 8
  hparams.filter_size = 2048
  hparams.max_length = 14000  # final value; 3075 was immediately overwritten
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
  hparams.query_shape = (16, 16)
  hparams.memory_flange = (8, 8)
  return hparams
@registry.register_hparams
def imagetransformer2d_base_8l_8_64_64by64():
  """hparams for 8 layer big 2d model for imagenet 64x64."""
  # Docstring fixed: the original said "12 layer" although this config
  # sets num_decoder_layers = 8 (the 12-layer variant is the function below).
  hparams = image_transformer2d_base()
  hparams.num_heads = 8
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.batch_size = 1
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.query_shape = (8, 64)
  hparams.memory_flange = (4, 32)
  # Plain bool rather than `int(False)`; only truthiness is used.
  hparams.unconditional = False
  hparams.max_length = 14000
  return hparams
@registry.register_hparams
def imagetransformer2d_base_12l_8_64_64by64():
  """hparams for 12 layer big 2d model for imagenet 64x64."""
  hparams = image_transformer2d_base()
  hparams.num_heads = 8
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 12
  hparams.batch_size = 1
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.query_shape = (8, 64)
  hparams.memory_flange = (4, 32)
  # Plain bool rather than `int(False)`; only truthiness is used.
  hparams.unconditional = False
  hparams.max_length = 14000
  return hparams
@registry.register_hparams
def imagetransformer2d_base_14l_8_16_big():
  """14-layer big variant with an (8, 16) memory flange."""
  hp = image_transformer2d_base()
  hp.filter_size = 1024
  hp.num_decoder_layers = 14
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  return hp
@registry.register_hparams
def imagetransformer2d_base_14l_8_16_big_uncond():
  """Unconditional version of the 14-layer big config."""
  hp = imagetransformer2d_base_14l_8_16_big()
  hp.unconditional = True
  return hp
@registry.register_hparams
def imagetransformer2d_base_8l_8_16_big_16k():
  """8l_8_16 big variant with a longer (16k step) warmup."""
  hp = image_transformer2d_base()
  hp.filter_size = 1024
  hp.num_decoder_layers = 8
  hp.batch_size = 1
  hp.memory_flange = (8, 16)
  hp.learning_rate_warmup_steps = 16000
  return hp
@registry.register_hparams
def img2img_transformer2d_base():
  """Base params for img2img 2d attention."""
  hp = image_transformer2d_base()
  # Learning-related flags.
  hp.layer_preprocess_sequence = "n"
  hp.layer_postprocess_sequence = "da"
  hp.learning_rate = 0.2  # this version benefits from a higher learning rate
  hp.layer_prepostprocess_dropout = 0.1
  hp.learning_rate_warmup_steps = 12000
  hp.filter_size = 2048
  hp.num_encoder_layers = 4
  hp.num_decoder_layers = 8
  hp.dec_attention_type = cia.AttentionType.LOCAL_2D
  hp.block_raster_scan = True
  return hp
@registry.register_hparams
def img2img_transformer2d_q1():
  """Local-2D variant with a (16, 16) query shape and wide flange."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.query_shape = (16, 16)
  hp.memory_flange = (16, 64)
  return hp
@registry.register_hparams
def img2img_transformer2d_q2():
  """q1 with a narrower (16, 32) memory flange."""
  hp = img2img_transformer2d_q1()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.query_shape = (16, 16)
  hp.memory_flange = (16, 32)
  return hp
@registry.register_hparams
def img2img_transformer2d_q3():
  """Current best hparams for local 2d."""
  hp = img2img_transformer2d_q1()
  hp.batch_size = 2
  hp.query_shape = (8, 16)
  hp.memory_flange = (8, 32)
  return hp
@registry.register_hparams
def img2img_transformer_base():
  """Base params for local1d attention."""
  hp = image_transformer2d_base()
  # Learning-related flags.
  hp.layer_preprocess_sequence = "n"
  hp.layer_postprocess_sequence = "da"
  hp.learning_rate = 0.2  # this version benefits from a higher learning rate
  hp.layer_prepostprocess_dropout = 0.1
  hp.learning_rate_warmup_steps = 12000
  hp.filter_size = 2048
  hp.num_encoder_layers = 4
  hp.num_decoder_layers = 8
  hp.block_length = 256
  hp.block_width = 256
  hp.dec_attention_type = cia.AttentionType.LOCAL_1D
  hp.block_raster_scan = False
  return hp
@registry.register_hparams
def img2img_transformer_b1():
  """Local-1D variant with block length 512."""
  hp = img2img_transformer_base()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.block_length = 512
  return hp
@registry.register_hparams
def img2img_transformer_b2():
  """Local-1D variant with block length 256."""
  hp = img2img_transformer_base()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.block_length = 256
  return hp
@registry.register_hparams
def img2img_transformer_b3():
  """Current best hparams for local 1d."""
  hp = img2img_transformer_base()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.block_length = 128
  hp.sampling_temp = 0.9
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs1():
  """b3 with block_size 1."""
  hp = img2img_transformer_b3()
  hp.block_size = 1
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs2():
  """b3 with block_size 2."""
  hp = img2img_transformer_b3()
  hp.block_size = 2
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs3():
  """b3 with block_size 3."""
  hp = img2img_transformer_b3()
  hp.block_size = 3
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs4():
  """b3 with block_size 4."""
  hp = img2img_transformer_b3()
  hp.block_size = 4
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs5():
  """b3 with block_size 5."""
  hp = img2img_transformer_b3()
  hp.block_size = 5
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs6():
  """b3 with block_size 6."""
  hp = img2img_transformer_b3()
  hp.block_size = 6
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs7():
  """b3 with block_size 7."""
  hp = img2img_transformer_b3()
  hp.block_size = 7
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs8():
  """b3 with block_size 8."""
  hp = img2img_transformer_b3()
  hp.block_size = 8
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs9():
  """b3 with block_size 9."""
  hp = img2img_transformer_b3()
  hp.block_size = 9
  return hp
@registry.register_hparams
def img2img_transformer_b3_bs10():
  """b3 with block_size 10."""
  hp = img2img_transformer_b3()
  hp.block_size = 10
  return hp
@registry.register_hparams
def img2img_transformer_dilated():
  """Dilated-attention variant of the local-1d base config."""
  hp = img2img_transformer_base()
  hp.add_hparam("num_memory_blocks", 1)
  hp.num_heads = 8
  hp.attention_key_channels = 0
  hp.attention_value_channels = 0
  hp.hidden_size = 512
  hp.filter_size = 2048
  hp.num_decoder_layers = 8
  hp.sampling_method = "random"
  hp.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
  hp.dec_attention_type = cia.AttentionType.DILATED
  hp.img_len = 64
  hp.block_length = 128
  hp.block_width = 128
  return hp
@registry.register_hparams
def imagetransformer2d_tiny():
  """Tiny 2-layer config for quick smoke tests."""
  hp = imagetransformer2d_base()
  hp.num_decoder_layers = 2
  hp.hidden_size = 64
  hp.batch_size = 1
  return hp
def update_hparams_for_tpu(hparams):
  """Adjust hparams in place so the model runs on TPU."""
  hparams.batch_size = 4
  hparams.optimizer = "true_adam"
  hparams.use_pad_remover = False  # where op not supported
@registry.register_hparams
def img2img_transformer_base_tpu():
  """Hparams for training img2img_transformer on tpu."""
  hp = img2img_transformer_base()
  update_hparams_for_tpu(hp)
  hp.batch_size = 2
  hp.num_heads = 4  # heads are expensive on tpu
  hp.num_decoder_layers = 8
  hp.num_encoder_layers = 4
  hp.shared_embedding_and_softmax_weights = False
  return hp
@registry.register_hparams
def img2img_transformer_tiny_tpu():
  """Tiny TPU config for quick smoke tests."""
  hp = img2img_transformer_base_tpu()
  hp.num_hidden_layers = 2
  hp.hidden_size = 16
  hp.batch_size = 2
  hp.num_heads = 2
  return hp
@registry.register_hparams
def img2img_transformer2d_n3():
  """Deeper (12-layer) decoder with a (16, 32) query shape."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.num_encoder_layers = 4
  hp.num_decoder_layers = 12
  hp.query_shape = (16, 32)
  hp.memory_flange = (16, 16)
  hp.layer_prepostprocess_dropout = 0.0
  return hp
@registry.register_hparams
def img2img_transformer2d_n31():
  """6-encoder / 12-decoder variant with a (16, 32) flange."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.num_encoder_layers = 6
  hp.num_decoder_layers = 12
  hp.num_heads = 8
  hp.query_shape = (16, 32)
  hp.memory_flange = (16, 32)
  return hp
@registry.register_hparams
def img2img_transformer2d_n24():
  """Wider (1024 hidden) variant with higher dropout."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.hidden_size = 1024
  hp.filter_size = 2048
  hp.layer_prepostprocess_dropout = 0.2
  hp.num_decoder_layers = 8
  hp.query_shape = (8, 16)
  hp.memory_flange = (8, 32)
  return hp
@registry.register_hparams
def img2img_transformer2d_n44():
  """8-decoder-layer variant with an (8, 16) query shape."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.num_decoder_layers = 8
  hp.query_shape = (8, 16)
  hp.memory_flange = (8, 32)
  hp.layer_prepostprocess_dropout = 0.1
  return hp
@registry.register_hparams
def img2img_transformer2d_n103():
  """Best config for img2img."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.num_decoder_layers = 12
  hp.num_encoder_layers = 6
  hp.query_shape = (8, 32)
  hp.memory_flange = (8, 64)
  hp.layer_prepostprocess_dropout = 0.1
  return hp
@registry.register_hparams
def img2img_transformer2d_tiny():
  """Tiny params."""
  hp = img2img_transformer2d_base()
  hp.num_decoder_layers = 2
  hp.hidden_size = 128
  hp.batch_size = 4
  hp.max_length = 128
  hp.attention_key_channels = 0
  hp.attention_value_channels = 0
  hp.filter_size = 128
  hp.num_heads = 4
  hp.pos = "timing"
  hp.img_len = 32
  return hp
@registry.register_hparams
def img2img_transformer_tiny():
  """Tiny params."""
  hp = img2img_transformer2d_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 128
  hp.batch_size = 4
  hp.max_length = 128
  hp.attention_key_channels = 0
  hp.attention_value_channels = 0
  hp.filter_size = 128
  hp.num_heads = 1
  hp.pos = "timing"
  return hp
| 30.746961 | 88 | 0.727018 |
acf795e2ab8d3193eabd16e95919ca12ef4511dc | 933 | py | Python | quasimodo/web_search/archit_module.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 16 | 2019-11-28T13:26:37.000Z | 2022-02-09T09:53:10.000Z | quasimodo/web_search/archit_module.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 1 | 2021-03-26T20:31:48.000Z | 2021-07-15T08:52:47.000Z | quasimodo/web_search/archit_module.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 3 | 2020-08-14T23:23:25.000Z | 2021-12-24T14:02:35.000Z | from quasimodo.data_structures.module_interface import ModuleInterface
from quasimodo.default_submodule_factory import DefaultSubmoduleFactory
import logging
class ArchitModule(ModuleInterface):
def __init__(self):
module_names = [
"web-count",
"web-regression",
"youtube-count",
"youtube-regression",
"flickr-count",
"flickr-regression",
"pinterest-count",
"pinterest-regression",
"istockphoto-count",
"istockphoto-regression"
]
super().__init__(
module_names, DefaultSubmoduleFactory())
self._name = "Archit Module"
def process(self, input_interface):
logging.info("Start the archit module")
for submodule in self._submodules:
input_interface = submodule.process(input_interface)
return input_interface
| 31.1 | 71 | 0.617363 |
acf7964fb2ae37234a54eabbb3eaed8175f95f8f | 1,436 | py | Python | app/user/serializers.py | dkamola/recipe-app-api | cfdd831f3287520c3bf376726beeb2430f73d74a | [
"MIT"
] | null | null | null | app/user/serializers.py | dkamola/recipe-app-api | cfdd831f3287520c3bf376726beeb2430f73d74a | [
"MIT"
] | null | null | null | app/user/serializers.py | dkamola/recipe-app-api | cfdd831f3287520c3bf376726beeb2430f73d74a | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer that validates user credentials for token auth."""

    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False,
    )

    def validate(self, attrs):
        """Authenticate with the supplied credentials; attach the user."""
        user = authenticate(
            request=self.context.get('request'),
            username=attrs.get('email'),
            password=attrs.get('password'),
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authentication')
        attrs['user'] = user
        return attrs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.