id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3372737 | from ftfy import bad_codecs, guess_bytes
def test_cesu8():
    """Both codec-name spellings must resolve to the same CESU-8 codec."""
    codec_a = bad_codecs.search_function('cesu8').__class__
    codec_b = bad_codecs.search_function('cesu-8').__class__
    assert codec_a == codec_b
    # A CESU-8 surrogate pair for an unassigned code point, plus the
    # modified-UTF-8 two-byte encoding of NUL.
    encoded = (b'\xed\xa6\x9d\xed\xbd\xb7 is an unassigned character, '
               b'and \xc0\x80 is null')
    expected = '\U00077777 is an unassigned character, and \x00 is null'
    assert encoded.decode('cesu8') == expected
def test_russian_crash():
    """Decoding cp1251-looking bytes must never raise, only degrade."""
    raw = b'\xe8\xed\xe2\xe5\xed\xf2\xe0\xf0\xe8\xe7\xe0\xf6\xe8\xff '
    # We don't care what either call returns — only that neither crashes.
    raw.decode('utf-8-variants', 'replace')
    guess_bytes(raw)
| StarcoderdataPython |
3446537 | <filename>tests/test_robustats.py
import unittest
import robustats
class TestWeightedMedian(unittest.TestCase):
    """Checks robustats.weighted_median against hand-computed expectations."""

    def test_same_weights(self):
        # Uniform weights reduce to the ordinary median.
        self.assertEqual(robustats.weighted_median([1., 2., 3.], [1., 1., 1.]), 2.)

    def test_edge_case(self):
        # Extra weight on the smallest value is not enough to shift the median.
        self.assertEqual(robustats.weighted_median([1., 2., 3.], [2., 1., 1.]), 2.)

    def test_dominant_weight(self):
        # The first element carries more than half of the total weight.
        self.assertEqual(robustats.weighted_median([1., 2., 3.], [3., 1., 1.]), 1.)

    def test_even_list(self):
        # Even-length input: the lower of the two central values is returned.
        self.assertEqual(robustats.weighted_median([1., 2.], [1., 1.]), 1.)

    def test_generic_1(self):
        values = [1.3, 5.1, 2.9, 1.9, 7.4]
        wts = [1.4, 0.9, 0.6, 1.2, 1.7]
        self.assertEqual(robustats.weighted_median(values, wts), 2.9)

    def test_generic_2(self):
        values = [4.2, 1.3, 7.4, 0.2, 4.6, 9.8, 5.5, 3.7]
        wts = [0.4, 2.1, 1.1, 1.6, 0.3, 0.9, 1.2, 1.7]
        self.assertEqual(robustats.weighted_median(values, wts), 3.7)

    def test_generic_3(self):
        values = [0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2]
        wts = [0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2]
        self.assertEqual(robustats.weighted_median(values, wts), 0.2)

    def test_generic_4(self):
        values = [
            0.49, 0.36, 0.36, 0.18, 0.75, 0.33, 0.68, 0.82, 0.38, 0.75, 0.61,
            0.02, 0.57, 0.23, 0.56, 0.03, 0.45, 0.44, 0.36, 0.92
        ]
        wts = [
            0.08, 0.22, 0.79, 0.84, 0.69, 0.84, 0.08, 0.87, 0.95, 0.27, 0.9,
            0.34, 0.75, 0.65, 0.02, 0.83, 0.32, 0.68, 0.92, 0.37
        ]
        self.assertEqual(robustats.weighted_median(values, wts), 0.38)

    def test_generic_5(self):
        values = [
            0.64, 0.95, 0.05, 0.08, 0.32, 0.25, 0.58, 0.69, 0.88, 0.53, 0.48,
            0.58, 0.32, 0.52, 0.42, 0.69, 0.43, 0.91, 0.15, 0.27, 0.31, 0.16,
            0.56, 0.68, 0.58, 0.04, 0.51, 0.06, 0.18, 0.03
        ]
        wts = [
            0.97, 0.2, 0.12, 0.01, 0.86, 0.29, 0.93, 0.96, 0.89, 0.03, 0.24,
            0.56, 0.81, 0.97, 0.48, 0.32, 0.33, 0.22, 0.8, 0.17, 0.96, 0.75,
            0.43, 0.24, 0.81, 0.4, 0.93, 0.43, 0.17, 0.13
        ]
        self.assertEqual(robustats.weighted_median(values, wts), 0.51)

    def test_generic_6(self):
        values = [
            0.19, 0.14, 0.15, 0.79, 0.36, 0.13, 0.44, 0.67, 0.44, 0.98, 0.2,
            0.11, 0.78, 0.67, 0.28, 0.29, 0.99, 0.55, 0.34, 0.36, 0.09, 0.13,
            0.56, 0.19, 0.08, 0.46, 0.62, 0.98, 0.46, 0.37, 0.09, 0.94, 0.84,
            0.64, 0.18, 0.64, 0.78, 0.88, 0.17, 0.28
        ]
        wts = [
            0.67, 0.39, 0.31, 0.06, 0.93, 0.21, 0.09, 0.29, 0.78, 0.42, 0.79,
            0.27, 0.77, 0.35, 0.11, 0.99, 0.05, 0.39, 0.34, 0.97, 0.82, 0.4,
            0.09, 0.77, 0.28, 0.03, 0.63, 0.67, 0.1, 0.3, 0.85, 0.44, 0.66,
            0.52, 0.15, 0.4, 0.82, 0.66, 0.21, 0.72
        ]
        self.assertEqual(robustats.weighted_median(values, wts), 0.36)
class TestMedcouple(unittest.TestCase):
    """Checks robustats.medcouple (a robust skewness measure)."""

    def test_homogeneous_sample_1(self):
        # A perfectly symmetric sample has zero medcouple.
        self.assertEqual(robustats.medcouple([1., 2., 3.]), 0.)

    def test_homogeneous_sample_2(self):
        self.assertEqual(robustats.medcouple([-1., 0., 1.]), 0.)

    def test_homogeneous_sample_3(self):
        self.assertEqual(robustats.medcouple([1., 2., 3., 4., 5., 6.]), 0.)

    def test_generic_1(self):
        # Mass piled at the low end gives maximal right skew.
        self.assertEqual(robustats.medcouple([1., 2., 2., 2., 3., 4., 5., 6.]), 1.)

    def test_generic_2(self):
        sample = [0.2, 0.17, 0.08, 0.16, 0.88, 0.86, 0.09, 0.54, 0.27, 0.14]
        self.assertEqual(robustats.medcouple(sample), 0.7692307692307692)

    def test_generic_3(self):
        sample = [
            0.61, 0.96, 0.76, 0.69, 0.18, 0.81, 0.32, 0.69, 0.91, 0.37, 0.0,
            0.66, 0.99, 0.59, 0.73, 0.41, 0.28, 0.45, 0.63, 0.03
        ]
        self.assertEqual(robustats.medcouple(sample), -0.3333333333333333)

    def test_generic_4(self):
        sample = [
            0.44, 0.66, 0.18, 0.51, 0.34, 0.7, 0.86, 0.97, 0.15, 0.53, 0.85,
            0.28, 0.13, 0.74, 0.52, 0.21, 0.87, 0.7, 0.17, 0.84, 0.86, 0.01,
            0.42, 0.27, 0.22, 0.88, 0.16, 0.57, 0.66, 0.88
        ]
        self.assertEqual(robustats.medcouple(sample), -0.014925373134328474)

    def test_generic_5(self):
        sample = [
            0.7, 0.49, 0.07, 0.4, 0.44, 0.36, 0.02, 0.88, 0.94, 0.9, 0.46,
            0.93, 0.81, 0.92, 0.32, 0.43, 0.64, 0.01, 0.37, 0.46, 0.47, 0.13,
            0.29, 0.1, 0.04, 0.9, 0.55, 0.27, 0.28, 0.46, 0.46, 0.1, 0.81,
            0.55, 0.95, 0.58, 0.12, 0.61, 0.92, 0.93
        ]
        self.assertEqual(robustats.medcouple(sample), 0.11363636363636356)
class TestMode(unittest.TestCase):
    """Checks robustats.mode (half-sample mode estimator)."""

    def test_homogeneous_sample(self):
        # With no repeats the estimator settles inside the densest half.
        self.assertEqual(robustats.mode([1., 2., 3., 4., 5.]), 2.)

    def test_generic_1(self):
        self.assertEqual(robustats.mode([1., 2., 3., 3., 4., 5.]), 3.)

    def test_generic_2(self):
        self.assertEqual(robustats.mode([1., 2., 2., 3., 3., 3., 4., 4., 5.]), 3.)

    def test_generic_3(self):
        self.assertEqual(
            robustats.mode([1., 2., 3., 3., 3., 4., 4., 4., 4., 5.]), 3.
        )

    def test_generic_4(self):
        self.assertEqual(
            robustats.mode([1., 2., 3., 3., 3., 4., 4., 4., 4., 4., 5., 6., 7.]),
            4.
        )

    def test_gaussian_1(self):
        # Gaussian distribution with mu = 1.0 and sigma = 0.2 --> mode = 1.0
        sample = [1.06, 1.25, 0.99, 1.07, 1.46, 1.02, 1.14, 1.04, 0.6, 1.0]
        self.assertEqual(robustats.mode(sample), 0.995)

    def test_gaussian_2(self):
        # Gaussian distribution with mu = 3.0 and sigma = 0.5 --> mode = 3.0
        sample = [
            2.89, 3.32, 3.19, 3.35, 3.84, 3.22, 3.46, 3.45, 3.06, 3.59, 2.44,
            3.51, 3.73, 3.35, 2.26, 2.0, 2.15, 3.25, 3.21, 3.4
        ]
        self.assertEqual(robustats.mode(sample), 3.35)

    def test_gaussian_3(self):
        # Gaussian distribution with mu = 10.0 and sigma = 1.0 --> mode = 10.0
        sample = [
            9.67, 10.43, 8.34, 8.47, 10.31, 11.01, 9.99, 10.72, 8.61, 11.33,
            10.87, 9.38, 8.79, 9.07, 10.7, 11.14, 9.73, 9.72, 9.8, 12.06,
            10.99, 10.12, 10.67, 9.71, 9.74, 9.85, 8.65, 8.71, 10.07, 8.54
        ]
        self.assertEqual(robustats.mode(sample), 9.715)

    def test_gamma_1(self):
        # Gamma distribution with alpha = 11.0 and beta = 10.0 --> mode = 1.0
        sample = [1.17, 0.82, 0.85, 2.1, 1.35, 1.21, 1.07, 0.81, 1.09, 1.27]
        self.assertEqual(robustats.mode(sample), 1.08)

    def test_gamma_2(self):
        # Gamma distribution with alpha = 21.0 and beta = 10.0 --> mode = 2.0
        sample = [
            2.04, 2.22, 2.27, 1.71, 2.45, 1.55, 2.38, 2.15, 1.95, 3.34, 2.74,
            1.92, 1.68, 2.1, 2.13, 2.6, 1.36, 2.25, 2.85, 1.55
        ]
        self.assertEqual(robustats.mode(sample), 2.26)

    def test_gamma_3(self):
        # Gamma distribution with alpha = 61.0 and beta = 20.0 --> mode = 3.0
        sample = [
            2.95, 2.29, 3.24, 3.57, 3.7, 3.0, 3.07, 3.73, 2.98, 2.96, 2.59,
            3.61, 3.09, 2.65, 2.37, 2.66, 2.88, 2.92, 2.3, 3.9, 3.49, 3.67,
            2.09, 2.98, 2.52, 3.37, 3.29, 3.18, 3.16, 2.68
        ]
        self.assertEqual(robustats.mode(sample), 2.98)
| StarcoderdataPython |
6442113 | import wtforms_widgets
from wtforms import ValidationError
from pycroft.model.user import User
class UserIDField(wtforms_widgets.fields.core.StringField):
    """A string field holding a user ID, validated against the User table.

    The previous no-op ``__init__``/``__call__`` overrides were pure
    pass-throughs and have been removed; inherited behavior is identical.
    """

    def pre_validate(self, form):
        """Reject IDs that do not belong to an existing user.

        :param form: the enclosing form (unused, required by the WTForms API).
        :raises ValidationError: if no user with ``self.data`` exists.
        """
        if User.get(self.data) is None:
            raise ValidationError("Ungültige Nutzer-ID.")
| StarcoderdataPython |
346750 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from lookup.api import views as lv
app_name = "lookup"

# Register the lookup viewset; the router derives the endpoint URLs and
# uses the app name as the route basename.
router = DefaultRouter()
router.register(r"lookup", lv.LookupViewSet, app_name)

urlpatterns = [
    path("", include(router.urls)),
]
3552052 | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import os, warnings
import numpy as np
# VerticaPy Modules
from verticapy import vDataFrame
from verticapy.learn.mlplot import *
from verticapy.learn.model_selection import *
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
from verticapy.learn.metrics import *
##
# ___ ___ ___ ___ ______ ________ _______ ___
# |" \ /" ||" \ /" | / " \ |" "\ /" "||" |
# \ \ // / \ \ // | // ____ \ (. ___ :)(: ______)|| |
# \\ \/. ./ /\\ \/. | / / ) :)|: \ ) || \/ | |: |
# \. // |: \. |(: (____/ // (| (___\ || // ___)_ \ |___
# \\ / |. \ /: | \ / |: :)(: "|( \_|: \
# \__/ |___|\__/|___| \"_____/ (________/ \_______) \_______)
#
#
# ---#
class vModel:
"""
---------------------------------------------------------------------------
Main Class for Vertica Model
"""
# ---#
def __repr__(self):
"""
---------------------------------------------------------------------------
Returns the model Representation.
"""
# Any failure while building the representation is swallowed by the bare
# except at the bottom and a minimal "<type>" tag is returned instead.
try:
rep = ""
# In-database models: ask Vertica itself for the summary string.
if self.type not in (
"DBSCAN",
"NearestCentroid",
"VAR",
"SARIMAX",
"LocalOutlierFactor",
"KNeighborsRegressor",
"KNeighborsClassifier",
"CountVectorizer",
):
# KernelDensity is backed by an RF model stored under tree_name.
name = self.tree_name if self.type in ("KernelDensity") else self.name
try:
# GET_MODEL_SUMMARY needs Vertica >= 9.0.0.
version(cursor=self.cursor, condition=[9, 0, 0])
executeSQL(
self.cursor,
"SELECT GET_MODEL_SUMMARY(USING PARAMETERS model_name = '{}')".format(
name
),
"Summarizing the model.",
)
except:
# Fallback for older server versions.
executeSQL(
self.cursor,
"SELECT SUMMARIZE_MODEL('{}')".format(name),
"Summarizing the model.",
)
return self.cursor.fetchone()[0]
# Python-side models: assemble the details from stored attributes.
elif self.type == "DBSCAN":
rep = "=======\ndetails\n=======\nNumber of Clusters: {}\nNumber of Outliers: {}".format(
self.n_cluster_, self.n_noise_
)
elif self.type == "LocalOutlierFactor":
rep = "=======\ndetails\n=======\nNumber of Errors: {}".format(
self.n_errors_
)
elif self.type == "NearestCentroid":
rep = "=======\ndetails\n=======\n" + self.centroids_.__repr__()
elif self.type == "VAR":
# One coefficient table per endogenous variable.
rep = "=======\ndetails\n======="
for idx, elem in enumerate(self.X):
rep += "\n\n # " + str(elem) + "\n\n" + self.coef_[idx].__repr__()
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
rep += "\nts : {}".format(self.ts)
elif self.type == "SARIMAX":
rep = "=======\ndetails\n======="
rep += "\n\n# Coefficients\n\n" + self.coef_.__repr__()
# MA components are only shown when they were actually fitted.
if self.ma_piq_:
rep += "\n\n# MA PIQ\n\n" + self.ma_piq_.__repr__()
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\ny : {}".format(self.y)
rep += "\nts : {}".format(self.ts)
if self.exogenous:
rep += "\nExogenous Variables : {}".format(
", ".join(self.exogenous)
)
if self.ma_avg_:
rep += "\nMA AVG : {}".format(self.ma_avg_)
elif self.type == "CountVectorizer":
rep = "=======\ndetails\n======="
if self.vocabulary_:
# Truncate long vocabularies to the first 100 entries.
voc = [str(elem) for elem in self.vocabulary_]
if len(voc) > 100:
voc = voc[0:100] + [
"... ({} more)".format(len(self.vocabulary_) - 100)
]
rep += "\n\n# Vocabulary\n\n" + ", ".join(voc)
if self.stop_words_:
rep += "\n\n# Stop Words\n\n" + ", ".join(
[str(elem) for elem in self.stop_words_]
)
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
# Shared footer for the neighborhood-based models.
if self.type in (
"DBSCAN",
"NearestCentroid",
"LocalOutlierFactor",
"KNeighborsRegressor",
"KNeighborsClassifier",
):
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
if self.type in (
"NearestCentroid",
"KNeighborsRegressor",
"KNeighborsClassifier",
):
rep += "\ny : {}".format(self.y)
return rep
except:
# Last-resort representation when attributes are missing/unfitted.
return "<{}>".format(self.type)
# ---#
def deploySQL(self, X: list = []):
    """
    ---------------------------------------------------------------------------
    Returns the SQL code needed to deploy the model.

    Parameters
    ----------
    X: list, optional
        List of the columns used to deploy the model. If empty, the model
        predictors will be used.

    Returns
    -------
    str
        the SQL code needed to deploy the model.
    """
    # Neighborhood-based models have no in-database prediction function.
    if self.type in ("DBSCAN", "LocalOutlierFactor"):
        raise FunctionError(
            "Method 'deploySQL' for '{}' doesn't exist.".format(self.type)
        )
    check_types([("X", X, [list],)])
    # KernelDensity is backed by an RF model stored under tree_name.
    model_name = self.tree_name if self.type in ("KernelDensity") else self.name
    quoted_cols = [str_column(col) for col in X]
    predict_fun = self.get_model_fun()[1]
    template = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')"
    return template.format(
        predict_fun, ", ".join(quoted_cols if quoted_cols else self.X), model_name
    )
# ---#
def drop(self):
    """
    ---------------------------------------------------------------------------
    Drops the model from the Vertica DB.
    """
    # Record (and thereby silence) any warning raised during the drop;
    # removal is best-effort.
    with warnings.catch_warnings(record=True):
        drop_model(self.name, self.cursor)
# ---#
def features_importance(
self, ax=None, tree_id: int = None, show: bool = True, **style_kwds,
):
"""
---------------------------------------------------------------------------
Computes the model features importance.
Parameters
----------
ax: Matplotlib axes object, optional
The axes to plot on.
tree_id: int
Tree ID in case of Tree Based models.
show: bool
If set to True, draw the features importance.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
# Tree-based models: importance is read from RF_PREDICTOR_IMPORTANCE
# and normalized to a percentage over all predictors.
if self.type in (
"RandomForestClassifier",
"RandomForestRegressor",
"KernelDensity",
):
check_types([("tree_id", tree_id, [int])])
# KernelDensity is backed by an RF model stored under tree_name.
name = self.tree_name if self.type in ("KernelDensity") else self.name
version(cursor=self.cursor, condition=[9, 1, 1])
tree_id = "" if not (tree_id) else ", tree_id={}".format(tree_id)
query = "SELECT predictor_name AS predictor, ROUND(100 * importance_value / SUM(importance_value) OVER (), 2)::float AS importance, SIGN(importance_value)::int AS sign FROM (SELECT RF_PREDICTOR_IMPORTANCE ( USING PARAMETERS model_name = '{}'{})) VERTICAPY_SUBTABLE ORDER BY 2 DESC;".format(
name, tree_id,
)
print_legend = False
# Linear models: importance is |coefficient * (max - min)| per predictor,
# again normalized to a percentage.
elif self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
"SARIMAX",
):
if self.type == "SARIMAX":
# Substitute the stored placeholders to rebuild the training
# relation used during the fit.
relation = (
self.transform_relation.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_ts]", self.ts)
.replace(
"[VerticaPy_key_columns]", ", ".join(self.exogenous + [self.ts])
)
.format(self.input_relation)
)
else:
relation = self.input_relation
version(cursor=self.cursor, condition=[8, 1, 1])
query = "SELECT predictor, ROUND(100 * importance / SUM(importance) OVER(), 2) AS importance, sign FROM "
query += "(SELECT stat.predictor AS predictor, ABS(coefficient * (max - min))::float AS importance, SIGN(coefficient)::int AS sign FROM "
query += '(SELECT LOWER("column") AS predictor, min, max FROM (SELECT SUMMARIZE_NUMCOL({}) OVER() '.format(
", ".join(self.X)
)
query += " FROM {}) VERTICAPY_SUBTABLE) stat NATURAL JOIN ({})".format(
relation, self.coef_.to_sql()
)
query += " coeff) importance_t ORDER BY 2 DESC;"
print_legend = True
else:
raise FunctionError(
"Method 'features_importance' for '{}' doesn't exist.".format(self.type)
)
executeSQL(self.cursor, query, "Computing Features Importance.")
result = self.cursor.fetchall()
# Index the per-predictor importance and sign by predictor name.
coeff_importances, coeff_sign = {}, {}
for elem in result:
coeff_importances[elem[0]] = elem[1]
coeff_sign[elem[0]] = elem[2]
# Optionally draw the bar chart before returning the table.
if show:
plot_importance(
coeff_importances,
coeff_sign,
print_legend=print_legend,
ax=ax,
**style_kwds,
)
importances = {"index": ["importance", "sign"]}
for elem in coeff_importances:
importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
return tablesample(values=importances).transpose()
# ---#
def get_attr(self, attr_name: str = ""):
"""
---------------------------------------------------------------------------
Returns the model attribute.
Parameters
----------
attr_name: str, optional
Attribute Name.
Returns
-------
tablesample
model attribute
"""
if self.type not in ("DBSCAN", "LocalOutlierFactor", "VAR", "SARIMAX"):
name = self.tree_name if self.type in ("KernelDensity") else self.name
version(cursor=self.cursor, condition=[8, 1, 1])
result = to_tablesample(
query="SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}'{})".format(
name, ", attr_name = '{}'".format(attr_name) if attr_name else "",
),
cursor=self.cursor,
title="Getting Model Attributes.",
)
return result
elif self.type in ("DBSCAN"):
if attr_name == "n_cluster":
return self.n_cluster_
elif attr_name == "n_noise":
return self.n_noise_
elif not (attr_name):
result = tablesample(
values={
"attr_name": ["n_cluster", "n_noise"],
"value": [self.n_cluster_, self.n_noise_],
},
name="Attributes",
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("LocalOutlierFactor"):
if attr_name == "n_errors":
return self.n_errors_
elif not (attr_name):
result = tablesample(
values={"attr_name": ["n_errors"], "value": [self.n_errors_]},
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("SARIMAX"):
if attr_name == "coef":
return self.coef_
elif attr_name == "ma_avg":
return self.ma_avg_
elif attr_name == "ma_piq":
return self.ma_piq_
elif not (attr_name):
result = tablesample(
values={"attr_name": ["coef", "ma_avg", "ma_piq"]},
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("VAR"):
if attr_name == "coef":
return self.coef_
elif not (attr_name):
result = tablesample(values={"attr_name": ["coef"]},)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("KernelDensity"):
if attr_name == "map":
return self.map_
elif not (attr_name):
result = tablesample(values={"attr_name": ["map"]},)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
else:
raise FunctionError(
"Method 'get_attr' for '{}' doesn't exist.".format(self.type)
)
# ---#
def get_model_fun(self):
"""
---------------------------------------------------------------------------
Returns the Vertica associated functions.
Returns
-------
tuple
(FIT, PREDICT, INVERSE)
"""
if self.type in ("LinearRegression", "SARIMAX"):
return ("LINEAR_REG", "PREDICT_LINEAR_REG", "")
elif self.type == "LogisticRegression":
return ("LOGISTIC_REG", "PREDICT_LOGISTIC_REG", "")
elif self.type == "LinearSVC":
return ("SVM_CLASSIFIER", "PREDICT_SVM_CLASSIFIER", "")
elif self.type == "LinearSVR":
return ("SVM_REGRESSOR", "PREDICT_SVM_REGRESSOR", "")
elif self.type in ("RandomForestRegressor", "KernelDensity"):
return ("RF_REGRESSOR", "PREDICT_RF_REGRESSOR", "")
elif self.type == "RandomForestClassifier":
return ("RF_CLASSIFIER", "PREDICT_RF_CLASSIFIER", "")
elif self.type == "NaiveBayes":
return ("NAIVE_BAYES", "PREDICT_NAIVE_BAYES", "")
elif self.type == "KMeans":
return ("KMEANS", "APPLY_KMEANS", "")
elif self.type == "BisectingKMeans":
return ("BISECTING_KMEANS", "APPLY_BISECTING_KMEANS", "")
elif self.type == "PCA":
return ("PCA", "APPLY_PCA", "APPLY_INVERSE_PCA")
elif self.type == "SVD":
return ("SVD", "APPLY_SVD", "APPLY_INVERSE_SVD")
elif self.type == "Normalizer":
return ("NORMALIZE_FIT", "APPLY_NORMALIZE", "REVERSE_NORMALIZE")
elif self.type == "OneHotEncoder":
return ("ONE_HOT_ENCODER_FIT", "APPLY_ONE_HOT_ENCODER", "")
else:
return ("", "", "")
# ---#
def get_params(self):
"""
---------------------------------------------------------------------------
Returns the model Parameters.
Returns
-------
dict
model parameters
"""
# Returns the live dict (no copy): mutating it mutates the model state.
return self.parameters
# ---#
def plot(
self, max_nb_points: int = 100, ax=None, **style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Model.
Parameters
----------
max_nb_points: int
Maximum number of points to display.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
"""
check_types([("max_nb_points", max_nb_points, [int, float],)])
# Linear models: draw the fitted hyperplane/curve over the data.
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
coefficients = self.coef_.values["coefficient"]
if self.type == "LogisticRegression":
return logit_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
elif self.type == "LinearSVC":
return svm_classifier_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
else:
return regression_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
# Clustering models: scatter plot colored by the assigned cluster.
elif self.type in ("KMeans", "BisectingKMeans", "DBSCAN"):
if self.type != "DBSCAN":
# KMeans-like models predict on the fly into a temporary column.
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
self.predict(vdf, name="kmeans_cluster")
catcol = "kmeans_cluster"
else:
# DBSCAN materialized its result relation under self.name at fit time.
vdf = vdf_from_relation(self.name, cursor=self.cursor)
catcol = "dbscan_cluster"
if 2 <= len(self.X) <= 3:
return vdf.scatter(
columns=self.X,
catcol=catcol,
max_cardinality=100,
max_nb_points=max_nb_points,
ax=ax,
**style_kwds,
)
else:
raise Exception("Clustering Plots are only available in 2D or 3D.")
# Decomposition models: scatter the first 2-3 transformed components.
elif self.type in ("PCA", "SVD"):
if 2 <= self.parameters["n_components"] or (
self.parameters["n_components"] <= 0 and len(self.X) > 1
):
X = [
"col{}".format(i + 1)
for i in range(min(max(self.parameters["n_components"], 2), 3))
]
return self.transform().scatter(
columns=X, max_nb_points=max_nb_points, ax=ax, **style_kwds,
)
else:
raise Exception("Decomposition Plots are not available in 1D")
elif self.type in ("LocalOutlierFactor"):
query = "SELECT COUNT(*) FROM {}".format(self.name)
# NOTE(review): this sampling ratio is computed but never passed to
# lof_plot below (a hard-coded 100 is used), and the local name shadows
# the imported 'tablesample' — looks like a bug; confirm intent.
tablesample = 100 * min(
float(max_nb_points / self.cursor.execute(query).fetchone()[0]), 1
)
return lof_plot(
self.name, self.X, "lof_score", self.cursor, 100, ax=ax, **style_kwds,
)
else:
raise FunctionError(
"Method 'plot' for '{}' doesn't exist.".format(self.type)
)
# ---#
def set_cursor(self, cursor):
"""
---------------------------------------------------------------------------
Sets a new DB cursor. It can be very useful if the connection to the DB is
lost.
Parameters
----------
cursor: DBcursor
New cursor.
Returns
-------
model
self
"""
check_cursor(cursor)
# Cheap probe query: fail fast here if the new cursor/connection is dead.
cursor.execute("SELECT 1;")
self.cursor = cursor
return self
# ---#
def set_params(self, parameters: dict = {}):
"""
---------------------------------------------------------------------------
Sets the parameters of the model.
Parameters
----------
parameters: dict, optional
New parameters.
"""
try:
self.parameters
except:
self.parameters = {}
model_parameters = {}
default_parameters = default_model_parameters(self.type)
if self.type in ("LinearRegression", "LogisticRegression", "SARIMAX", "VAR"):
if "solver" in parameters:
check_types([("solver", parameters["solver"], [str],)])
assert str(parameters["solver"]).lower() in [
"newton",
"bfgs",
"cgd",
], ParameterError(
"Incorrect parameter 'solver'.\nThe optimizer must be in (Newton | BFGS | CGD), found '{}'.".format(
parameters["solver"]
)
)
model_parameters["solver"] = parameters["solver"]
elif "solver" not in self.parameters:
model_parameters["solver"] = default_parameters["solver"]
else:
model_parameters["solver"] = self.parameters["solver"]
if "penalty" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("penalty", parameters["penalty"], [str],)])
assert str(parameters["penalty"]).lower() in [
"none",
"l1",
"l2",
"enet",
], ParameterError(
"Incorrect parameter 'penalty'.\nThe regularization must be in (None | L1 | L2 | ENet), found '{}'.".format(
parameters["penalty"]
)
)
model_parameters["penalty"] = parameters["penalty"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "penalty" not in self.parameters
):
model_parameters["penalty"] = default_parameters["penalty"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["penalty"] = self.parameters["penalty"]
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "l1_ratio" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("l1_ratio", parameters["l1_ratio"], [int, float],)])
assert 0 <= parameters["l1_ratio"] <= 1, ParameterError(
"Incorrect parameter 'l1_ratio'.\nThe ENet Mixture must be between 0 and 1."
)
model_parameters["l1_ratio"] = parameters["l1_ratio"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "l1_ratio" not in self.parameters
):
model_parameters["l1_ratio"] = default_parameters["l1_ratio"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["l1_ratio"] = self.parameters["l1_ratio"]
if "C" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("C", parameters["C"], [int, float],)])
assert 0 <= parameters["C"], ParameterError(
"Incorrect parameter 'C'.\nThe regularization parameter value must be positive."
)
model_parameters["C"] = parameters["C"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "C" not in self.parameters
):
model_parameters["C"] = default_parameters["C"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["C"] = self.parameters["C"]
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "p" in parameters and self.type in ("SARIMAX", "VAR"):
check_types([("p", parameters["p"], [int, float],)])
assert 0 <= parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe order of the AR part must be positive."
)
model_parameters["p"] = parameters["p"]
elif self.type in ("SARIMAX", "VAR") and "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
elif self.type in ("SARIMAX", "VAR"):
model_parameters["p"] = self.parameters["p"]
if "q" in parameters and self.type == "SARIMAX":
check_types([("q", parameters["q"], [int, float],)])
assert 0 <= parameters["q"], ParameterError(
"Incorrect parameter 'q'.\nThe order of the MA part must be positive."
)
model_parameters["q"] = parameters["q"]
elif self.type == "SARIMAX" and "q" not in self.parameters:
model_parameters["q"] = default_parameters["q"]
elif self.type == "SARIMAX":
model_parameters["q"] = self.parameters["q"]
if "d" in parameters and self.type == "SARIMAX":
check_types([("d", parameters["d"], [int, float],)])
assert 0 <= parameters["d"], ParameterError(
"Incorrect parameter 'd'.\nThe order of the I part must be positive."
)
model_parameters["d"] = parameters["d"]
elif self.type == "SARIMAX" and "d" not in self.parameters:
model_parameters["d"] = default_parameters["d"]
elif self.type == "SARIMAX":
model_parameters["d"] = self.parameters["d"]
if "P" in parameters and self.type == "SARIMAX":
check_types([("P", parameters["P"], [int, float],)])
assert 0 <= parameters["P"], ParameterError(
"Incorrect parameter 'P'.\nThe seasonal order of the AR part must be positive."
)
model_parameters["P"] = parameters["P"]
elif self.type == "SARIMAX" and "P" not in self.parameters:
model_parameters["P"] = default_parameters["P"]
elif self.type == "SARIMAX":
model_parameters["P"] = self.parameters["P"]
if "Q" in parameters and self.type == "SARIMAX":
check_types([("Q", parameters["Q"], [int, float],)])
assert 0 <= parameters["Q"], ParameterError(
"Incorrect parameter 'Q'.\nThe seasonal order of the MA part must be positive."
)
model_parameters["Q"] = parameters["Q"]
elif self.type == "SARIMAX" and "Q" not in self.parameters:
model_parameters["Q"] = default_parameters["Q"]
elif self.type == "SARIMAX":
model_parameters["Q"] = self.parameters["Q"]
if "D" in parameters and self.type == "SARIMAX":
check_types([("D", parameters["D"], [int, float],)])
assert 0 <= parameters["D"], ParameterError(
"Incorrect parameter 'D'.\nThe seasonal order of the I part must be positive."
)
model_parameters["D"] = parameters["D"]
elif self.type == "SARIMAX" and "D" not in self.parameters:
model_parameters["D"] = default_parameters["D"]
elif self.type == "SARIMAX":
model_parameters["D"] = self.parameters["D"]
if "s" in parameters and self.type == "SARIMAX":
check_types([("s", parameters["s"], [int, float],)])
assert 0 <= parameters["s"], ParameterError(
"Incorrect parameter 's'.\nThe Span of the seasonality must be positive."
)
model_parameters["s"] = parameters["s"]
elif self.type == "SARIMAX" and "s" not in self.parameters:
model_parameters["s"] = default_parameters["s"]
elif self.type == "SARIMAX":
model_parameters["s"] = self.parameters["s"]
if "max_pik" in parameters and self.type == "SARIMAX":
check_types([("max_pik", parameters["max_pik"], [int, float],)])
assert 0 <= parameters["max_pik"], ParameterError(
"Incorrect parameter 'max_pik'.\nThe Maximum number of inverse MA coefficients took during the computation must be positive."
)
model_parameters["max_pik"] = parameters["max_pik"]
elif self.type == "SARIMAX" and "max_pik" not in self.parameters:
model_parameters["max_pik"] = default_parameters["max_pik"]
elif self.type == "SARIMAX":
model_parameters["max_pik"] = self.parameters["max_pik"]
if "papprox_ma" in parameters and self.type == "SARIMAX":
check_types([("papprox_ma", parameters["papprox_ma"], [int, float],)])
assert 0 <= parameters["papprox_ma"], ParameterError(
"Incorrect parameter 'papprox_ma'.\nThe Maximum number of AR(P) used to approximate the MA during the computation must be positive."
)
model_parameters["papprox_ma"] = parameters["papprox_ma"]
elif self.type == "SARIMAX" and "papprox_ma" not in self.parameters:
model_parameters["papprox_ma"] = default_parameters["papprox_ma"]
elif self.type == "SARIMAX":
model_parameters["papprox_ma"] = self.parameters["papprox_ma"]
elif self.type in ("KernelDensity"):
if "bandwidth" in parameters:
check_types([("bandwidth", parameters["bandwidth"], [int, float],)])
assert 0 <= parameters["bandwidth"], ParameterError(
"Incorrect parameter 'bandwidth'.\nThe bandwidth must be positive."
)
model_parameters["bandwidth"] = parameters["bandwidth"]
elif "bandwidth" not in self.parameters:
model_parameters["bandwidth"] = default_parameters["bandwidth"]
else:
model_parameters["bandwidth"] = self.parameters["bandwidth"]
if "kernel" in parameters:
check_types(
[
(
"kernel",
parameters["kernel"],
["gaussian", "logistic", "sigmoid", "silverman"],
)
]
)
assert parameters["kernel"] in [
"gaussian",
"logistic",
"sigmoid",
"silverman",
], ParameterError(
"Incorrect parameter 'kernel'.\nThe parameter 'kernel' must be in [gaussian|logistic|sigmoid|silverman], found '{}'.".format(
kernel
)
)
model_parameters["kernel"] = parameters["kernel"]
elif "kernel" not in self.parameters:
model_parameters["kernel"] = default_parameters["kernel"]
else:
model_parameters["kernel"] = self.parameters["kernel"]
if "max_leaf_nodes" in parameters:
check_types(
[
(
"max_leaf_nodes",
parameters["max_leaf_nodes"],
[int, float],
False,
)
]
)
assert 1 <= parameters["max_leaf_nodes"] <= 1e9, ParameterError(
"Incorrect parameter 'max_leaf_nodes'.\nThe maximum number of leaf nodes must be between 1 and 1e9, inclusive."
)
model_parameters["max_leaf_nodes"] = parameters["max_leaf_nodes"]
elif "max_leaf_nodes" not in self.parameters:
model_parameters["max_leaf_nodes"] = default_parameters[
"max_leaf_nodes"
]
else:
model_parameters["max_leaf_nodes"] = self.parameters["max_leaf_nodes"]
if "max_depth" in parameters:
check_types([("max_depth", parameters["max_depth"], [int],)])
assert 1 <= parameters["max_depth"] <= 100, ParameterError(
"Incorrect parameter 'max_depth'.\nThe maximum depth for growing each tree must be between 1 and 100, inclusive."
)
model_parameters["max_depth"] = parameters["max_depth"]
elif "max_depth" not in self.parameters:
model_parameters["max_depth"] = default_parameters["max_depth"]
else:
model_parameters["max_depth"] = self.parameters["max_depth"]
if "min_samples_leaf" in parameters:
check_types(
[
(
"min_samples_leaf",
parameters["min_samples_leaf"],
[int, float],
False,
)
]
)
assert 1 <= parameters["min_samples_leaf"] <= 1e6, ParameterError(
"Incorrect parameter 'min_samples_leaf'.\nThe minimum number of samples each branch must have after splitting a node must be between 1 and 1e6, inclusive."
)
model_parameters["min_samples_leaf"] = parameters["min_samples_leaf"]
elif "min_samples_leaf" not in self.parameters:
model_parameters["min_samples_leaf"] = default_parameters[
"min_samples_leaf"
]
else:
model_parameters["min_samples_leaf"] = self.parameters[
"min_samples_leaf"
]
if "nbins" in parameters:
check_types([("nbins", parameters["nbins"], [int, float],)])
assert 2 <= parameters["nbins"], ParameterError(
"Incorrect parameter 'nbins'.\nThe number of bins to use for continuous features must be greater than 2."
)
model_parameters["nbins"] = parameters["nbins"]
elif "nbins" not in self.parameters:
model_parameters["nbins"] = default_parameters["nbins"]
else:
model_parameters["nbins"] = self.parameters["nbins"]
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if "xlim" in parameters:
check_types([("xlim", parameters["xlim"], [list],)])
model_parameters["xlim"] = parameters["xlim"]
elif "xlim" not in self.parameters:
model_parameters["xlim"] = default_parameters["xlim"]
else:
model_parameters["xlim"] = self.parameters["xlim"]
elif self.type in ("RandomForestClassifier", "RandomForestRegressor"):
if "n_estimators" in parameters:
check_types([("n_estimators", parameters["n_estimators"], [int],)])
assert 0 <= parameters["n_estimators"] <= 1000, ParameterError(
"Incorrect parameter 'n_estimators'.\nThe number of trees must be lesser than 1000."
)
model_parameters["n_estimators"] = parameters["n_estimators"]
elif "n_estimators" not in self.parameters:
model_parameters["n_estimators"] = default_parameters["n_estimators"]
else:
model_parameters["n_estimators"] = self.parameters["n_estimators"]
if "max_features" in parameters:
check_types(
[
(
"max_features",
parameters["max_features"],
[int, float, str],
False,
)
]
)
if isinstance(parameters["max_features"], str):
assert str(parameters["max_features"]).lower() in [
"max",
"auto",
], ParameterError(
"Incorrect parameter 'init'.\nThe maximum number of features to test must be in (max | auto) or an integer, found '{}'.".format(
parameters["max_features"]
)
)
model_parameters["max_features"] = parameters["max_features"]
elif "max_features" not in self.parameters:
model_parameters["max_features"] = default_parameters["max_features"]
else:
model_parameters["max_features"] = self.parameters["max_features"]
if "max_leaf_nodes" in parameters:
check_types(
[
(
"max_leaf_nodes",
parameters["max_leaf_nodes"],
[int, float],
False,
)
]
)
assert 1 <= parameters["max_leaf_nodes"] <= 1e9, ParameterError(
"Incorrect parameter 'max_leaf_nodes'.\nThe maximum number of leaf nodes must be between 1 and 1e9, inclusive."
)
model_parameters["max_leaf_nodes"] = parameters["max_leaf_nodes"]
elif "max_leaf_nodes" not in self.parameters:
model_parameters["max_leaf_nodes"] = default_parameters[
"max_leaf_nodes"
]
else:
model_parameters["max_leaf_nodes"] = self.parameters["max_leaf_nodes"]
if "sample" in parameters:
check_types([("sample", parameters["sample"], [int, float],)])
assert 0 <= parameters["sample"] <= 1, ParameterError(
"Incorrect parameter 'sample'.\nThe portion of the input data set that is randomly picked for training each tree must be between 0.0 and 1.0, inclusive."
)
model_parameters["sample"] = parameters["sample"]
elif "sample" not in self.parameters:
model_parameters["sample"] = default_parameters["sample"]
else:
model_parameters["sample"] = self.parameters["sample"]
if "max_depth" in parameters:
check_types([("max_depth", parameters["max_depth"], [int],)])
assert 1 <= parameters["max_depth"] <= 100, ParameterError(
"Incorrect parameter 'max_depth'.\nThe maximum depth for growing each tree must be between 1 and 100, inclusive."
)
model_parameters["max_depth"] = parameters["max_depth"]
elif "max_depth" not in self.parameters:
model_parameters["max_depth"] = default_parameters["max_depth"]
else:
model_parameters["max_depth"] = self.parameters["max_depth"]
if "min_samples_leaf" in parameters:
check_types(
[
(
"min_samples_leaf",
parameters["min_samples_leaf"],
[int, float],
False,
)
]
)
assert 1 <= parameters["min_samples_leaf"] <= 1e6, ParameterError(
"Incorrect parameter 'min_samples_leaf'.\nThe minimum number of samples each branch must have after splitting a node must be between 1 and 1e6, inclusive."
)
model_parameters["min_samples_leaf"] = parameters["min_samples_leaf"]
elif "min_samples_leaf" not in self.parameters:
model_parameters["min_samples_leaf"] = default_parameters[
"min_samples_leaf"
]
else:
model_parameters["min_samples_leaf"] = self.parameters[
"min_samples_leaf"
]
if "min_info_gain" in parameters:
check_types(
[
(
"min_info_gain",
parameters["min_info_gain"],
[int, float],
False,
)
]
)
assert 0 <= parameters["min_info_gain"] <= 1, ParameterError(
"Incorrect parameter 'min_info_gain'.\nThe minimum threshold for including a split must be between 0.0 and 1.0, inclusive."
)
model_parameters["min_info_gain"] = parameters["min_info_gain"]
elif "min_info_gain" not in self.parameters:
model_parameters["min_info_gain"] = default_parameters["min_info_gain"]
else:
model_parameters["min_info_gain"] = self.parameters["min_info_gain"]
if "nbins" in parameters:
check_types([("nbins", parameters["nbins"], [int, float],)])
assert 2 <= parameters["nbins"] <= 1000, ParameterError(
"Incorrect parameter 'nbins'.\nThe number of bins to use for continuous features must be between 2 and 1000, inclusive."
)
model_parameters["nbins"] = parameters["nbins"]
elif "nbins" not in self.parameters:
model_parameters["nbins"] = default_parameters["nbins"]
else:
model_parameters["nbins"] = self.parameters["nbins"]
elif self.type in ("NaiveBayes",):
if "alpha" in parameters:
check_types([("alpha", parameters["alpha"], [int, float],)])
assert 0 <= parameters["alpha"], ParameterError(
"Incorrect parameter 'alpha'.\nThe smoothing factor must be positive."
)
model_parameters["alpha"] = parameters["alpha"]
elif "alpha" not in self.parameters:
model_parameters["alpha"] = default_parameters["alpha"]
else:
model_parameters["alpha"] = self.parameters["alpha"]
if "nbtype" in parameters:
check_types([("nbtype", parameters["nbtype"], [str],)])
if isinstance(parameters["nbtype"], str):
assert str(parameters["nbtype"]).lower() in [
"bernoulli",
"categorical",
"multinomial",
"gaussian",
"auto",
], ParameterError(
"Incorrect parameter 'nbtype'.\nThe Naive Bayes type must be in (bernoulli | categorical | multinomial | gaussian | auto), found '{}'.".format(
parameters["init"]
)
)
model_parameters["nbtype"] = parameters["nbtype"]
elif "nbtype" not in self.parameters:
model_parameters["nbtype"] = default_parameters["nbtype"]
else:
model_parameters["nbtype"] = self.parameters["nbtype"]
elif self.type in ("KMeans", "BisectingKMeans"):
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "n_cluster" in parameters:
check_types([("n_cluster", parameters["n_cluster"], [int, float],)])
assert 1 <= parameters["n_cluster"] <= 10000, ParameterError(
"Incorrect parameter 'n_cluster'.\nThe number of clusters must be between 1 and 10000, inclusive."
)
model_parameters["n_cluster"] = parameters["n_cluster"]
elif "n_cluster" not in self.parameters:
model_parameters["n_cluster"] = default_parameters["n_cluster"]
else:
model_parameters["n_cluster"] = self.parameters["n_cluster"]
if "init" in parameters:
check_types([("init", parameters["init"], [str, list],)])
if isinstance(parameters["init"], str):
if self.type in ("BisectingKMeans",):
assert str(parameters["init"]).lower() in [
"random",
"kmeanspp",
"pseudo",
], ParameterError(
"Incorrect parameter 'init'.\nThe initialization method of the clusters must be in (random | kmeanspp | pseudo) or a list of the initial clusters position, found '{}'.".format(
parameters["init"]
)
)
else:
assert str(parameters["init"]).lower() in [
"random",
"kmeanspp",
], ParameterError(
"Incorrect parameter 'init'.\nThe initialization method of the clusters must be in (random | kmeanspp) or a list of the initial clusters position, found '{}'.".format(
parameters["init"]
)
)
model_parameters["init"] = parameters["init"]
elif "init" not in self.parameters:
model_parameters["init"] = default_parameters["init"]
else:
model_parameters["init"] = self.parameters["init"]
if "bisection_iterations" in parameters:
check_types(
[
(
"bisection_iterations",
parameters["bisection_iterations"],
[int, float],
False,
)
]
)
assert (
1 <= parameters["bisection_iterations"] <= 1000000
), ParameterError(
"Incorrect parameter 'bisection_iterations'.\nThe number of iterations the bisecting k-means algorithm performs for each bisection step must be between 1 and 1e6, inclusive."
)
model_parameters["bisection_iterations"] = parameters[
"bisection_iterations"
]
elif (
self.type == "BisectingKMeans"
and "bisection_iterations" not in self.parameters
):
model_parameters["bisection_iterations"] = default_parameters[
"bisection_iterations"
]
elif self.type == "BisectingKMeans":
model_parameters["bisection_iterationss"] = self.parameters[
"bisection_iterations"
]
if "split_method" in parameters:
check_types([("split_method", parameters["split_method"], [str],)])
assert str(parameters["split_method"]).lower() in [
"size",
"sum_squares",
], ParameterError(
"Incorrect parameter 'split_method'.\nThe split method must be in (size | sum_squares), found '{}'.".format(
parameters["split_method"]
)
)
model_parameters["split_method"] = parameters["split_method"]
elif (
self.type == "BisectingKMeans" and "split_method" not in self.parameters
):
model_parameters["split_method"] = default_parameters["split_method"]
elif self.type == "BisectingKMeans":
model_parameters["split_method"] = self.parameters["split_method"]
if "min_divisible_cluster_size" in parameters:
check_types(
[
(
"min_divisible_cluster_size",
parameters["min_divisible_cluster_size"],
[int, float],
False,
)
]
)
assert 2 <= parameters["min_divisible_cluster_size"], ParameterError(
"Incorrect parameter 'min_divisible_cluster_size'.\nThe minimum number of points of a divisible cluster must be greater than or equal to 2."
)
model_parameters["min_divisible_cluster_size"] = parameters[
"min_divisible_cluster_size"
]
elif (
self.type == "BisectingKMeans"
and "min_divisible_cluster_size" not in self.parameters
):
model_parameters["min_divisible_cluster_size"] = default_parameters[
"min_divisible_cluster_size"
]
elif self.type == "BisectingKMeans":
model_parameters["min_divisible_cluster_size"] = self.parameters[
"min_divisible_cluster_size"
]
if "distance_method" in parameters:
check_types(
[("distance_method", parameters["distance_method"], [str],)]
)
assert str(parameters["distance_method"]).lower() in [
"euclidean"
], ParameterError(
"Incorrect parameter 'distance_method'.\nThe distance method must be in (euclidean), found '{}'.".format(
parameters["distance_method"]
)
)
model_parameters["distance_method"] = parameters["distance_method"]
elif (
self.type == "BisectingKMeans"
and "distance_method" not in self.parameters
):
model_parameters["distance_method"] = default_parameters[
"distance_method"
]
elif self.type == "BisectingKMeans":
model_parameters["distance_method"] = self.parameters["distance_method"]
elif self.type in ("LinearSVC", "LinearSVR"):
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "C" in parameters:
check_types([("C", parameters["C"], [int, float],)])
assert 0 <= parameters["C"], ParameterError(
"Incorrect parameter 'C'.\nThe weight for misclassification cost must be positive."
)
model_parameters["C"] = parameters["C"]
elif "C" not in self.parameters:
model_parameters["C"] = default_parameters["C"]
else:
model_parameters["C"] = self.parameters["C"]
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "fit_intercept" in parameters:
check_types([("fit_intercept", parameters["fit_intercept"], [bool],)])
model_parameters["fit_intercept"] = parameters["fit_intercept"]
elif "fit_intercept" not in self.parameters:
model_parameters["fit_intercept"] = default_parameters["fit_intercept"]
else:
model_parameters["fit_intercept"] = self.parameters["fit_intercept"]
if "intercept_scaling" in parameters:
check_types(
[
(
"intercept_scaling",
parameters["intercept_scaling"],
[float],
False,
)
]
)
assert 0 <= parameters["intercept_scaling"], ParameterError(
"Incorrect parameter 'intercept_scaling'.\nThe Intercept Scaling parameter value must be positive."
)
model_parameters["intercept_scaling"] = parameters["intercept_scaling"]
elif "intercept_scaling" not in self.parameters:
model_parameters["intercept_scaling"] = default_parameters[
"intercept_scaling"
]
else:
model_parameters["intercept_scaling"] = self.parameters[
"intercept_scaling"
]
if "intercept_mode" in parameters:
check_types([("intercept_mode", parameters["intercept_mode"], [str],)])
assert str(parameters["intercept_mode"]).lower() in [
"regularized",
"unregularized",
], ParameterError(
"Incorrect parameter 'intercept_mode'.\nThe Intercept Mode must be in (size | sum_squares), found '{}'.".format(
parameters["intercept_mode"]
)
)
model_parameters["intercept_mode"] = parameters["intercept_mode"]
elif "intercept_mode" not in self.parameters:
model_parameters["intercept_mode"] = default_parameters[
"intercept_mode"
]
else:
model_parameters["intercept_mode"] = self.parameters["intercept_mode"]
if ("class_weight" in parameters) and self.type in ("LinearSVC"):
check_types(
[("class_weight", parameters["class_weight"], [list, tuple],)]
)
model_parameters["class_weight"] = parameters["class_weight"]
elif self.type in ("LinearSVC",) and "class_weight" not in self.parameters:
model_parameters["class_weight"] = default_parameters["class_weight"]
elif self.type in ("LinearSVC",):
model_parameters["class_weight"] = self.parameters["class_weight"]
if ("acceptable_error_margin" in parameters) and self.type in ("LinearSVR"):
check_types(
[
(
"acceptable_error_margin",
parameters["acceptable_error_margin"],
[int, float],
False,
)
]
)
assert 0 <= parameters["acceptable_error_margin"], ParameterError(
"Incorrect parameter 'acceptable_error_margin'.\nThe Acceptable Error Margin parameter value must be positive."
)
model_parameters["acceptable_error_margin"] = parameters[
"acceptable_error_margin"
]
elif (
self.type in ("LinearSVR",)
and "acceptable_error_margin" not in self.parameters
):
model_parameters["acceptable_error_margin"] = default_parameters[
"acceptable_error_margin"
]
elif self.type in ("LinearSVR",):
model_parameters["acceptable_error_margin"] = self.parameters[
"acceptable_error_margin"
]
elif self.type in ("PCA", "SVD"):
if ("scale" in parameters) and self.type in ("PCA"):
check_types([("scale", parameters["scale"], [bool],)])
model_parameters["scale"] = parameters["scale"]
elif self.type in ("PCA",) and "scale" not in self.parameters:
model_parameters["scale"] = default_parameters["scale"]
elif self.type in ("PCA",):
model_parameters["scale"] = self.parameters["scale"]
if "method" in parameters:
check_types([("method", parameters["method"], [str],)])
assert str(parameters["method"]).lower() in ["lapack"], ParameterError(
"Incorrect parameter 'method'.\nThe decomposition method must be in (lapack), found '{}'.".format(
parameters["method"]
)
)
model_parameters["method"] = parameters["method"]
elif "method" not in self.parameters:
model_parameters["method"] = default_parameters["method"]
else:
model_parameters["method"] = self.parameters["method"]
if "n_components" in parameters:
check_types(
[("n_components", parameters["n_components"], [int, float],)]
)
assert 0 <= parameters["n_components"], ParameterError(
"Incorrect parameter 'n_components'.\nThe number of components must be positive. If it is equal to 0, all the components will be considered."
)
model_parameters["n_components"] = parameters["n_components"]
elif "n_components" not in self.parameters:
model_parameters["n_components"] = default_parameters["n_components"]
else:
model_parameters["n_components"] = self.parameters["n_components"]
elif self.type in ("OneHotEncoder",):
if "extra_levels" in parameters:
check_types([("extra_levels", parameters["extra_levels"], [dict],)])
model_parameters["extra_levels"] = parameters["extra_levels"]
elif "extra_levels" not in self.parameters:
model_parameters["extra_levels"] = default_parameters["extra_levels"]
else:
model_parameters["extra_levels"] = self.parameters["extra_levels"]
if "drop_first" in parameters:
check_types([("drop_first", parameters["drop_first"], [bool],)])
model_parameters["drop_first"] = parameters["drop_first"]
elif "drop_first" not in self.parameters:
model_parameters["drop_first"] = default_parameters["drop_first"]
else:
model_parameters["drop_first"] = self.parameters["drop_first"]
if "ignore_null" in parameters:
check_types([("ignore_null", parameters["ignore_null"], [bool],)])
model_parameters["ignore_null"] = parameters["ignore_null"]
elif "ignore_null" not in self.parameters:
model_parameters["ignore_null"] = default_parameters["ignore_null"]
else:
model_parameters["ignore_null"] = self.parameters["ignore_null"]
if "separator" in parameters:
check_types([("separator", parameters["separator"], [str],)])
model_parameters["separator"] = parameters["separator"]
elif "separator" not in self.parameters:
model_parameters["separator"] = default_parameters["separator"]
else:
model_parameters["separator"] = self.parameters["separator"]
if "null_column_name" in parameters:
check_types(
[("null_column_name", parameters["null_column_name"], [str],)]
)
model_parameters["null_column_name"] = parameters["null_column_name"]
elif "null_column_name" not in self.parameters:
model_parameters["null_column_name"] = default_parameters[
"null_column_name"
]
else:
model_parameters["null_column_name"] = self.parameters[
"null_column_name"
]
if "column_naming" in parameters:
check_types([("column_naming", parameters["column_naming"], [str],)])
assert str(parameters["column_naming"]).lower() in [
"indices",
"values",
"values_relaxed",
], ParameterError(
"Incorrect parameter 'column_naming'.\nThe column_naming method must be in (indices | values | values_relaxed), found '{}'.".format(
parameters["column_naming"]
)
)
model_parameters["column_naming"] = parameters["column_naming"]
elif "column_naming" not in self.parameters:
model_parameters["column_naming"] = default_parameters["column_naming"]
else:
model_parameters["column_naming"] = self.parameters["column_naming"]
elif self.type in ("Normalizer",):
if "method" in parameters:
check_types([("method", parameters["method"], [str],)])
assert str(parameters["method"]).lower() in [
"zscore",
"robust_zscore",
"minmax",
], ParameterError(
"Incorrect parameter 'method'.\nThe normalization method must be in (zscore | robust_zscore | minmax), found '{}'.".format(
parameters["method"]
)
)
model_parameters["method"] = parameters["method"]
elif "method" not in self.parameters:
model_parameters["method"] = default_parameters["method"]
else:
model_parameters["method"] = self.parameters["method"]
elif self.type in ("DBSCAN",):
if "eps" in parameters:
check_types([("eps", parameters["eps"], [int, float],)])
assert 0 < parameters["eps"], ParameterError(
"Incorrect parameter 'eps'.\nThe radius of a neighborhood must be strictly positive."
)
model_parameters["eps"] = parameters["eps"]
elif "eps" not in self.parameters:
model_parameters["eps"] = default_parameters["eps"]
else:
model_parameters["eps"] = self.parameters["eps"]
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if "min_samples" in parameters:
check_types([("min_samples", parameters["min_samples"], [int, float],)])
assert 0 < parameters["min_samples"], ParameterError(
"Incorrect parameter 'min_samples'.\nThe minimum number of points required to form a dense region must be strictly positive."
)
model_parameters["min_samples"] = parameters["min_samples"]
elif "min_samples" not in self.parameters:
model_parameters["min_samples"] = default_parameters["min_samples"]
else:
model_parameters["min_samples"] = self.parameters["min_samples"]
elif self.type in (
"NearestCentroid",
"KNeighborsClassifier",
"KNeighborsRegressor",
"LocalOutlierFactor",
):
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if ("n_neighbors" in parameters) and (self.type != "NearestCentroid"):
check_types([("n_neighbors", parameters["n_neighbors"], [int, float],)])
assert 0 < parameters["n_neighbors"], ParameterError(
"Incorrect parameter 'n_neighbors'.\nThe number of neighbors must be strictly positive."
)
model_parameters["n_neighbors"] = parameters["n_neighbors"]
elif (
self.type != "NearestCentroid" and "n_neighbors" not in self.parameters
):
model_parameters["n_neighbors"] = default_parameters["n_neighbors"]
elif self.type != "NearestCentroid":
model_parameters["n_neighbors"] = self.parameters["n_neighbors"]
elif self.type in ("CountVectorizer",):
if "max_df" in parameters:
check_types([("max_df", parameters["max_df"], [int, float],)])
assert 0 <= parameters["max_df"] <= 1, ParameterError(
"Incorrect parameter 'max_df'.\nIt must be between 0 and 1, inclusive."
)
model_parameters["max_df"] = parameters["max_df"]
elif "max_df" not in self.parameters:
model_parameters["max_df"] = default_parameters["max_df"]
else:
model_parameters["max_df"] = self.parameters["max_df"]
if "min_df" in parameters:
check_types([("min_df", parameters["min_df"], [int, float],)])
assert 0 <= parameters["min_df"] <= 1, ParameterError(
"Incorrect parameter 'min_df'.\nIt must be between 0 and 1, inclusive."
)
model_parameters["min_df"] = parameters["min_df"]
elif "min_df" not in self.parameters:
model_parameters["min_df"] = default_parameters["min_df"]
else:
model_parameters["min_df"] = self.parameters["min_df"]
if "lowercase" in parameters:
check_types([("lowercase", parameters["lowercase"], [bool],)])
model_parameters["lowercase"] = parameters["lowercase"]
elif "lowercase" not in self.parameters:
model_parameters["lowercase"] = default_parameters["lowercase"]
else:
model_parameters["lowercase"] = self.parameters["lowercase"]
if "ignore_special" in parameters:
check_types([("ignore_special", parameters["ignore_special"], [bool],)])
model_parameters["ignore_special"] = parameters["ignore_special"]
elif "ignore_special" not in self.parameters:
model_parameters["ignore_special"] = default_parameters[
"ignore_special"
]
else:
model_parameters["ignore_special"] = self.parameters["ignore_special"]
if "max_text_size" in parameters:
check_types(
[
(
"max_text_size",
parameters["max_text_size"],
[int, float],
False,
)
]
)
assert 0 < parameters["max_text_size"], ParameterError(
"Incorrect parameter 'max_text_size'.\nThe maximum text size must be positive."
)
model_parameters["max_text_size"] = parameters["max_text_size"]
elif "max_text_size" not in self.parameters:
model_parameters["max_text_size"] = default_parameters["max_text_size"]
else:
model_parameters["max_text_size"] = self.parameters["max_text_size"]
if "max_features" in parameters:
check_types(
[("max_features", parameters["max_features"], [int, float],)]
)
model_parameters["max_features"] = parameters["max_features"]
elif "max_features" not in self.parameters:
model_parameters["max_features"] = default_parameters["max_features"]
else:
model_parameters["max_features"] = self.parameters["max_features"]
from verticapy.learn.linear_model import Lasso, Ridge, LinearRegression
from verticapy.learn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
DummyTreeClassifier,
DummyTreeRegressor,
)
if isinstance(self, Lasso):
model_parameters["penalty"] = "l1"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
elif isinstance(self, Ridge):
model_parameters["penalty"] = "l2"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
elif isinstance(self, LinearRegression):
model_parameters["penalty"] = "none"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
if "C" in model_parameters:
del model_parameters["C"]
elif isinstance(
self,
(
DecisionTreeClassifier,
DecisionTreeRegressor,
DummyTreeClassifier,
DummyTreeRegressor,
),
):
model_parameters["n_estimators"] = 1
model_parameters["sample"] = 1.0
if isinstance(self, (DummyTreeClassifier, DummyTreeRegressor)):
model_parameters["max_features"] = "max"
model_parameters["max_leaf_nodes"] = 1e9
model_parameters["max_depth"] = 100
model_parameters["min_samples_leaf"] = 1
model_parameters["min_info_gain"] = 0.0
self.parameters = model_parameters
# ---#
def shapExplainer(self):
"""
---------------------------------------------------------------------------
Creates the Model shapExplainer. Only linear models are supported.
Returns
-------
shap.Explainer
the shap Explainer.
"""
try:
import shap
except:
raise ImportError(
"The shap module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it.\n[Tips] Run: 'pip3 install shap' in your terminal to install the module."
)
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
cov_matrix = vdf.cov(self.X, show=False)
if len(self.X) == 1:
cov_matrix = np.array([[1]])
elif len(self.X) == 2:
cov_matrix = np.array([[1, cov_matrix], [cov_matrix, 1]])
else:
cov_matrix = cov_matrix.to_numpy()
data = (vdf.avg(self.X).to_numpy(), cov_matrix)
model = self.to_sklearn()
with warnings.catch_warnings(record=True) as w:
return shap.LinearExplainer(
model, data, feature_perturbation="correlation_dependent"
)
else:
raise FunctionError(
"The method 'to_shapExplainer' is not available for model type '{}'.".format(
self.type
)
)
# ---#
def to_sklearn(self):
"""
---------------------------------------------------------------------------
Converts the Vertica Model to sklearn model.
Returns
-------
object
sklearn model.
"""
import verticapy.learn.linear_model as lm
import verticapy.learn.svm as svm
import verticapy.learn.naive_bayes as vnb
import verticapy.learn.cluster as vcl
import verticapy.learn.ensemble as vens
import verticapy.learn.neighbors as vng
import verticapy.learn.preprocessing as vpp
import verticapy.learn.decomposition as vdcp
try:
import sklearn
except:
raise ImportError(
"The scikit-learn module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it.\n[Tips] Run: 'pip3 install scikit-learn' in your terminal to install the module."
)
params = self.get_params()
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
import sklearn.linear_model as sklm
import sklearn.svm as sksvm
if isinstance(self, lm.LinearRegression):
model = sklm.LinearRegression()
elif isinstance(self, lm.ElasticNet):
model = sklm.ElasticNet(
alpha=params["C"],
l1_ratio=params["l1_ratio"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, lm.Lasso):
model = sklm.Lasso(max_iter=params["max_iter"], tol=params["tol"],)
elif isinstance(self, lm.Ridge):
model = sklm.Ridge(max_iter=params["max_iter"], tol=params["tol"],)
elif isinstance(self, lm.LogisticRegression):
if "C" not in params:
params["C"] = 1.0
if "l1_ratio" not in params:
params["l1_ratio"] = None
model = sklm.LogisticRegression(
penalty=params["penalty"].lower(),
C=float(1 / params["C"]),
l1_ratio=params["l1_ratio"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, svm.LinearSVC):
if params["intercept_mode"] == "regularized":
params["penalty"] = "l2"
else:
params["penalty"] = "l1"
model = sksvm.LinearSVC(
penalty=params["penalty"],
C=params["C"],
fit_intercept=params["fit_intercept"],
intercept_scaling=params["intercept_scaling"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, svm.LinearSVR):
if params["intercept_mode"] == "regularized":
params["loss"] = "epsilon_insensitive"
else:
params["loss"] = "squared_epsilon_insensitive"
model = sksvm.LinearSVR(
loss=params["loss"],
C=params["C"],
fit_intercept=params["fit_intercept"],
intercept_scaling=params["intercept_scaling"],
max_iter=params["max_iter"],
tol=params["tol"],
)
if isinstance(self, (lm.LogisticRegression, svm.LinearSVC)):
model.classes_ = np.array([0, 1])
model.coef_ = np.array([self.coef_["coefficient"][1:]])
model.intercept_ = self.coef_["coefficient"][0]
try:
model.n_iter_ = self.get_attr("iteration_count")["iteration_count"][0]
except:
model.n_iter_ = 1
elif self.type in ("Normalizer", "OneHotEncoder"):
import sklearn.preprocessing as skpp
if isinstance(self, (vpp.Normalizer,)):
attr = self.get_attr("details")
if "avg" in attr.values:
model = skpp.StandardScaler()
model.mean_ = np.array(attr["avg"])
model.scale_ = np.array(attr["std_dev"])
model.var_ = model.scale_ ** 2
model.n_features_in_ = len(self.X)
model.n_samples_seen_ = np.array(
vdf_from_relation(
self.input_relation, cursor=self.cursor
).count(columns=self.X)["count"]
)
elif "median" in attr.values:
model = skpp.RobustScaler()
model.center_ = np.array(attr["median"])
model.scale_ = np.array(attr["mad"])
model.n_features_in_ = len(self.X)
elif "min" in attr.values:
model = skpp.MinMaxScaler()
model.data_min_ = np.array(attr["min"])
model.data_max_ = np.array(attr["max"])
model.data_range_ = np.array(attr["max"]) - np.array(attr["min"])
model.scale_ = 1 / model.data_range_
model.min_ = 0 - model.data_min_ * model.scale_
model.n_features_in_ = len(self.X)
self.cursor.execute(
"SELECT COUNT(*) FROM {} WHERE {}".format(
self.input_relation,
" AND ".join(
["{} IS NOT NULL".format(elem) for elem in self.X]
),
)
)
model.n_samples_seen_ = self.cursor.fetchone()[0]
elif isinstance(self, (vpp.OneHotEncoder,)):
drop = None
model = skpp.OneHotEncoder()
model.drop_idx_ = None
if self.parameters["drop_first"]:
model.drop_idx_ = np.array([0 for elem in range(len(self.X))])
params = self.param_
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
categories = []
for column in self.X:
idx = []
for i in range(len(params["category_name"])):
if str_column(params["category_name"][i]) == str_column(
column
) and (
not (self.parameters["ignore_null"])
or params["category_level"][i] != None
):
idx += [i]
cat_tmp = []
for j, i in enumerate(idx):
elem = params["category_level"][i]
if vdf[column].dtype() == "int":
try:
elem = int(elem)
except:
pass
cat_tmp += [elem]
categories += [np.array(cat_tmp)]
model.categories_ = categories
elif self.type in ("PCA", "SVD"):
import sklearn.decomposition as skdcp
if isinstance(self, (vdcp.PCA,)):
model = skdcp.PCA(n_components=params["n_components"])
model.components_ = []
all_pc = self.get_attr("principal_components")
for idx, elem in enumerate(all_pc.values):
if idx > 0:
model.components_ += [np.array(all_pc.values[elem])]
model.components_ = np.array(model.components_)
model.explained_variance_ratio_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.explained_variance_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.singular_values_ = np.array(
self.get_attr("singular_values")["value"]
)
model.mean_ = np.array(self.get_attr("columns")["mean"])
model.n_components_ = params["n_components"]
model.n_features_ = len(self.X)
model.n_samples_ = self.get_attr("counters")["counter_value"][0]
model.noise_variance_ = 0.0
elif isinstance(self, (vdcp.SVD,)):
model = skdcp.TruncatedSVD(n_components=params["n_components"])
model.components_ = []
all_pc = self.get_attr("right_singular_vectors")
for idx, elem in enumerate(all_pc.values):
if idx > 0:
model.components_ += [np.array(all_pc.values[elem])]
model.components_ = np.array(model.components_)
model.explained_variance_ratio_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.explained_variance_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.singular_values_ = np.array(
self.get_attr("singular_values")["value"]
)
for i in range(len(model.components_)):
for j in range(len(model.components_[0])):
model.components_[i][j] /= model.singular_values_[i]
elif self.type in ("NaiveBayes",):
import sklearn.naive_bayes as sknb
if isinstance(self, (vnb.NaiveBayes,)):
all_attr = self.get_attr()
current_type = None
for elem in all_attr["attr_name"][6:]:
if current_type is None:
if "gaussian" in elem.lower():
current_type = "gaussian"
elif "multinomial" in elem.lower():
current_type = "multinomial"
elif "bernoulli" in elem.lower():
current_type = "bernoulli"
else:
current_type = "categorical"
elif current_type not in elem.lower():
raise ModelError(
"Naive Bayes Models using different variables types (multinomial, categorical, gaussian...) is not supported by Scikit Learn."
)
self.cursor.execute(
"SELECT COUNT(*) FROM {} WHERE {}".format(
self.input_relation,
" AND ".join(
["{} IS NOT NULL".format(elem) for elem in self.X]
),
)
)
total_count = self.cursor.fetchone()[0]
classes = np.array(self.get_attr("prior")["class"])
class_prior = np.array(self.get_attr("prior")["probability"])
if current_type == "gaussian":
model = sknb.GaussianNB()
model.epsilon_ = 0.0
model.sigma_, model.theta_ = [], []
for elem in classes:
model.sigma_ += [
self.get_attr("gaussian.{}".format(elem))["sigma_sq"]
]
model.theta_ += [
self.get_attr("gaussian.{}".format(elem))["mu"]
]
model.sigma_, model.theta_ = (
np.array(model.sigma_),
np.array(model.theta_),
)
model.class_prior_ = class_prior
elif current_type in ("multinomial", "bernoulli"):
if current_type == "multinomial":
model = sknb.MultinomialNB(alpha=params["alpha"])
else:
model = sknb.BernoulliNB(alpha=params["alpha"])
model.class_log_prior_ = np.log(class_prior)
model.n_features_ = len(self.X)
model.feature_count_, model.feature_log_prob_ = [], []
for elem in classes:
model.feature_count_ += [
self.get_attr("{}.{}".format(current_type, elem))[
"probability"
]
]
model.feature_log_prob_ += [
self.get_attr("{}.{}".format(current_type, elem))[
"probability"
]
]
model.feature_count_, model.feature_log_prob_ = (
(total_count * np.array(model.feature_count_)).astype(int),
np.log(np.array(model.feature_log_prob_)),
)
elif current_type == "categorical":
model = sknb.CategoricalNB(alpha=params["alpha"])
model.class_log_prior_ = np.log(class_prior)
model.n_features_ = len(self.X)
model.feature_log_prob_, model.category_count_ = [], []
for elem in self.get_attr("details")["predictor"]:
if str_column(elem) != str_column(self.y):
column_class = []
categorical = self.get_attr("categorical.{}".format(elem))
for idx in classes:
column_class += [categorical[idx]]
model.feature_log_prob_ += [np.log(np.array(column_class))]
model.category_count_ += [np.array(column_class)]
for idx in range(len(model.category_count_)):
for i in range(len(model.category_count_[idx])):
for j in range(len(model.category_count_[idx][i])):
model.category_count_[idx][i][j] = int(
model.category_count_[idx][i][j]
* class_prior[i]
* total_count
)
model.classes_ = classes
model.class_count_ = (total_count * class_prior).astype(int)
elif self.type in ("NearestCentroid",):
import sklearn.neighbors as skng
if isinstance(self, (vng.NearestCentroid,)):
if params["p"] == 1:
metric = "manhattan"
elif params["p"] == 2:
metric = "euclidean"
else:
raise ModelError(
"Model Conversion failed. NearestCentroid using parameter 'p' > 2 is not supported."
)
model = skng.NearestCentroid(metric=metric,)
model.classes_ = np.array(self.classes_)
model.centroids_ = []
for i in range(len(self.classes_)):
raw = []
for idx, elem in enumerate(self.X):
raw += [self.centroids_[elem][i]]
model.centroids_ += [raw]
model.centroids_ = np.array(model.centroids_)
elif self.type in ("KMeans"):
import sklearn.cluster as skcl
if isinstance(self, (vcl.KMeans,)):
if params["init"] == "kmeanspp":
params["init"] = "k-means++"
model = skcl.KMeans(
n_clusters=params["n_cluster"],
init=params["init"],
max_iter=params["max_iter"],
tol=params["tol"],
)
centers_attribute = self.get_attr("centers").values
model.cluster_centers_ = []
for i in range(params["n_cluster"]):
model.cluster_centers_ += [
[centers_attribute[elem][i] for elem in centers_attribute]
]
model.cluster_centers_ = np.array(model.cluster_centers_)
model.inertia_ = self.metrics_["value"][2]
model.n_iter_ = int(
self.get_attr("metrics")["metrics"][0]
.split("Number of iterations performed: ")[1]
.split("\n")[0]
)
model._n_threads = None
elif self.type in ("RandomForestClassifier", "RandomForestRegressor"):
if isinstance(self, (vens.RandomForestClassifier,)) or self.type in (
"RandomForestClassifier",
"RandomForestRegressor",
):
raise ModelError(
"Model Conversion failed. Tree Based Models are not yet supported."
)
import sklearn.tree._tree as sktree
import sklearn.tree as skdtree
import sklearn.ensemble as skens
features = {}
parameters = {
"max_depth": params["max_depth"],
"min_samples_leaf": params["min_samples_leaf"],
"max_features": params["max_features"],
"min_impurity_split": params["min_info_gain"],
"max_leaf_nodes": params["max_leaf_nodes"],
}
for i in range(len(self.X)):
features[str_column(self.X[i]).lower()] = i
if (
isinstance(self, (vens.RandomForestRegressor,))
or self.type == "RandomForestRegressor"
):
model = skens.RandomForestRegressor(
n_estimators=params["n_estimators"], **parameters
)
model.base_estimator_ = skdtree.DecisionTreeRegressor(**parameters)
elif (
isinstance(self, (vens.RandomForestClassifier,))
or self.type == "RandomForestClassifier"
):
model = skens.RandomForestClassifier(
n_estimators=params["n_estimators"], **parameters
)
model.base_estimator_ = skdtree.DecisionTreeClassifier(**parameters)
model.classes_ = np.array(self.classes_)
model.n_classes_ = len(self.classes_)
model.n_features_ = len(self.X)
model.n_outputs_ = 1
model.features_importance_ = np.array(
[
elem / 100
for elem in self.features_importance(show=False,)["importance"]
]
)
model.estimators_ = []
for i in range(params["n_estimators"]):
vtree = self.get_tree(i)
ti = sktree.Tree(
model.n_features_,
np.array([1] * model.n_outputs_, dtype=np.intp),
model.n_outputs_,
)
ti.capacity = len(vtree["node_id"])
d = {}
d["max_depth"] = max(vtree["node_depth"])
d["node_count"] = len(vtree["node_id"])
d["nodes"] = []
left_child = np.array(
[
elem - 1 if elem is not None else -1
for elem in vtree["left_child_id"]
]
)
right_child = np.array(
[
elem - 1 if elem is not None else -1
for elem in vtree["right_child_id"]
]
)
feature = np.array(
[
features[str_column(elem).lower()] if elem is not None else -2
for elem in vtree["split_predictor"]
]
)
impurity = np.array(vtree["weighted_information_gain"])
threshold = np.array(
[elem if elem is not None else -2 for elem in vtree["split_value"]]
)
n_node_samples = np.array([100 for elem in vtree["right_child_id"]])
weighted_n_node_samples = np.array(
[100.0 for elem in vtree["right_child_id"]]
)
for k in range(len(left_child)):
d["nodes"] += [
(
left_child[k],
right_child[k],
feature[k],
threshold[k],
impurity[k],
n_node_samples[k],
weighted_n_node_samples[k],
)
]
dtype = [
("left_child", "<i8"),
("right_child", "<i8"),
("feature", "<i8"),
("threshold", "<f8"),
("impurity", "<f8"),
("n_node_samples", "<i8"),
("weighted_n_node_samples", "<f8"),
]
d["nodes"] = np.array(d["nodes"], dtype=dtype)
if isinstance(self, (vens.RandomForestClassifier,)):
dtree = skdtree.DecisionTreeClassifier(**parameters)
dtree.classes_ = np.array(self.classes_)
dtree.n_classes_ = len(self.classes_)
ti.max_n_classes = len(self.classes_)
d["values"] = [
[[None for id0 in range(len(self.classes_))]]
for id1 in range(len(left_child))
]
for k in range(len(left_child)):
if left_child[k] == right_child[k] == -1:
proba = vtree["probability/variance"][k]
for j in range(len(self.classes_)):
if int(self.classes_[j]) == int(vtree["prediction"][k]):
d["values"][k][0][j] = (
int((len(self.classes_) - 1) / (1 - proba))
if 1 - proba != 0
else 0
)
else:
d["values"][k][0][j] = (
int(1 / proba) if proba != 0 else 0
)
d["values"] = np.array(d["values"], dtype=np.float64)
elif isinstance(self, (vens.RandomForestRegressor,)):
dtree = skdtree.DecisionTreeRegressor(**parameters)
d["values"] = np.array(
[
[[vtree["prediction"][id1]]]
for id1 in range(len(left_child))
],
dtype=np.float64,
)
ti.__setstate__(d)
dtree.features_importance_ = np.array(
[
elem / 100
for elem in self.features_importance(show=False, tree_id=i)[
"importance"
]
]
)
if isinstance(parameters["max_features"], str):
if parameters["max_features"].lower() == "max":
dtree.max_features_ = len(self.X)
else:
dtree.max_features_ = int(len(self.X) / 3 + 1)
else:
dtree.max_features_ = params["max_features"]
dtree.n_features_ = len(self.X)
dtree.n_outputs_ = 1
dtree.tree_ = ti
model.estimators_ += [dtree]
else:
raise FunctionError(
"The method 'to_sklearn' is not available for model type '{}'.".format(
self.type
)
)
return model
# ---#
class Supervised(vModel):
    """Base class for supervised Vertica models (adds training via fit)."""
    # ---#
    def fit(
        self,
        input_relation: (str, vDataFrame),
        X: list,
        y: str,
        test_relation: (str, vDataFrame) = "",
    ):
        """
        ---------------------------------------------------------------------------
        Trains the model.
        Parameters
        ----------
        input_relation: str/vDataFrame
            Train relation.
        X: list
            List of the predictors.
        y: str
            Response column.
        test_relation: str/vDataFrame, optional
            Relation to use to test the model. If empty, the train relation
            is used.
        Returns
        -------
        object
            model
        """
        check_types(
            [
                ("input_relation", input_relation, [str, vDataFrame],),
                ("X", X, [list],),
                ("y", y, [str],),
                ("test_relation", test_relation, [str, vDataFrame],),
            ]
        )
        # Naive Bayes with an explicit 'nbtype' needs the predictors cast to the
        # SQL type matching that flavor before training.
        if (self.type == "NaiveBayes") and (
            self.parameters["nbtype"]
            in ("bernoulli", "categorical", "multinomial", "gaussian")
        ):
            new_types = {}
            for elem in X:
                if self.parameters["nbtype"] == "bernoulli":
                    new_types[elem] = "bool"
                elif self.parameters["nbtype"] == "categorical":
                    new_types[elem] = "varchar"
                elif self.parameters["nbtype"] == "multinomial":
                    new_types[elem] = "int"
                elif self.parameters["nbtype"] == "gaussian":
                    new_types[elem] = "float"
            # Casting requires a vDataFrame; wrap a plain relation name first.
            if not (isinstance(input_relation, vDataFrame)):
                input_relation = vdf_from_relation(input_relation, cursor=self.cursor)
            input_relation.astype(new_types)
        self.cursor = check_cursor(self.cursor, input_relation, True)[0]
        check_model(name=self.name, cursor=self.cursor)
        if isinstance(input_relation, vDataFrame):
            # Materialize the vDataFrame as a temporary view so the Vertica
            # training function can read it; the view is dropped after training.
            self.input_relation = input_relation.__genSQL__()
            schema, relation = schema_relation(self.name)
            relation = "{}._VERTICAPY_TEMPORARY_VIEW_{}".format(
                str_column(schema), get_session(self.cursor)
            )
            self.cursor.execute("DROP VIEW IF EXISTS {}".format(relation))
            self.cursor.execute(
                "CREATE VIEW {} AS SELECT * FROM {}".format(
                    relation, input_relation.__genSQL__()
                )
            )
        else:
            self.input_relation = input_relation
            relation = input_relation
        if isinstance(test_relation, vDataFrame):
            self.test_relation = test_relation.__genSQL__()
        elif test_relation:
            self.test_relation = test_relation
        else:
            # Default: evaluate on the training relation.
            self.test_relation = self.input_relation
        self.X = [str_column(column) for column in X]
        self.y = str_column(y)
        parameters = vertica_param_dict(self)
        # For elastic net, 'alpha' must be passed separately at the end of the
        # parameter list, not inside the regular key = value sequence.
        if (
            "regularization" in parameters
            and parameters["regularization"].lower() == "'enet'"
        ):
            alpha = parameters["alpha"]
            del parameters["alpha"]
        else:
            alpha = None
        # Resolve the symbolic 'mtry' values to concrete column counts.
        if "mtry" in parameters:
            if parameters["mtry"] == "'auto'":
                parameters["mtry"] = int(len(self.X) / 3 + 1)
            elif parameters["mtry"] == "'max'":
                parameters["mtry"] = len(self.X)
        fun = self.get_model_fun()[0]
        query = "SELECT {}('{}', '{}', '{}', '{}' USING PARAMETERS "
        query = query.format(fun, self.name, relation, self.y, ", ".join(self.X))
        query += ", ".join(
            ["{} = {}".format(elem, parameters[elem]) for elem in parameters]
        )
        if alpha != None:
            query += ", alpha = {}".format(alpha)
        query += ")"
        try:
            executeSQL(self.cursor, query, "Fitting the model.")
            # Clean up the temporary view whether training succeeds or fails.
            if isinstance(input_relation, vDataFrame):
                self.cursor.execute("DROP VIEW {};".format(relation))
        except:
            if isinstance(input_relation, vDataFrame):
                self.cursor.execute("DROP VIEW {};".format(relation))
            raise
        if self.type in (
            "LinearSVC",
            "LinearSVR",
            "LogisticRegression",
            "LinearRegression",
            "SARIMAX",
        ):
            # Cache the fitted coefficients for linear models.
            self.coef_ = self.get_attr("details")
        elif self.type in ("RandomForestClassifier", "NaiveBayes"):
            # Classifiers need the list of distinct response values.
            if not (isinstance(input_relation, vDataFrame)):
                self.cursor.execute(
                    "SELECT DISTINCT {} FROM {} WHERE {} IS NOT NULL ORDER BY 1".format(
                        self.y, input_relation, self.y
                    )
                )
                classes = self.cursor.fetchall()
                self.classes_ = [item[0] for item in classes]
            else:
                self.classes_ = input_relation[self.y].distinct()
        return self
# ---#
class Tree:
    """Mixin adding tree-inspection helpers (graphviz export, tabular read,
    plotting) to tree-based Vertica models."""
    # ---#
    def export_graphviz(self, tree_id: int = 0):
        """
        ---------------------------------------------------------------------------
        Converts the input tree to graphviz.
        Parameters
        ----------
        tree_id: int, optional
            Unique tree identifier. It is an integer between 0 and n_estimators - 1
        Returns
        -------
        str
            graphviz formatted tree.
        """
        check_types([("tree_id", tree_id, [int, float],)])
        version(cursor=self.cursor, condition=[9, 1, 1])
        # BUGFIX: 'self.type in ("KernelDensity")' was a substring test on the
        # string "KernelDensity" (a parenthesized string is not a tuple); use a
        # one-element tuple for an exact membership test.
        name = self.tree_name if self.type in ("KernelDensity",) else self.name
        query = "SELECT READ_TREE ( USING PARAMETERS model_name = '{}', tree_id = {}, format = 'graphviz');".format(
            name, tree_id
        )
        executeSQL(self.cursor, query, "Exporting to graphviz.")
        return self.cursor.fetchone()[1]

    # ---#
    def get_tree(self, tree_id: int = 0):
        """
        ---------------------------------------------------------------------------
        Returns a table with all the input tree information.
        Parameters
        ----------
        tree_id: int, optional
            Unique tree identifier. It is an integer between 0 and n_estimators - 1
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        check_types([("tree_id", tree_id, [int, float],)])
        version(cursor=self.cursor, condition=[9, 1, 1])
        # Same fix as in export_graphviz: exact match instead of substring test.
        name = self.tree_name if self.type in ("KernelDensity",) else self.name
        query = "SELECT * FROM (SELECT READ_TREE ( USING PARAMETERS model_name = '{}', tree_id = {}, format = 'tabular')) x ORDER BY node_id;".format(
            name, tree_id
        )
        result = to_tablesample(query=query, cursor=self.cursor, title="Reading Tree.",)
        return result

    # ---#
    def plot_tree(self, tree_id: int = 0, pic_path: str = ""):
        """
        ---------------------------------------------------------------------------
        Draws the input tree. The module anytree must be installed in the machine.
        Parameters
        ----------
        tree_id: int, optional
            Unique tree identifier. It is an integer between 0 and n_estimators - 1
        pic_path: str, optional
            Absolute path to save the image of the tree.
        """
        check_types(
            [("tree_id", tree_id, [int, float],), ("pic_path", pic_path, [str],),]
        )
        # Delegates to the module-level plot_tree helper (not this method).
        plot_tree(
            self.get_tree(tree_id=tree_id).values, metric="variance", pic_path=pic_path
        )
# ---#
class Classifier(Supervised):
    """Marker base class for supervised classification models; adds no
    behavior of its own beyond Supervised."""
    pass
# ---#
class BinaryClassifier(Classifier):
    """Base class for classifiers whose response takes exactly two classes,
    encoded as 0 and 1 (see deploySQL's CASE expression)."""
    # Fixed class labels shared by all binary classifiers.
    classes_ = [0, 1]
# ---#
def classification_report(self, cutoff: float = 0.5):
"""
---------------------------------------------------------------------------
Computes a classification report using multiple metrics to evaluate the model
(AUC, accuracy, PRC AUC, F1...).
Parameters
----------
cutoff: float, optional
Probability cutoff.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("cutoff", cutoff, [int, float],)])
if cutoff > 1 or cutoff < 0:
cutoff = self.score(method="best_cutoff")
return classification_report(
self.y,
[self.deploySQL(), self.deploySQL(cutoff)],
self.test_relation,
self.cursor,
cutoff=cutoff,
)
# ---#
def confusion_matrix(self, cutoff: float = 0.5):
"""
---------------------------------------------------------------------------
Computes the model confusion matrix.
Parameters
----------
cutoff: float, optional
Probability cutoff.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("cutoff", cutoff, [int, float],)])
return confusion_matrix(
self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
)
# ---#
def deploySQL(self, cutoff: float = -1, X: list = []):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Parameters
----------
cutoff: float, optional
Probability cutoff. If this number is not between 0 and 1, the method
will return the probability to be of class 1.
X: list, optional
List of the columns used to deploy the model. If empty, the model
predictors will be used.
Returns
-------
str
the SQL code needed to deploy the model.
"""
check_types([("cutoff", cutoff, [int, float],), ("X", X, [list],)])
X = [str_column(elem) for elem in X]
fun = self.get_model_fun()[1]
sql = "{}({} USING PARAMETERS model_name = '{}', type = 'probability', match_by_pos = 'true')"
if cutoff <= 1 and cutoff >= 0:
sql = "(CASE WHEN {} > {} THEN 1 ELSE 0 END)".format(sql, cutoff)
return sql.format(fun, ", ".join(self.X if not (X) else X), self.name)
# ---#
def lift_chart(
self, ax=None, nbins: int = 1000, **style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the model Lift Chart.
Parameters
----------
ax: Matplotlib axes object, optional
The axes to plot on.
nbins: int, optional
Curve number of bins.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
return lift_chart(
self.y,
self.deploySQL(),
self.test_relation,
self.cursor,
ax=ax,
nbins=nbins,
**style_kwds,
)
# ---#
def prc_curve(
self, ax=None, nbins: int = 30, **style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the model PRC curve.
Parameters
----------
ax: Matplotlib axes object, optional
The axes to plot on.
nbins: int, optional
Curve number of bins.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
return prc_curve(
self.y,
self.deploySQL(),
self.test_relation,
self.cursor,
ax=ax,
nbins=nbins,
**style_kwds,
)
# ---#
def predict(
self,
vdf: (str, vDataFrame),
X: list = [],
name: str = "",
cutoff: float = -1,
inplace: bool = True,
):
"""
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: str/vDataFrame
Object to use to run the prediction. It can also be a customized relation
but you need to englobe it using an alias. For example "(SELECT 1) x" is
correct whereas "(SELECT 1)" or "SELECT 1" are incorrect.
X: list, optional
List of the columns used to deploy the models. If empty, the model
predictors will be used.
name: str, optional
Name of the added vcolumn. If empty, a name will be generated.
cutoff: float, optional
Probability cutoff.
inplace: bool, optional
If set to True, the prediction will be added to the vDataFrame.
Returns
-------
vDataFrame
the input object.
"""
check_types(
[
("name", name, [str],),
("cutoff", cutoff, [int, float],),
("X", X, [list],),
("vdf", vdf, [str, vDataFrame],),
],
)
if isinstance(vdf, str):
vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
X = [str_column(elem) for elem in X]
name = (
"{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
if not (name)
else name
)
if inplace:
return vdf.eval(name, self.deploySQL(cutoff=cutoff, X=X))
else:
return vdf.copy().eval(name, self.deploySQL(cutoff=cutoff, X=X))
# ---#
def cutoff_curve(
    self, ax=None, nbins: int = 30, **style_kwds,
):
    """
    ---------------------------------------------------------------------------
    Draws the model Cutoff curve.

    Parameters
    ----------
    ax: Matplotlib axes object, optional
        The axes to plot on.
    nbins: int, optional
        Curve number of bins.
    **style_kwds
        Any optional parameter to pass to the Matplotlib functions.

    Returns
    -------
    tablesample
        An object containing the result. For more information, see
        utilities.tablesample.
    """
    # The cutoff curve is produced by the ROC helper with cutoff_curve=True.
    y_true = self.y
    y_score = self.deploySQL()
    return roc_curve(
        y_true,
        y_score,
        self.test_relation,
        self.cursor,
        ax=ax,
        cutoff_curve=True,
        nbins=nbins,
        **style_kwds,
    )
# ---#
def roc_curve(
    self, ax=None, nbins: int = 30, **style_kwds,
):
    """
    ---------------------------------------------------------------------------
    Draws the model ROC curve.

    Parameters
    ----------
    ax: Matplotlib axes object, optional
        The axes to plot on.
    nbins: int, optional
        Curve number of bins.
    **style_kwds
        Any optional parameter to pass to the Matplotlib functions.

    Returns
    -------
    tablesample
        An object containing the result. For more information, see
        utilities.tablesample.
    """
    # Delegate to the module-level ROC helper on the test relation.
    y_true = self.y
    y_score = self.deploySQL()
    return roc_curve(
        y_true,
        y_score,
        self.test_relation,
        self.cursor,
        ax=ax,
        nbins=nbins,
        **style_kwds,
    )
# ---#
def score(self, method: str = "accuracy", cutoff: float = 0.5):
    """
    ---------------------------------------------------------------------------
    Computes the model score.
    Parameters
    ----------
    method: str, optional
        The method to use to compute the score.
            accuracy    : Accuracy
            auc         : Area Under the Curve (ROC)
            best_cutoff : Cutoff which optimised the ROC Curve prediction.
            bm          : Informedness = tpr + tnr - 1
            csi         : Critical Success Index = tp / (tp + fn + fp)
            f1          : F1 Score
            logloss     : Log Loss
            mcc         : Matthews Correlation Coefficient
            mk          : Markedness = ppv + npv - 1
            npv         : Negative Predictive Value = tn / (tn + fn)
            prc_auc     : Area Under the Curve (PRC)
            precision   : Precision = tp / (tp + fp)
            recall      : Recall = tp / (tp + fn)
            specificity : Specificity = tn / (tn + fp)
    cutoff: float, optional
        Cutoff for which the tested category will be accepted as prediction.
    Returns
    -------
    float
        score
    """
    check_types([("cutoff", cutoff, [int, float],), ("method", method, [str],)])
    # Dispatch on the requested metric: threshold-based metrics score the
    # thresholded prediction (deploySQL(cutoff)), curve/probability-based
    # metrics score the raw probability expression (deploySQL()).
    if method in ("accuracy", "acc"):
        return accuracy_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method == "auc":
        return auc(self.y, self.deploySQL(), self.test_relation, self.cursor)
    elif method == "prc_auc":
        return prc_auc(self.y, self.deploySQL(), self.test_relation, self.cursor)
    elif method in ("best_cutoff", "best_threshold"):
        # best_threshold=True makes roc_curve return the optimal cutoff
        # instead of drawing the curve.
        return roc_curve(
            self.y,
            self.deploySQL(),
            self.test_relation,
            self.cursor,
            best_threshold=True,
            nbins=1000,
        )
    elif method in ("recall", "tpr"):
        return recall_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("precision", "ppv"):
        return precision_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("specificity", "tnr"):
        return specificity_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("negative_predictive_value", "npv"):
        # NOTE(review): this returns precision_score (ppv), although the
        # docstring defines npv = tn / (tn + fn) — looks like a bug; confirm
        # whether a dedicated NPV metric exists in the metrics module.
        return precision_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("log_loss", "logloss"):
        return log_loss(self.y, self.deploySQL(), self.test_relation, self.cursor)
    elif method == "f1":
        return f1_score(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method == "mcc":
        return matthews_corrcoef(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("bm", "informedness"):
        return informedness(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("mk", "markedness"):
        return markedness(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    elif method in ("csi", "critical_success_index"):
        return critical_success_index(
            self.y, self.deploySQL(cutoff), self.test_relation, self.cursor
        )
    else:
        # NOTE(review): the message omits the accepted 'f1' method.
        raise ParameterError(
            "The parameter 'method' must be in accuracy|auc|prc_auc|best_cutoff|recall|precision|log_loss|negative_predictive_value|specificity|mcc|informedness|markedness|critical_success_index"
        )
# ---#
# Multi-class classifier interface: every metric/curve is computed by treating
# one class ('pos_label') as positive and merging the others into the negatives.
class MulticlassClassifier(Classifier):
    # ---#
    def classification_report(self, cutoff: (float, list) = [], labels: list = []):
        """
        ---------------------------------------------------------------------------
        Computes a classification report using multiple metrics to evaluate the model
        (AUC, accuracy, PRC AUC, F1...). In case of multiclass classification, it will
        consider each category as positive and switch to the next one during the computation.
        Parameters
        ----------
        cutoff: float/list, optional
            Cutoff for which the tested category will be accepted as prediction.
            In case of multiclass classification, each tested category becomes
            the positives and the others are merged into the negatives. The list will
            represent the classes threshold. If it is empty, the best cutoff will be used.
        labels: list, optional
            List of the different labels to be used during the computation.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        check_types(
            [("cutoff", cutoff, [int, float, list],), ("labels", labels, [list],),]
        )
        # Default to every class the model was trained on.
        if not (labels):
            labels = self.classes_
        return classification_report(
            cutoff=cutoff, estimator=self, labels=labels, cursor=self.cursor
        )

    # ---#
    def confusion_matrix(self, pos_label: (int, float, str) = None, cutoff: float = -1):
        """
        ---------------------------------------------------------------------------
        Computes the model confusion matrix.
        Parameters
        ----------
        pos_label: int/float/str, optional
            Label to consider as positive. All the other classes will be merged and
            considered as negative in case of multi classification.
        cutoff: float, optional
            Cutoff for which the tested category will be accepted as prediction. If the
            cutoff is not between 0 and 1, the entire confusion matrix will be drawn.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        check_types([("cutoff", cutoff, [int, float],)])
        # Binary case: the positive class defaults to classes_[1].
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        if pos_label:
            # One-vs-rest confusion matrix for the chosen class.
            return confusion_matrix(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
                pos_label=pos_label,
            )
        else:
            # No positive class selected: full multi-class matrix.
            return multilabel_confusion_matrix(
                self.y, self.deploySQL(), self.test_relation, self.classes_, self.cursor
            )

    # ---#
    def cutoff_curve(
        self,
        pos_label: (int, float, str) = None,
        ax=None,
        nbins: int = 30,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the model Cutoff curve.
        Parameters
        ----------
        pos_label: int/float/str, optional
            To draw the ROC curve, one of the response column class has to be the
            positive one. The parameter 'pos_label' represents this class.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        nbins: int, optional
            Curve number of bins.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        if pos_label not in self.classes_:
            raise ParameterError(
                "'pos_label' must be one of the response column classes"
            )
        # deploySQL(allSQL=True)[0] is a probability template with a '{}'
        # placeholder for the class name.
        return roc_curve(
            self.y,
            self.deploySQL(allSQL=True)[0].format(pos_label),
            self.test_relation,
            self.cursor,
            pos_label,
            ax=ax,
            cutoff_curve=True,
            nbins=nbins,
            **style_kwds,
        )

    # ---#
    def deploySQL(
        self,
        pos_label: (int, float, str) = None,
        cutoff: float = -1,
        allSQL: bool = False,
        X: list = [],
    ):
        """
        ---------------------------------------------------------------------------
        Returns the SQL code needed to deploy the model.
        Parameters
        ----------
        pos_label: int/float/str, optional
            Label to consider as positive. All the other classes will be merged and
            considered as negative in case of multi classification.
        cutoff: float, optional
            Cutoff for which the tested category will be accepted as prediction. If
            the cutoff is not between 0 and 1, a probability will be returned.
        allSQL: bool, optional
            If set to True, the output will be a list of the different SQL codes
            needed to deploy the different categories score.
        X: list, optional
            List of the columns used to deploy the model. If empty, the model
            predictors will be used.
        Returns
        -------
        str / list
            the SQL code needed to deploy the self.
        """
        check_types(
            [
                ("cutoff", cutoff, [int, float],),
                ("allSQL", allSQL, [bool],),
                ("X", X, [list],),
            ]
        )
        X = [str_column(elem) for elem in X]
        fun = self.get_model_fun()[1]
        if allSQL:
            # First element: per-class probability template; the class slot is
            # left as a literal '{}' to be filled by the caller via .format().
            sql = "{}({} USING PARAMETERS model_name = '{}', class = '{}', type = 'probability', match_by_pos = 'true')".format(
                fun, ", ".join(self.X if not (X) else X), self.name, "{}"
            )
            # Second element: top-class prediction SQL.
            sql = [
                sql,
                "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')".format(
                    fun, ", ".join(self.X if not (X) else X), self.name
                ),
            ]
        else:
            if pos_label in self.classes_ and cutoff <= 1 and cutoff >= 0:
                # Thresholded one-vs-rest prediction for the chosen class.
                sql = "{}({} USING PARAMETERS model_name = '{}', class = '{}', type = 'probability', match_by_pos = 'true')".format(
                    fun, ", ".join(self.X if not (X) else X), self.name, pos_label
                )
                if len(self.classes_) > 2:
                    sql = "(CASE WHEN {} >= {} THEN '{}' WHEN {} IS NULL THEN NULL ELSE 'Non-{}' END)".format(
                        sql, cutoff, pos_label, sql, pos_label
                    )
                else:
                    # Binary: the negative label is the other class.
                    non_pos_label = (
                        self.classes_[0]
                        if (self.classes_[0] != pos_label)
                        else self.classes_[1]
                    )
                    sql = "(CASE WHEN {} >= {} THEN '{}' WHEN {} IS NULL THEN NULL ELSE '{}' END)".format(
                        sql, cutoff, pos_label, sql, non_pos_label
                    )
            elif pos_label in self.classes_:
                # Valid class but no usable cutoff: return the raw probability.
                sql = "{}({} USING PARAMETERS model_name = '{}', class = '{}', type = 'probability', match_by_pos = 'true')".format(
                    fun, ", ".join(self.X if not (X) else X), self.name, pos_label
                )
            else:
                # No class selected: top-class prediction.
                sql = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')".format(
                    fun, ", ".join(self.X if not (X) else X), self.name
                )
        return sql

    # ---#
    def lift_chart(
        self,
        pos_label: (int, float, str) = None,
        ax=None,
        nbins: int = 1000,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the model Lift Chart.
        Parameters
        ----------
        pos_label: int/float/str, optional
            To draw a lift chart, one of the response column class has to be the
            positive one. The parameter 'pos_label' represents this class.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        nbins: int, optional
            Curve number of bins.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        if pos_label not in self.classes_:
            raise ParameterError(
                "'pos_label' must be one of the response column classes"
            )
        return lift_chart(
            self.y,
            self.deploySQL(allSQL=True)[0].format(pos_label),
            self.test_relation,
            self.cursor,
            pos_label,
            ax=ax,
            nbins=nbins,
            **style_kwds,
        )

    # ---#
    def prc_curve(
        self,
        pos_label: (int, float, str) = None,
        ax=None,
        nbins: int = 30,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the model PRC curve.
        Parameters
        ----------
        pos_label: int/float/str, optional
            To draw the PRC curve, one of the response column class has to be the
            positive one. The parameter 'pos_label' represents this class.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        nbins: int, optional
            Curve number of bins.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        if pos_label not in self.classes_:
            raise ParameterError(
                "'pos_label' must be one of the response column classes"
            )
        return prc_curve(
            self.y,
            self.deploySQL(allSQL=True)[0].format(pos_label),
            self.test_relation,
            self.cursor,
            pos_label,
            ax=ax,
            nbins=nbins,
            **style_kwds,
        )

    # ---#
    def predict(
        self,
        vdf: (str, vDataFrame),
        X: list = [],
        name: str = "",
        cutoff: float = -1,
        pos_label=None,
        inplace: bool = True,
    ):
        """
        ---------------------------------------------------------------------------
        Predicts using the input relation.
        Parameters
        ----------
        vdf: str/vDataFrame
            Object to use to run the prediction. It can also be a customized relation
            but you need to englobe it using an alias. For example "(SELECT 1) x" is
            correct whereas "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the columns used to deploy the models. If empty, the model
            predictors will be used.
        name: str, optional
            Name of the added vcolumn. If empty, a name will be generated.
        cutoff: float, optional
            Cutoff for which the tested category will be accepted as prediction.
            If the parameter is not between 0 and 1, the class probability will
            be returned.
        pos_label: int/float/str, optional
            Class label.
        inplace: bool, optional
            If set to True, the prediction will be added to the vDataFrame.
        Returns
        -------
        vDataFrame
            the input object.
        """
        check_types(
            [
                ("name", name, [str],),
                ("cutoff", cutoff, [int, float],),
                ("X", X, [list],),
                ("vdf", vdf, [str, vDataFrame],),
            ],
        )
        # Accept a plain relation string by wrapping it into a vDataFrame.
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        X = [str_column(elem) for elem in X]
        # Default vcolumn name: model type + alphanumeric part of the model name.
        name = (
            "{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
            if not (name)
            else name
        )
        if len(self.classes_) == 2 and pos_label == None:
            pos_label = self.classes_[1]
        if inplace:
            return vdf.eval(
                name, self.deploySQL(pos_label=pos_label, cutoff=cutoff, X=X)
            )
        else:
            return vdf.copy().eval(
                name, self.deploySQL(pos_label=pos_label, cutoff=cutoff, X=X)
            )

    # ---#
    def roc_curve(
        self,
        pos_label: (int, float, str) = None,
        ax=None,
        nbins: int = 30,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the model ROC curve.
        Parameters
        ----------
        pos_label: int/float/str, optional
            To draw the ROC curve, one of the response column class has to be the
            positive one. The parameter 'pos_label' represents this class.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        nbins: int, optional
            Curve number of bins.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        if pos_label not in self.classes_:
            raise ParameterError(
                "'pos_label' must be one of the response column classes"
            )
        return roc_curve(
            self.y,
            self.deploySQL(allSQL=True)[0].format(pos_label),
            self.test_relation,
            self.cursor,
            pos_label,
            ax=ax,
            nbins=nbins,
            **style_kwds,
        )

    # ---#
    def score(
        self,
        method: str = "accuracy",
        pos_label: (int, float, str) = None,
        cutoff: float = -1,
    ):
        """
        ---------------------------------------------------------------------------
        Computes the model score.
        Parameters
        ----------
        pos_label: int/float/str, optional
            Label to consider as positive. All the other classes will be merged and
            considered as negative in case of multi classification.
        cutoff: float, optional
            Cutoff for which the tested category will be accepted as prediction.
            If the parameter is not between 0 and 1, an automatic cutoff is
            computed.
        method: str, optional
            The method to use to compute the score.
                accuracy    : Accuracy
                auc         : Area Under the Curve (ROC)
                best_cutoff : Cutoff which optimised the ROC Curve prediction.
                bm          : Informedness = tpr + tnr - 1
                csi         : Critical Success Index = tp / (tp + fn + fp)
                f1          : F1 Score
                logloss     : Log Loss
                mcc         : Matthews Correlation Coefficient
                mk          : Markedness = ppv + npv - 1
                npv         : Negative Predictive Value = tn / (tn + fn)
                prc_auc     : Area Under the Curve (PRC)
                precision   : Precision = tp / (tp + fp)
                recall      : Recall = tp / (tp + fn)
                specificity : Specificity = tn / (tn + fp)
        Returns
        -------
        float
            score
        """
        check_types([("cutoff", cutoff, [int, float],), ("method", method, [str],)])
        pos_label = (
            self.classes_[1]
            if (pos_label == None and len(self.classes_) == 2)
            else pos_label
        )
        # 'accuracy' is the only metric that works without a positive class.
        if (pos_label not in self.classes_) and (method != "accuracy"):
            raise ParameterError(
                "'pos_label' must be one of the response column classes"
            )
        elif (cutoff >= 1 or cutoff <= 0) and (method != "accuracy"):
            # Out-of-range cutoff: use the ROC-optimal one.
            cutoff = self.score("best_cutoff", pos_label, 0.5)
        if method in ("accuracy", "acc"):
            return accuracy_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
                pos_label,
            )
        elif method == "auc":
            # Probability-based metrics binarize the response with DECODE.
            return auc(
                "DECODE({}, '{}', 1, 0)".format(self.y, pos_label),
                self.deploySQL(allSQL=True)[0].format(pos_label),
                self.test_relation,
                self.cursor,
            )
        elif method == "prc_auc":
            return prc_auc(
                "DECODE({}, '{}', 1, 0)".format(self.y, pos_label),
                self.deploySQL(allSQL=True)[0].format(pos_label),
                self.test_relation,
                self.cursor,
            )
        elif method in ("best_cutoff", "best_threshold"):
            return roc_curve(
                "DECODE({}, '{}', 1, 0)".format(self.y, pos_label),
                self.deploySQL(allSQL=True)[0].format(pos_label),
                self.test_relation,
                self.cursor,
                best_threshold=True,
                nbins=1000,
            )
        elif method in ("recall", "tpr"):
            return recall_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("precision", "ppv"):
            return precision_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("specificity", "tnr"):
            return specificity_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("negative_predictive_value", "npv"):
            # NOTE(review): returns precision_score (ppv) although the
            # docstring defines npv = tn / (tn + fn) — likely a bug; confirm
            # whether a dedicated NPV metric exists in the metrics module.
            return precision_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("log_loss", "logloss"):
            return log_loss(
                "DECODE({}, '{}', 1, 0)".format(self.y, pos_label),
                self.deploySQL(allSQL=True)[0].format(pos_label),
                self.test_relation,
                self.cursor,
            )
        elif method == "f1":
            return f1_score(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method == "mcc":
            return matthews_corrcoef(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("bm", "informedness"):
            return informedness(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("mk", "markedness"):
            return markedness(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        elif method in ("csi", "critical_success_index"):
            return critical_success_index(
                self.y,
                self.deploySQL(pos_label, cutoff),
                self.test_relation,
                self.cursor,
            )
        else:
            raise ParameterError(
                "The parameter 'method' must be in accuracy|auc|prc_auc|best_cutoff|recall|precision|log_loss|negative_predictive_value|specificity|mcc|informedness|markedness|critical_success_index"
            )
# ---#
# Regression model interface: prediction, regression report and scoring.
# Time-series models (SARIMAX, VAR) and KNN/KernelDensity need special
# relation rewriting before the metrics can be computed.
class Regressor(Supervised):
    # ---#
    def predict(
        self, vdf: (str, vDataFrame), X: list = [], name: str = "", inplace: bool = True
    ):
        """
        ---------------------------------------------------------------------------
        Predicts using the input relation.
        Parameters
        ----------
        vdf: str/vDataFrame
            Object to use to run the prediction. It can also be a customized relation
            but you need to englobe it using an alias. For example "(SELECT 1) x" is
            correct whereas "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the columns used to deploy the models. If empty, the model
            predictors will be used.
        name: str, optional
            Name of the added vcolumn. If empty, a name will be generated.
        inplace: bool, optional
            If set to True, the prediction will be added to the vDataFrame.
        Returns
        -------
        vDataFrame
            the input object.
        """
        check_types(
            [
                ("name", name, [str],),
                ("X", X, [list],),
                ("vdf", vdf, [str, vDataFrame],),
            ],
        )
        # Accept a plain relation string by wrapping it into a vDataFrame.
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        X = [str_column(elem) for elem in X]
        # Default vcolumn name: model type + alphanumeric part of the model name.
        name = (
            "{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
            if not (name)
            else name
        )
        if inplace:
            return vdf.eval(name, self.deploySQL(X=X))
        else:
            return vdf.copy().eval(name, self.deploySQL(X=X))

    # ---#
    def regression_report(self, method: str = "metrics"):
        """
        ---------------------------------------------------------------------------
        Computes a regression report using multiple metrics to evaluate the model
        (r2, mse, max error...).
        Parameters
        ----------
        method: str, optional
            The method to use to compute the regression report.
                anova   : Computes the model ANOVA table.
                details : Computes the model details.
                metrics : Computes the model different metrics.
        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        check_types([("method", method, ["anova", "metrics", "details"],)])
        if method in ("anova", "details") and self.type in (
            "SARIMAX",
            "VAR",
            "KernelDensity",
        ):
            raise ModelError(
                "'{}' method is not available for {} models.".format(method, self.type)
            )
        prediction = self.deploySQL()
        if self.type == "SARIMAX":
            # Rewrite the transform relation: substitute the [VerticaPy_*]
            # placeholders with the real ts/response/exogenous columns, and
            # expose the model output as a 'prediction' column.
            test_relation = self.transform_relation
            test_relation = "(SELECT {} AS prediction, {} FROM {}) VERTICAPY_SUBTABLE".format(
                self.deploySQL(), "VerticaPy_y_copy AS {}".format(self.y), test_relation
            )
            test_relation = (
                test_relation.format(self.test_relation)
                .replace("[VerticaPy_ts]", self.ts)
                .replace("[VerticaPy_y]", self.y)
                .replace(
                    "[VerticaPy_key_columns]",
                    ", " + ", ".join([self.ts] + self.exogenous),
                )
            )
            for idx, elem in enumerate(self.exogenous):
                test_relation = test_relation.replace("[X{}]".format(idx), elem)
            prediction = "prediction"
        elif self.type == "KNeighborsRegressor":
            test_relation = self.deploySQL()
            prediction = "predict_neighbors"
        elif self.type == "VAR":
            # VAR predicts every X column: build one report column per series.
            relation = self.transform_relation.replace(
                "[VerticaPy_ts]", self.ts
            ).format(self.test_relation)
            for idx, elem in enumerate(self.X):
                relation = relation.replace("[X{}]".format(idx), elem)
            values = {
                "index": [
                    "explained_variance",
                    "max_error",
                    "median_absolute_error",
                    "mean_absolute_error",
                    "mean_squared_error",
                    "root_mean_squared_error",
                    "r2",
                    "r2_adj",
                ]
            }
            result = tablesample(values)
            for idx, y in enumerate(self.X):
                result.values[y] = regression_report(
                    y,
                    self.deploySQL()[idx],
                    relation,
                    self.cursor,
                    len(self.X) * self.parameters["p"],
                ).values["value"]
            return result
        elif self.type == "KernelDensity":
            test_relation = self.map
        else:
            test_relation = self.test_relation
        if method == "metrics":
            return regression_report(
                self.y, prediction, test_relation, self.cursor, len(self.X)
            )
        elif method == "anova":
            return anova_table(
                self.y, prediction, test_relation, len(self.X), self.cursor
            )
        elif method == "details":
            # Summary statistics of the response + goodness-of-fit figures.
            vdf = vdf_from_relation(
                "(SELECT {} FROM ".format(self.y)
                + self.input_relation
                + ") VERTICAPY_SUBTABLE",
                cursor=self.cursor,
            )
            n = vdf[self.y].count()
            kurt = vdf[self.y].kurt()
            skew = vdf[self.y].skew()
            jb = vdf[self.y].agg(["jb"])[self.y][0]
            R2 = self.score()
            R2_adj = 1 - ((1 - R2) * (n - 1) / (n - len(self.X) - 1))
            anova_T = anova_table(
                self.y, prediction, test_relation, len(self.X), self.cursor
            )
            F = anova_T["F"][0]
            p_F = anova_T["p_value"][0]
            return tablesample(
                {
                    "index": [
                        "Dep. Variable",
                        "Model",
                        "No. Observations",
                        "No. Predictors",
                        "R-squared",
                        "Adj. R-squared",
                        "F-statistic",
                        "Prob (F-statistic)",
                        "Kurtosis",
                        "Skewness",
                        "Jarque-Bera (JB)",
                    ],
                    "value": [
                        self.y,
                        self.type,
                        n,
                        len(self.X),
                        R2,
                        R2_adj,
                        F,
                        p_F,
                        kurt,
                        skew,
                        jb,
                    ],
                }
            )

    # ---#
    def score(self, method: str = "r2"):
        """
        ---------------------------------------------------------------------------
        Computes the model score.
        Parameters
        ----------
        method: str, optional
            The method to use to compute the score.
                max    : Max Error
                mae    : Mean Absolute Error
                median : Median Absolute Error
                mse    : Mean Squared Error
                msle   : Mean Squared Log Error
                r2     : R squared coefficient
                r2a    : R2 adjusted
                rmse   : Root Mean Squared Error
                var    : Explained Variance
        Returns
        -------
        float
            score
        """
        check_types([("method", method, [str],)])
        method = method.lower()
        # r2a / rmse are r2 / mse with an extra flag.
        if method in ("r2a", "r2adj", "r2adjusted"):
            method = "r2"
            adj = True
        else:
            adj = False
        if method in ("rmse",):
            method = "mse"
            root = True
        else:
            root = False
        if self.type == "SARIMAX":
            # Same placeholder substitution as in regression_report.
            test_relation = self.transform_relation
            test_relation = "(SELECT {} AS prediction, {} FROM {}) VERTICAPY_SUBTABLE".format(
                self.deploySQL(), "VerticaPy_y_copy AS {}".format(self.y), test_relation
            )
            test_relation = (
                test_relation.format(self.test_relation)
                .replace("[VerticaPy_ts]", self.ts)
                .replace("[VerticaPy_y]", self.y)
                .replace(
                    "[VerticaPy_key_columns]",
                    ", " + ", ".join([self.ts] + self.exogenous),
                )
            )
            for idx, elem in enumerate(self.exogenous):
                test_relation = test_relation.replace("[X{}]".format(idx), elem)
            prediction = "prediction"
        elif self.type == "VAR":
            # VAR: one score per predicted series, collected in a tablesample.
            relation = self.transform_relation.replace(
                "[VerticaPy_ts]", self.ts
            ).format(self.test_relation)
            for idx, elem in enumerate(self.X):
                relation = relation.replace("[X{}]".format(idx), elem)
            result = tablesample({"index": [method]})
        elif self.type == "KNeighborsRegressor":
            test_relation = self.deploySQL()
            prediction = "predict_neighbors"
        elif self.type == "KernelDensity":
            test_relation = self.map
            prediction = self.deploySQL()
        else:
            test_relation = self.test_relation
            prediction = self.deploySQL()
        if method in ("r2", "rsquared"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        r2_score(
                            y,
                            self.deploySQL()[idx],
                            relation,
                            self.cursor,
                            len(self.X) * self.parameters["p"],
                            adj,
                        )
                    ]
            else:
                return r2_score(
                    self.y, prediction, test_relation, self.cursor, len(self.X), adj
                )
        elif method in ("mae", "mean_absolute_error"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        mean_absolute_error(
                            y, self.deploySQL()[idx], relation, self.cursor
                        )
                    ]
            else:
                return mean_absolute_error(
                    self.y, prediction, test_relation, self.cursor
                )
        elif method in ("mse", "mean_squared_error"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        mean_squared_error(
                            y, self.deploySQL()[idx], relation, self.cursor, root
                        )
                    ]
            else:
                return mean_squared_error(
                    self.y, prediction, test_relation, self.cursor, root
                )
        elif method in ("msle", "mean_squared_log_error"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        mean_squared_log_error(
                            y, self.deploySQL()[idx], relation, self.cursor
                        )
                    ]
            else:
                return mean_squared_log_error(
                    self.y, prediction, test_relation, self.cursor
                )
        elif method in ("max", "max_error"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        max_error(y, self.deploySQL()[idx], relation, self.cursor)
                    ]
            else:
                return max_error(self.y, prediction, test_relation, self.cursor)
        elif method in ("median", "median_absolute_error"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        median_absolute_error(
                            y, self.deploySQL()[idx], relation, self.cursor
                        )
                    ]
            else:
                return median_absolute_error(
                    self.y, prediction, test_relation, self.cursor
                )
        elif method in ("var", "explained_variance"):
            if self.type == "VAR":
                for idx, y in enumerate(self.X):
                    result.values[y] = [
                        explained_variance(
                            y, self.deploySQL()[idx], relation, self.cursor
                        )
                    ]
            else:
                return explained_variance(
                    self.y, prediction, test_relation, self.cursor
                )
        else:
            raise ParameterError(
                "The parameter 'method' must be in r2|mae|mse|msle|max|median|var"
            )
        # Only reached in the VAR case: one row per metric, one column per series.
        return result.transpose()
# ---#
# Base class for unsupervised models: builds and runs the Vertica training
# statement, then caches the model attributes on the instance.
class Unsupervised(vModel):
    # ---#
    def fit(self, input_relation: (str, vDataFrame), X: list = []):
        """
        ---------------------------------------------------------------------------
        Trains the model.
        Parameters
        ----------
        input_relation: str/vDataFrame
            Train relation.
        X: list, optional
            List of the predictors. If empty, all the numerical columns will be used.
        Returns
        -------
        object
            model
        """
        check_types(
            [("input_relation", input_relation, [str, vDataFrame],), ("X", X, [list],)]
        )
        self.cursor = check_cursor(self.cursor, input_relation, True)[0]
        check_model(name=self.name, cursor=self.cursor)
        if isinstance(input_relation, vDataFrame):
            # Materialize the vDataFrame as a temporary view so the Vertica
            # training function can read it; dropped again after fitting.
            self.input_relation = input_relation.__genSQL__()
            schema, relation = schema_relation(self.name)
            relation = "{}._VERTICAPY_TEMPORARY_VIEW_{}".format(
                str_column(schema), get_session(self.cursor)
            )
            self.cursor.execute("DROP VIEW IF EXISTS {}".format(relation))
            self.cursor.execute(
                "CREATE VIEW {} AS SELECT * FROM {}".format(
                    relation, input_relation.__genSQL__()
                )
            )
            if not (X):
                X = input_relation.numcol()
        else:
            self.input_relation = input_relation
            relation = input_relation
            if not (X):
                X = vDataFrame(input_relation, self.cursor).numcol()
        self.X = [str_column(column) for column in X]
        parameters = vertica_param_dict(self)
        if "num_components" in parameters and not (parameters["num_components"]):
            del parameters["num_components"]
        fun = self.get_model_fun()[0]
        query = "SELECT {}('{}', '{}', '{}'".format(
            fun, self.name, relation, ", ".join(self.X)
        )
        # KMeans-family and Normalizer take an extra positional argument.
        if self.type in ("BisectingKMeans", "KMeans"):
            query += ", {}".format(parameters["n_cluster"])
        elif self.type == "Normalizer":
            query += ", {}".format(parameters["method"])
            del parameters["method"]
        if self.type != "Normalizer":
            query += " USING PARAMETERS "
        # A non-string 'init_method' means user-supplied initial centers:
        # write them to a temporary table and pass initial_centers_table.
        if (
            "init_method" in parameters
            and not (isinstance(parameters["init_method"], str))
            and self.type in ("KMeans", "BisectingKMeans")
        ):
            schema = schema_relation(self.name)[0]
            name = "VERTICAPY_KMEANS_INITIAL_{}".format(get_session(self.cursor))
            del parameters["init_method"]
            try:
                self.cursor.execute("DROP TABLE IF EXISTS {}.{}".format(schema, name))
            except:
                pass
            if len(self.parameters["init"]) != self.parameters["n_cluster"]:
                raise ParameterError(
                    "'init' must be a list of 'n_cluster' = {} points".format(
                        self.parameters["n_cluster"]
                    )
                )
            else:
                for item in self.parameters["init"]:
                    if len(X) != len(item):
                        raise ParameterError(
                            "Each points of 'init' must be of size len(X) = {}".format(
                                len(self.X)
                            )
                        )
                # Build the centers table as a UNION of one SELECT per point.
                query0 = []
                for i in range(len(self.parameters["init"])):
                    line = []
                    for j in range(len(self.parameters["init"][0])):
                        line += [str(self.parameters["init"][i][j]) + " AS " + X[j]]
                    line = ",".join(line)
                    query0 += ["SELECT " + line]
                query0 = " UNION ".join(query0)
                query0 = "CREATE TABLE {}.{} AS {}".format(schema, name, query0)
                self.cursor.execute(query0)
                query += "initial_centers_table = '{}.{}', ".format(schema, name)
        elif "init_method" in parameters:
            del parameters["init_method"]
            query += "init_method = '{}', ".format(self.parameters["init"])
        query += ", ".join(
            ["{} = {}".format(elem, parameters[elem]) for elem in parameters]
        )
        query += ")"
        try:
            executeSQL(self.cursor, query, "Fitting the model.")
            if isinstance(input_relation, vDataFrame):
                self.cursor.execute("DROP VIEW {};".format(relation))
        except:
            # Clean up the temporary view even when training fails.
            if isinstance(input_relation, vDataFrame):
                self.cursor.execute("DROP VIEW {};".format(relation))
            raise
        if self.type == "KMeans":
            try:
                # NOTE(review): 'schema' and 'name' are only bound in the
                # custom-init branch above; with a string init_method this
                # raises NameError, silently swallowed by the bare except.
                # Confirm the intended cleanup target.
                self.cursor.execute("DROP TABLE IF EXISTS {}.{}".format(schema, name))
            except:
                pass
            self.cluster_centers_ = self.get_attr("centers")
            # KMeans metrics are only available as a formatted string: parse
            # the individual figures out of it.
            result = self.get_attr("metrics").values["metrics"][0]
            values = {
                "index": [
                    "Between-Cluster Sum of Squares",
                    "Total Sum of Squares",
                    "Total Within-Cluster Sum of Squares",
                    "Between-Cluster SS / Total SS",
                    "converged",
                ]
            }
            values["value"] = [
                float(
                    result.split("Between-Cluster Sum of Squares: ")[1].split("\n")[0]
                ),
                float(result.split("Total Sum of Squares: ")[1].split("\n")[0]),
                float(
                    result.split("Total Within-Cluster Sum of Squares: ")[1].split(
                        "\n"
                    )[0]
                ),
                float(
                    result.split("Between-Cluster Sum of Squares: ")[1].split("\n")[0]
                )
                / float(result.split("Total Sum of Squares: ")[1].split("\n")[0]),
                result.split("Converged: ")[1].split("\n")[0] == "True",
            ]
            self.metrics_ = tablesample(values)
        elif self.type in ("BisectingKMeans"):
            self.metrics_ = self.get_attr("Metrics")
            self.cluster_centers_ = self.get_attr("BKTree")
        elif self.type in ("PCA"):
            self.components_ = self.get_attr("principal_components")
            self.explained_variance_ = self.get_attr("singular_values")
            self.mean_ = self.get_attr("columns")
        elif self.type in ("SVD"):
            self.singular_values_ = self.get_attr("right_singular_vectors")
            self.explained_variance_ = self.get_attr("singular_values")
        elif self.type in ("Normalizer"):
            self.param_ = self.get_attr("details")
        elif self.type == "OneHotEncoder":
            # The categories can live in either (or both) of two attributes;
            # fall back progressively: int+varchar -> int only -> varchar only.
            try:
                self.param_ = to_tablesample(
                    query="SELECT category_name, category_level::varchar, category_level_index FROM (SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'integer_categories')) VERTICAPY_SUBTABLE UNION ALL SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'varchar_categories')".format(
                        self.name, self.name
                    ),
                    cursor=self.cursor,
                    title="Getting Model Attributes.",
                )
            except:
                try:
                    self.param_ = to_tablesample(
                        query="SELECT category_name, category_level::varchar, category_level_index FROM (SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'integer_categories')) VERTICAPY_SUBTABLE".format(
                            self.name
                        ),
                        cursor=self.cursor,
                        title="Getting Model Attributes.",
                    )
                except:
                    self.param_ = self.get_attr("varchar_categories")
        return self
# ---#
class Preprocessing(Unsupervised):
    """
    Base class for Vertica preprocessing models (e.g. Normalizer,
    OneHotEncoder). Provides SQL deployment of the fitted model and
    (inverse) transformation of vDataFrames.
    """

    # ---#
    def deploySQL(
        self, key_columns: list = [], exclude_columns: list = [], X: list = [],
    ):
        """
        Returns the SQL code needed to deploy the model.

        Parameters
        ----------
        key_columns: list, optional
            Predictors used during the algorithm computation which will be deployed
            with the principal components.
        exclude_columns: list, optional
            Columns to exclude from the prediction.
        X: list, optional
            List of the columns used to deploy the model. If empty, the model
            predictors will be used.

        Returns
        -------
        str
            the SQL code needed to deploy the model.
        """
        check_types(
            [
                ("key_columns", key_columns, [list],),
                ("exclude_columns", exclude_columns, [list],),
                ("X", X, [list],),
            ]
        )
        X = [str_column(elem) for elem in X]
        fun = self.get_model_fun()[1]
        sql = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true'"
        if key_columns:
            sql += ", key_columns = '{}'".format(
                ", ".join([str_column(item) for item in key_columns])
            )
        if exclude_columns:
            sql += ", exclude_columns = '{}'".format(
                ", ".join([str_column(item) for item in exclude_columns])
            )
        if self.type == "OneHotEncoder":
            # Vertica expects an unquoted NULL keyword when no separator /
            # null column name was configured at fit time.
            separator = (
                "NULL"
                if self.parameters["separator"] == None
                else "'{}'".format(self.parameters["separator"])
            )
            null_column_name = (
                "NULL"
                if self.parameters["null_column_name"] == None
                else "'{}'".format(self.parameters["null_column_name"])
            )
            sql += ", drop_first = {}, ignore_null = {}, separator = {}, column_naming = '{}'".format(
                self.parameters["drop_first"],
                self.parameters["ignore_null"],
                separator,
                self.parameters["column_naming"],
            )
            # null_column_name is only meaningful for value-based naming.
            if self.parameters["column_naming"].lower() in ("values", "values_relaxed"):
                sql += ", null_column_name = {}".format(null_column_name,)
        sql += ")"
        return sql.format(fun, ", ".join(self.X if not (X) else X), self.name)

    # ---#
    def deployInverseSQL(
        self, key_columns: list = [], exclude_columns: list = [], X: list = []
    ):
        """
        Returns the SQL code needed to deploy the inverse model.

        Parameters
        ----------
        key_columns: list, optional
            Predictors used during the algorithm computation which will be deployed
            with the principal components.
        exclude_columns: list, optional
            Columns to exclude from the prediction.
        X: list, optional
            List of the columns used to deploy the inverse model. If empty, the model
            predictors will be used.

        Returns
        -------
        str
            the SQL code needed to deploy the inverse model.
        """
        if self.type == "OneHotEncoder":
            # One-hot encoding is not invertible through Vertica's API.
            raise ModelError(
                "method 'inverse_transform' is not supported for OneHotEncoder models."
            )
        # BUG FIX: the original omitted the exclude_columns type check that
        # deploySQL performs; validate all three list parameters consistently.
        check_types(
            [
                ("key_columns", key_columns, [list],),
                ("exclude_columns", exclude_columns, [list],),
                ("X", X, [list],),
            ]
        )
        X = [str_column(elem) for elem in X]
        fun = self.get_model_fun()[2]
        sql = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true'"
        if key_columns:
            sql += ", key_columns = '{}'".format(
                ", ".join([str_column(item) for item in key_columns])
            )
        if exclude_columns:
            sql += ", exclude_columns = '{}'".format(
                ", ".join([str_column(item) for item in exclude_columns])
            )
        sql += ")"
        return sql.format(fun, ", ".join(self.X if not (X) else X), self.name)

    # ---#
    def get_names(self, inverse: bool = False, X: list = []):
        """
        Returns the Transformation output names.

        Parameters
        ----------
        inverse: bool, optional
            If set to True, it returns the inverse transform output names.
        X: list, optional
            List of the columns used to get the model output names. If empty,
            the model predictors names will be used.

        Returns
        -------
        list
            Python list.
        """
        X = [str_column(elem) for elem in X]
        if not (X):
            X = self.X
        if self.type in ("PCA", "SVD") and not (inverse):
            # Decomposition output columns are named col1..colN by Vertica.
            n = self.parameters["n_components"]
            if not (n):
                n = len(self.X)
            return [f"col{i}" for i in range(1, n + 1)]
        elif self.type in ("OneHotEncoder") and not (inverse):
            names = []
            for column in self.X:
                # k counts the categories kept for this column, so the first
                # one can be skipped when drop_first is set.
                k = 0
                for i in range(len(self.param_["category_name"])):
                    if str_column(self.param_["category_name"][i]) == str_column(
                        column
                    ):
                        if (k != 0 or not (self.parameters["drop_first"])) and (
                            not (self.parameters["ignore_null"])
                            or self.param_["category_level"][i] != None
                        ):
                            if self.parameters["column_naming"] == "indices":
                                names += [
                                    '"'
                                    + str_column(column)[1:-1]
                                    + "{}{}".format(
                                        self.parameters["separator"],
                                        self.param_["category_level_index"][i],
                                    )
                                    + '"'
                                ]
                            else:
                                names += [
                                    '"'
                                    + str_column(column)[1:-1]
                                    + "{}{}".format(
                                        self.parameters["separator"],
                                        self.param_["category_level"][i].lower()
                                        if self.param_["category_level"][i] != None
                                        else self.parameters["null_column_name"],
                                    )
                                    + '"'
                                ]
                        k += 1
            return names
        else:
            # Inverse transformation (or Normalizer-style models) keeps the
            # original column names.
            return X

    # ---#
    def inverse_transform(
        self, vdf: (str, vDataFrame) = None, X: list = [],
    ):
        """
        Applies the Inverse Model on a vDataFrame.

        Parameters
        ----------
        vdf: str/vDataFrame, optional
            input vDataFrame. It can also be a customized relation but you need to
            englobe it using an alias. For example "(SELECT 1) x" is correct whereas
            "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the input vcolumns.

        Returns
        -------
        vDataFrame
            object result of the model transformation.
        """
        if self.type == "OneHotEncoder":
            raise ModelError(
                "method 'inverse_transform' is not supported for OneHotEncoder models."
            )
        check_types([("X", X, [list],)])
        if not (vdf):
            vdf = self.input_relation
        if not (X):
            X = self.get_names()
        check_types([("vdf", vdf, [str, vDataFrame],),],)
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        columns_check(X, vdf)
        X = vdf_columns_names(X, vdf)
        relation = vdf.__genSQL__()
        # Columns not part of the transformation are passed through untouched.
        exclude_columns = vdf.get_columns(exclude_columns=X)
        all_columns = vdf.get_columns()
        main_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
            self.deployInverseSQL(exclude_columns, exclude_columns, all_columns),
            relation,
        )
        return vdf_from_relation(main_relation, "Inverse Transformation", self.cursor,)

    # ---#
    def transform(
        self, vdf: (str, vDataFrame) = None, X: list = [],
    ):
        """
        Applies the model on a vDataFrame.

        Parameters
        ----------
        vdf: str/vDataFrame, optional
            Input vDataFrame. It can also be a customized relation but you need to
            englobe it using an alias. For example "(SELECT 1) x" is correct whereas
            "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the input vcolumns.

        Returns
        -------
        vDataFrame
            object result of the model transformation.
        """
        check_types(
            [("X", X, [list],),]
        )
        if not (vdf):
            vdf = self.input_relation
        if not (X):
            X = self.X
        check_types([("vdf", vdf, [str, vDataFrame],),],)
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        columns_check(X, vdf)
        X = vdf_columns_names(X, vdf)
        relation = vdf.__genSQL__()
        # Columns not part of the transformation are passed through untouched.
        exclude_columns = vdf.get_columns(exclude_columns=X)
        all_columns = vdf.get_columns()
        main_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
            self.deploySQL(exclude_columns, exclude_columns, all_columns), relation
        )
        # BUG FIX: the original labelled this (forward) transformation relation
        # "Inverse Transformation" — a copy-paste from inverse_transform.
        return vdf_from_relation(main_relation, "Transformation", self.cursor,)
# ---#
class Decomposition(Preprocessing):
    """
    Base class for Vertica decomposition models (PCA, SVD). Extends
    Preprocessing with component-count / cutoff deployment options and a
    reconstruction-error score.
    """

    # ---#
    def deploySQL(
        self,
        n_components: int = 0,
        cutoff: float = 1,
        key_columns: list = [],
        exclude_columns: list = [],
        X: list = [],
    ):
        """
        Returns the SQL code needed to deploy the model.

        Parameters
        ----------
        n_components: int, optional
            Number of components to return. If set to 0, all the components will be
            deployed.
        cutoff: float, optional
            Specifies the minimum accumulated explained variance. Components are taken
            until the accumulated explained variance reaches this value.
        key_columns: list, optional
            Predictors used during the algorithm computation which will be deployed
            with the principal components.
        exclude_columns: list, optional
            Columns to exclude from the prediction.
        X: list, optional
            List of the columns used to deploy the model. If empty, the model
            predictors will be used.

        Returns
        -------
        str
            the SQL code needed to deploy the model.
        """
        check_types(
            [
                ("n_components", n_components, [int, float],),
                ("cutoff", cutoff, [int, float],),
                ("key_columns", key_columns, [list],),
                ("exclude_columns", exclude_columns, [list],),
                ("X", X, [list],),
            ]
        )
        X = [str_column(elem) for elem in X]
        fun = self.get_model_fun()[1]
        sql = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true'"
        if key_columns:
            sql += ", key_columns = '{}'".format(
                ", ".join([str_column(item) for item in key_columns])
            )
        if exclude_columns:
            sql += ", exclude_columns = '{}'".format(
                ", ".join([str_column(item) for item in exclude_columns])
            )
        # An explicit component count takes precedence over the variance cutoff.
        if n_components:
            sql += ", num_components = {}".format(n_components)
        else:
            sql += ", cutoff = {}".format(cutoff)
        sql += ")"
        return sql.format(fun, ", ".join(self.X if not (X) else X), self.name)

    # ---#
    def score(
        self, X: list = [], input_relation: str = "", method: str = "avg", p: int = 2
    ):
        """
        Returns the decomposition Score on a dataset for each transformed column.
        It is the average / median of the p-distance between the real column and
        its result after applying the decomposition model and its inverse.

        Parameters
        ----------
        X: list, optional
            List of the columns used to deploy the model. If empty, the model
            predictors will be used.
        input_relation: str, optional
            Input Relation. If empty, the model input relation will be used.
        method: str, optional
            Distance Method used to do the scoring.
                avg    : The average is used as aggregation.
                median : The median is used as aggregation.
        p: int, optional
            The p of the p-distance.

        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.
        """
        check_types(
            [
                ("X", X, [list],),
                ("input_relation", input_relation, [str],),
                ("method", str(method).lower(), ["avg", "median"],),
                ("p", p, [int, float],),
            ]
        )
        fun = self.get_model_fun()
        if not (X):
            X = self.X
        if not (input_relation):
            input_relation = self.input_relation
        method = str(method).upper()
        if method == "MEDIAN":
            # Vertica's exact MEDIAN is not usable here; use the approximate one.
            method = "APPROXIMATE_MEDIAN"
        n_components = self.parameters["n_components"]
        if not (n_components):
            n_components = len(X)
        # Keep a copy of the original columns (col_init*) next to the projected
        # components so the reconstruction error can be computed per column.
        col_init_1 = ["{} AS col_init{}".format(X[idx], idx) for idx in range(len(X))]
        col_init_2 = ["col_init{}".format(idx) for idx in range(len(X))]
        cols = ["col{}".format(idx + 1) for idx in range(n_components)]
        # Step 1: project the data onto the components.
        query = "SELECT {}({} USING PARAMETERS model_name = '{}', key_columns = '{}', num_components = {}) OVER () FROM {}".format(
            fun[1],
            ", ".join(self.X),
            self.name,
            ", ".join(self.X),
            n_components,
            input_relation,
        )
        query = "SELECT {} FROM ({}) VERTICAPY_SUBTABLE".format(
            ", ".join(col_init_1 + cols), query
        )
        # Step 2: apply the inverse transformation on the projection.
        query = "SELECT {}({} USING PARAMETERS model_name = '{}', key_columns = '{}', exclude_columns = '{}', num_components = {}) OVER () FROM ({}) y".format(
            fun[2],
            ", ".join(col_init_2 + cols),
            self.name,
            ", ".join(col_init_2),
            ", ".join(col_init_2),
            n_components,
            query,
        )
        # Step 3: aggregate the per-row p-distance between original and
        # reconstructed values, per column.
        query = "SELECT 'Score' AS 'index', {} FROM ({}) z".format(
            ", ".join(
                [
                    "{}(POWER(ABS(POWER({}, {}) - POWER({}, {})), {})) AS {}".format(
                        method,
                        X[idx],
                        p,
                        "col_init{}".format(idx),
                        p,
                        float(1 / p),
                        X[idx],
                    )
                    for idx in range(len(X))
                ]
            ),
            query,
        )
        result = to_tablesample(
            query, cursor=self.cursor, title="Getting Model Score.",
        ).transpose()
        return result

    # ---#
    def transform(
        self,
        vdf: (str, vDataFrame) = None,
        X: list = [],
        n_components: int = 0,
        cutoff: float = 1,
    ):
        """
        Applies the model on a vDataFrame.

        Parameters
        ----------
        vdf: str/vDataFrame, optional
            Input vDataFrame. It can also be a customized relation but you need to
            englobe it using an alias. For example "(SELECT 1) x" is correct whereas
            "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the input vcolumns.
        n_components: int, optional
            Number of components to return. If set to 0, all the components will
            be deployed.
        cutoff: float, optional
            Specifies the minimum accumulated explained variance. Components are
            taken until the accumulated explained variance reaches this value.

        Returns
        -------
        vDataFrame
            object result of the model transformation.
        """
        check_types(
            [
                ("n_components", n_components, [int, float],),
                ("cutoff", cutoff, [int, float],),
                ("X", X, [list],),
            ]
        )
        if not (vdf):
            vdf = self.input_relation
        if not (X):
            X = self.X
        check_types([("vdf", vdf, [str, vDataFrame],),],)
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        columns_check(X, vdf)
        X = vdf_columns_names(X, vdf)
        relation = vdf.__genSQL__()
        # Columns not part of the transformation are passed through untouched.
        exclude_columns = vdf.get_columns(exclude_columns=X)
        all_columns = vdf.get_columns()
        main_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
            self.deploySQL(
                n_components, cutoff, exclude_columns, exclude_columns, all_columns
            ),
            relation,
        )
        # BUG FIX: the original labelled this (forward) transformation relation
        # "Inverse Transformation" — a copy-paste from inverse_transform.
        return vdf_from_relation(main_relation, "Transformation", self.cursor,)
# ---#
class Clustering(Unsupervised):
    """Base class for Vertica clustering models (KMeans, BisectingKMeans...)."""

    # ---#
    def predict(
        self, vdf: (str, vDataFrame), X: list = [], name: str = "", inplace: bool = True
    ):
        """
        Predicts using the input relation.

        Parameters
        ----------
        vdf: str/vDataFrame
            Object to use to run the prediction. It can also be a customized relation
            but you need to englobe it using an alias. For example "(SELECT 1) x" is
            correct whereas "(SELECT 1)" or "SELECT 1" are incorrect.
        X: list, optional
            List of the columns used to deploy the models. If empty, the model
            predictors will be used.
        name: str, optional
            Name of the added vcolumn. If empty, a name will be generated.
        inplace: bool, optional
            If set to True, the prediction will be added to the vDataFrame.

        Returns
        -------
        vDataFrame
            the input object.
        """
        check_types(
            [
                ("name", name, [str],),
                ("X", X, [list],),
                ("vdf", vdf, [str, vDataFrame],),
            ],
        )
        # A raw relation string is wrapped into a vDataFrame first.
        if isinstance(vdf, str):
            vdf = vdf_from_relation(relation=vdf, cursor=self.cursor)
        X = [str_column(elem) for elem in X]
        if not (name):
            # Default column name: "<model type>_<model name without symbols>".
            sanitized = "".join(ch for ch in self.name if ch.isalnum())
            name = "{}_".format(self.type) + sanitized
        target = vdf if inplace else vdf.copy()
        return target.eval(name, self.deploySQL(X=X))
| StarcoderdataPython |
1789023 | <filename>GUnicornConfig.py
import logging
import sys
from GServerHooks import ServerHooks
from gunicorn import glogging
from pythonjsonlogger import jsonlogger
class CustomLogger(glogging.Logger):
    """Custom logger for Gunicorn log messages.

    Replaces every handler Gunicorn would install with a single stdout
    StreamHandler that emits JSON-formatted records.
    """

    def _set_handler(self, log, output, fmt, stream=None):
        # `output`, `fmt` and `stream` are part of the gunicorn Logger
        # interface but deliberately ignored: we always log JSON to stdout.
        log_handler = logging.StreamHandler(sys.stdout)
        log_handler.setLevel(logging.INFO)
        formatter = jsonlogger.JsonFormatter('%(asctime)%(levelname)%(filename)%(name)%(lineno)%(process)%(thread)%(message).1000s')
        log_handler.setFormatter(formatter)
        # BUG FIX: the original looped on log.hasHandlers(), which also
        # reports handlers attached to ancestor loggers; when only an
        # ancestor has handlers, log.handlers is empty and
        # log.handlers[0] raises IndexError (or the loop never ends).
        # Only remove handlers attached to this logger itself.
        while log.handlers:
            log.removeHandler(log.handlers[0])
        log.addHandler(log_handler)
# --- Gunicorn server configuration (read by gunicorn at startup) ---
# Listen on all interfaces, port 8005.
bind = '0.0.0.0:8005'
# Maximum number of pending connections.
backlog = 2048
# 5 worker processes, each with 5 threads.
workers = 5
threads = 5
# gevent: cooperative greenlet-based workers for I/O-bound workloads.
worker_class = 'gevent'
# Max simultaneous clients per gevent worker.
worker_connections = 1000
# Kill workers silent for more than 30s; keep idle keep-alive sockets 5s.
timeout = 30
keepalive = 5
# spew=True would trace every line executed (debug only).
spew = False
# Run in the foreground (expected under a process supervisor / container).
daemon = False
# errorlog = '-'
# loglevel = 'info'
# accesslog = '-'
# access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
# Route all gunicorn logging through the JSON logger defined above.
logger_class = CustomLogger
# do not change this
proc_name = "wc_app"
# Lifecycle hooks delegated to GServerHooks.ServerHooks.
when_ready = ServerHooks.when_ready
on_exit = ServerHooks.on_exit
worker_exit = ServerHooks.on_worker_exit
worker_abort = ServerHooks.on_worker_abort
5166361 | #Gets data export
#(c) Leanplum 2015
import urllib2
import re
import json
import time
startDate = raw_input('Enter the startDate you want data for: ')
appId = raw_input('Enter your appId: ')
clientKey = raw_input('Enter your clientKey: ')
# Optional Entries - make sure to uncomment out urlTwo
# endDate = raw_input('Enter the endDate you want data for: ')
# s3ObjectPrefix = raw_inpute('Enter the s3ObjectPrefix: ')
#NOTE: If using for one app, it may be beneficial to store the appId and clientKey
#appId = 'enter your appId as a string here'
#clientKey = 'enter your clientKey as a string here'
#Making the URL that will return the jobId
urlOne = "http://www.leanplum.com/api?appId=" + appId + "&clientKey=" + clientKey + "&apiVersion=1.0.6&action=exportData&startDate=" + startDate
#urlAlt = "http://www.leanplum.com/api?appId=" + appId + "&clientKey=" + clientKey + "&apiVersion=1.0.6&action=exportData&startDate=" + startDate + "&endDate=" + endDate + "&s3ObjectPrefix=" + s3ObjectPrefix + "&s3BucketName=" + s3BucketName +"&s3AccessKey=" + s3AccessKey + "&s3AccessId=" + s3AccessId + "&compressData=" + compressData
print urlOne
#Getting the jobId
# MAKE SURE TO REPLACE urlOne with urlAlt IF YOU ARE USING THE OTHER ENTRIES
response = urllib2.urlopen(urlOne)
html = response.read()
fullHTML = json.loads(html)
jobId = fullHTML['response'][0]['jobId']
#Making the URL that will return the link for the data export
urlTwo = "http://www.leanplum.com/api?appId=" + appId + "&clientKey=" + clientKey + "&apiVersion=1.0.6&action=getExportResults&jobId=" + jobId
print urlTwo
loading = True
while(loading):
responseTwo = urllib2.urlopen(urlTwo)
htmlTwo = responseTwo.read()
fullTwo = json.loads(htmlTwo)
state = fullTwo['response'][0]['state']
if (state == 'FINISHED'):
loading = False
else:
print "Running, please wait for job to finish"
time.sleep(10)
#getting the URLs for data export
fullText = json.loads(htmlTwo)
numURLs = len(fullText['response'][0]['files'])
#saving to json
for x in range(0, numURLs):
print "Saving file %d of %d" % (x+1, numURLs)
dataExportUrl = fullText['response'][0]['files'][x]
responseDataExport = urllib2.urlopen(dataExportUrl)
dataExport = responseDataExport.read()
with open(date + 'dataExport' + str(x), 'w') as fid:
fid.write(dataExport) | StarcoderdataPython |
11390143 | <gh_stars>0
#!/usr/bin/env python
import asyncio
import random
import websockets
class WebsocketClient:
    """Reconnecting websocket client that periodically sends a message and
    echoes everything received from the server."""

    def __init__(self):
        # Random id used to tag outgoing messages (not guaranteed unique).
        self.clientID = random.randint(0, 100)
        # Set by eventGenerator; producer waits on it before sending.
        self.event = asyncio.Event()
    async def eventGenerator(self):
        """Fire the send event every 1.5 seconds, forever."""
        while True:
            await asyncio.sleep(1.5)
            self.event.set()
    async def consumer(self, message):
        """Handle one message received from the server (just print it)."""
        print(f'client rec:{message}')
    async def producer(self, websocket):
        """Block until the next event tick, then return the message to send."""
        await self.event.wait()
        self.event.clear()
        return f'msg from client:{self.clientID}!'
    async def connectToServer(self):
        """Connect to the server and run the receive/send loop, retrying
        every 5 seconds while the server is unreachable."""
        while True:
            try:
                async with websockets.connect('ws://localhost:8989') as websocket:
                    while True:
                        # Race a receive against the next outgoing message;
                        # whichever finishes first is handled, the other is
                        # cancelled and recreated on the next iteration.
                        listener_task = asyncio.ensure_future(websocket.recv())
                        producer_task = asyncio.ensure_future(self.producer(websocket))
                        done, pending = await asyncio.wait(
                            [listener_task, producer_task],
                            return_when=asyncio.FIRST_COMPLETED)
                        if listener_task in done:
                            message = listener_task.result()
                            await self.consumer(message)
                        else:
                            listener_task.cancel()
                        if producer_task in done:
                            message = producer_task.result()
                            await websocket.send(message)
                        else:
                            producer_task.cancel()
            except Exception as e:
                # Server not up yet or connection dropped: wait and retry.
                if type(e) == ConnectionRefusedError or type(e) == websockets.exceptions.ConnectionClosedError:
                    print('Client is waiting for server launching...')
                    await asyncio.sleep(5)
                else:
                    raise e
# Run the connection loop and the event generator concurrently until
# interrupted from the keyboard.
wsc = WebsocketClient()
tasks = [wsc.connectToServer(), wsc.eventGenerator()]
try:
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))
except KeyboardInterrupt:
    print("Client crash...")
8071067 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
MAX_ALLOWED_WARNINGS = 20
COMMON_TYPE = 'MAGMA-COMMON'
MME_TYPE = 'MAGMA-OAI-MME'
SCTPD_TYPE = 'MAGMA-SCTPD'
U18_BUILD_LOG_FILE = 'build_magma_mme.log'
RHEL8_BUILD_LOG_FILE = 'build_magma_mme_rhel8.log'
REPORT_NAME = 'build_results_magma_oai_mme.html'
class HtmlReport():
"""Creates Executive Summary HTML reports."""
    def __init__(self):
        """Initialize obeject."""
        # Jenkins job identification (filled in by the caller before rendering).
        self.job_name = ''
        self.mode = ''
        self.job_id = ''
        self.job_url = ''
        # 'TEMPLATE_TIME' is substituted later by the CI pipeline.
        self.job_start_time = 'TEMPLATE_TIME'
        # Git context: repository, source/target branches and commits.
        self.git_url = ''
        self.git_src_branch = ''
        self.git_src_commit = ''
        # Optional commit message; None means "do not render the row".
        self.git_src_commit_msg = None
        # True when the build was triggered by a pull/merge request.
        self.git_merge_request = False
        self.git_target_branch = ''
        self.git_target_commit = ''
        # Per-variant lists of compiler errors/warnings collected while parsing
        # the build logs (one entry per build variant in self.variant).
        self.errorWarningInfo = []
        self.variant = []
    def generate_build_report(self):
        """Create the BUILD HTML report.

        Opens REPORT_NAME in the current directory and writes the full
        report: header, summary table (build status per stage), footer and
        warning details. Exits the process if the file cannot be opened.
        """
        cwd = os.getcwd()
        try:
            self.file = open(os.path.join(cwd, REPORT_NAME), 'w')
        except IOError:
            sys.exit('Could not open write output file')
        # Section order matters: header first, then the summary table rows,
        # then the per-warning details, then the closing footer.
        self.generate_header()
        self.add_build_summary_header()
        self.add_compile_rows()
        self.add_copy_to_target_image_row()
        self.add_copy_conf_tools_to_target_mage_row()
        self.add_image_size_row()
        self.add_build_summary_footer()
        self.add_details()
        self.generate_footer()
        self.file.close()
    def generate_header(self):
        """Append HTML header to file.

        Writes the <head> boilerplate (bootstrap CSS/JS), the banner table
        with the Magma and OAI logos, then the build-information summary
        produced by generate_build_summary().
        """
        # HTML Header
        header = '<!DOCTYPE html>\n'
        header += '<html class="no-js" lang="en-US">\n'
        header += '<head>\n'
        header += '  <meta name="viewport" content="width=device-width, initial-scale=1">\n'
        header += '  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">\n'
        header += '  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>\n'
        header += '  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>\n'
        header += '  <title>MAGMA/OAI Core Network Build Results for ' + self.job_name + ' job build #' + self.job_id + '</title>\n'
        header += '</head>\n'
        header += '<body><div class="container">\n'
        header += '  <br>\n'
        header += '  <table width = "100%" style="border-collapse: collapse; border: none;">\n'
        header += '    <tr style="border-collapse: collapse; border: none;">\n'
        # SVG has a invisible background color -- adding it.
        header += '      <td bgcolor="#5602a4" style="border-collapse: collapse; border: none;">\n'
        header += '        <a href="https://www.magmacore.org/">\n'
        header += '          <img src="https://www.magmacore.org/img/magma-logo.svg" alt="" border="none" height=50 width=150>\n'
        header += '          </img>\n'
        header += '        </a>\n'
        header += '      </td>\n'
        header += '      <td align = "center" style="border-collapse: collapse; border: none; vertical-align: center;">\n'
        header += '        <b><font size = "6">Job Summary -- Job: ' + self.job_name + ' -- Build-ID: <a href="' + self.job_url + '">' + self.job_id + '</a></font></b>\n'
        header += '      </td>\n'
        header += '      <td style="border-collapse: collapse; border: none;">\n'
        header += '        <a href="http://www.openairinterface.org/">\n'
        header += '          <img src="http://www.openairinterface.org/wp-content/uploads/2016/03/cropped-oai_final_logo2.png" alt="" border="none" height=50 width=150>\n'
        header += '          </img>\n'
        header += '        </a>\n'
        header += '      </td>\n'
        header += '    </tr>\n'
        header += '  </table>\n'
        header += '  <br>\n'
        self.file.write(header)
        # The build-information table is generated separately and appended.
        summary = self.generate_build_summary()
        self.file.write(summary)
    def generate_build_summary(self):
        """
        Create build summary string.

        Renders an HTML table with start time, trigger type, repository and
        git details. The set of rows depends on whether the build was
        triggered by a pull request (source + target branches/commits) or by
        a push event (single branch/commit). Commit-message rows are only
        emitted when a message is available.

        Returns:
            a string with build information.
        """
        summary = ''
        # Build Info Summary
        summary += '  <table class="table-bordered" width = "80%" align = "center" border = "1">\n'
        summary += '    <tr>\n'
        summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-time"></span> Build Start Time</td>\n'
        # date_formatted = re.sub('\..*', '', self.created
        summary += '      <td>' + self.job_start_time + '</td>\n'
        summary += '    </tr>\n'
        summary += '    <tr>\n'
        summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-wrench"></span> Build Trigger</td>\n'
        if self.git_merge_request:
            summary += '      <td>Pull Request</td>\n'
        else:
            summary += '      <td>Push Event</td>\n'
        summary += '    </tr>\n'
        summary += '    <tr>\n'
        summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-cloud-upload"></span> GIT Repository</td>\n'
        summary += '      <td><a href="' + self.git_url + '">' + self.git_url + '</a></td>\n'
        summary += '    </tr>\n'
        if self.git_merge_request:
            # Pull request build: link, title, then source and target refs.
            # TEMPLATE_* placeholders are substituted later by the CI pipeline.
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-link"></span> Pull Request Link</td>\n'
            summary += '      <td><a href="TEMPLATE_PULL_REQUEST_LINK">TEMPLATE_PULL_REQUEST_LINK</a></td>\n'
            summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-header"></span> Pull Request Title</td>\n'
            summary += '      <td>TEMPLATE_PULL_REQUEST_TEMPLATE</td>\n'
            summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-log-out"></span> Source Branch</td>\n'
            summary += '      <td>' + self.git_src_branch + '</td>\n'
            summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-tag"></span> Source Commit ID</td>\n'
            summary += '      <td>' + self.git_src_commit + '</td>\n'
            summary += '    </tr>\n'
            if (self.git_src_commit_msg is not None):
                summary += '    <tr>\n'
                summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-comment"></span> Source Commit Message</td>\n'
                summary += '      <td>' + self.git_src_commit_msg + '</td>\n'
                summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-log-in"></span> Target Branch</td>\n'
            summary += '      <td>' + self.git_target_branch + '</td>\n'
            summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-tag"></span> Target Commit ID</td>\n'
            summary += '      <td>' + self.git_target_commit + '</td>\n'
            summary += '    </tr>\n'
        else:
            # Push event build: single branch / commit (+ optional message).
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-tree-deciduous"></span> Branch</td>\n'
            summary += '      <td>' + self.git_src_branch + '</td>\n'
            summary += '    </tr>\n'
            summary += '    <tr>\n'
            summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-tag"></span> Commit ID</td>\n'
            summary += '      <td>' + self.git_src_commit + '</td>\n'
            summary += '    </tr>\n'
            if (self.git_src_commit_msg is not None):
                summary += '    <tr>\n'
                summary += '      <td bgcolor="lightcyan" > <span class="glyphicon glyphicon-comment"></span> Commit Message</td>\n'
                summary += '      <td>' + self.git_src_commit_msg + '</td>\n'
                summary += '    </tr>\n'
        summary += '  </table>\n'
        summary += '  <br>\n'
        return summary
def generate_footer(self):
"""Append the HTML footer to report."""
self.file.write(' <div class="well well-lg">End of Build Report -- Copyright <span class="glyphicon glyphicon-copyright-mark"></span> 2020 <a href="http://www.openairinterface.org/">OpenAirInterface</a>. All Rights Reserved.</div>\n')
self.file.write('</div></body>\n')
self.file.write('</html>\n')
    def add_build_summary_header(self):
        """Append Build Information Summary (Header).

        Opens the summary table and writes its header row. A result column is
        added only for each build variant whose log file is present in
        ./archives (Ubuntu-18 and/or RHEL-8).
        """
        self.file.write('  <h2>Docker/Podman Images Build Summary</h2>\n')
        self.file.write('  <table class="table-bordered" width = "100%" align = "center" border = "1">\n')
        self.file.write('    <tr bgcolor="#33CCFF" >\n')
        self.file.write('      <th>Stage Name</th>\n')
        self.file.write('      <th>Image Kind</th>\n')
        cwd = os.getcwd()
        # One result column per available build-log variant.
        if os.path.isfile(cwd + '/archives/' + U18_BUILD_LOG_FILE):
            self.file.write('      <th>MAGMA - OAI MME cNF (Ubuntu-18)</th>\n')
        if os.path.isfile(cwd + '/archives/' + RHEL8_BUILD_LOG_FILE):
            self.file.write('      <th>MAGMA - OAI MME cNF (RHEL-8)</th>\n')
        self.file.write('    </tr>\n')
def add_build_summary_footer(self):
"""Append Build Information Summary (Footer)."""
self.file.write(' </table>\n')
self.file.write(' <br>\n')
def add_compile_rows(self):
"""Add rows for the compilation."""
self.file.write(' <tr>\n')
self.file.write(' <td rowspan=2 bgcolor="lightcyan" ><b>magma-common</b> Compile / Build</td>\n')
self.analyze_build_log(COMMON_TYPE)
self.file.write(' </tr>\n')
self.file.write(' <tr>\n')
self.analyze_compile_log(COMMON_TYPE)
self.file.write(' </tr>\n')
self.file.write(' <tr>\n')
self.file.write(' <td rowspan=2 bgcolor="lightcyan" ><b>magma-oai-mme</b> Compile / Build</td>\n')
self.analyze_build_log(MME_TYPE)
self.file.write(' </tr>\n')
self.file.write(' <tr>\n')
self.analyze_compile_log(MME_TYPE)
self.file.write(' </tr>\n')
self.file.write(' <tr>\n')
self.file.write(' <td rowspan=2 bgcolor="lightcyan" ><b>magma-sctpd</b> Compile / Build</td>\n')
self.analyze_build_log(SCTPD_TYPE)
self.file.write(' </tr>\n')
self.file.write(' <tr>\n')
self.analyze_compile_log(SCTPD_TYPE)
self.file.write(' </tr>\n')
    def analyze_build_log(self, nf_type):
        """
        Add the row about build status.

        Scans each available build log (Ubuntu-18 and RHEL-8) for the ninja
        section belonging to *nf_type* and writes one green (OK) or red (KO)
        cell per log, depending on whether the final link line of that target
        was found inside the section. A missing log file produces a red cell.

        Args:
            nf_type: which build part (COMMON_TYPE, MME_TYPE or SCTPD_TYPE)
        """
        self.file.write('      <td>Builder Image</td>\n')
        cwd = os.getcwd()
        log_file_names = [U18_BUILD_LOG_FILE, RHEL8_BUILD_LOG_FILE]
        for log_file_name in log_file_names:
            if os.path.isfile(cwd + '/archives/' + log_file_name):
                status = False
                # Section boundaries: the end pattern is the command that
                # starts the *next* target's build in the Dockerfile.
                if nf_type == COMMON_TYPE:
                    section_start_pattern = 'ninja -C /build/c/magma_common'
                    section_end_pattern = 'cmake /magma/lte/gateway/c/core/oai -DCMAKE_BUILD_TYPE=Debug -DS6A_OVER_GRPC=False -GNinja'
                if nf_type == MME_TYPE:
                    section_start_pattern = 'ninja -C /build/c/core/oai'
                    section_end_pattern = 'cmake /magma/orc8r/gateway/c/common -DCMAKE_BUILD_TYPE=Debug -GNinja'
                if nf_type == SCTPD_TYPE:
                    section_start_pattern = 'ninja -C /build/c/sctpd'
                    section_end_pattern = 'FROM ubuntu:bionic as magma-mme'
                section_status = False
                with open(cwd + '/archives/' + log_file_name, 'r') as logfile:
                    for line in logfile:
                        my_res = re.search(section_start_pattern, line)
                        if my_res is not None:
                            section_status = True
                        my_res = re.search(section_end_pattern, line)
                        if my_res is not None:
                            section_status = False
                        if section_status:
                            # The link line only appears when the target built.
                            if nf_type == COMMON_TYPE:
                                my_res = re.search('Linking CXX static library eventd/libEVENTD.a', line)
                            if nf_type == MME_TYPE:
                                my_res = re.search('Linking CXX executable core/oai_mme/mme', line)
                            if nf_type == SCTPD_TYPE:
                                my_res = re.search('Linking CXX executable sctpd', line)
                            if my_res is not None:
                                status = True
                    logfile.close()
                if status:
                    cell_msg = '      <td bgcolor="LimeGreen"><pre style="border:none; background-color:LimeGreen"><b>'
                    cell_msg += 'OK:\n'
                else:
                    cell_msg = '      <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                    cell_msg += 'KO:\n'
                if nf_type == COMMON_TYPE:
                    cell_msg += ' -- ninja -C /build/c/magma_common</b></pre></td>\n'
                if nf_type == MME_TYPE:
                    cell_msg += ' -- ninja -C /build/c/core/oai</b></pre></td>\n'
                if nf_type == SCTPD_TYPE:
                    cell_msg += ' -- ninja -C /build/c/sctpd</b></pre></td>\n'
            else:
                cell_msg = '      <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                cell_msg += 'KO: logfile (' + log_file_name + ') not found</b></pre></td>\n'
            self.file.write(cell_msg)
    def analyze_compile_log(self, nf_type):
        """
        Add the table cells summarizing compiler errors/warnings/notes.

        One cell is written per build variant (Ubuntu 18 and RHEL8). The
        section of each build log belonging to `nf_type` is scanned for
        gcc-style 'error:', 'warning:' and 'note:' diagnostics; errors and
        warnings are also recorded in self.errorWarningInfo so the details
        table can render them later.

        Args:
            nf_type: which build part (COMMON_TYPE, MME_TYPE or SCTPD_TYPE)
        """
        self.file.write(' <td>Builder Image</td>\n')
        cwd = os.getcwd()
        log_file_names = [U18_BUILD_LOG_FILE, RHEL8_BUILD_LOG_FILE]
        for log_file_name in log_file_names:
            nb_errors = 0
            nb_warnings = 0
            nb_notes = 0
            # The RHEL8 variant is recognised by a '_rhel8' marker in the
            # log file name; anything else is treated as the Ubuntu 18 log.
            if log_file_name.count('_rhel8') > 0:
                variant = 'RHEL8'
            else:
                variant = 'UBUNTU 18'
            # One diagnostics bucket per (nf_type, variant) combination;
            # self.variant and self.errorWarningInfo stay in lockstep.
            self.errorWarningInfo.append([])
            self.variant.append(nf_type + ' ' + variant)
            idx = len(self.errorWarningInfo) - 1
            if os.path.isfile(cwd + '/archives/' + log_file_name):
                # Patterns delimiting the log section for this build part.
                if nf_type == COMMON_TYPE:
                    section_start_pattern = '/build/c/magma_common'
                    section_end_pattern = 'mkdir -p /build/c/core/oai'
                if nf_type == MME_TYPE:
                    section_start_pattern = '/build/c/core/oai'
                    section_end_pattern = 'mkdir -p /build/c/magma_common'
                if nf_type == SCTPD_TYPE:
                    section_start_pattern = '/build/c/sctpd'
                    section_end_pattern = 'FROM ubuntu:bionic as magma-mme'
                section_status = False
                section_done = False
                with open(cwd + '/archives/' + log_file_name, 'r') as logfile:
                    for line in logfile:
                        my_res = re.search(section_start_pattern, line)
                        # The section opens only on the 'cmake' line, and
                        # only once per log file (section_done latches it).
                        if (my_res is not None) and not section_done and (re.search('cmake', line) is not None):
                            section_status = True
                        my_res = re.search(section_end_pattern, line)
                        if (my_res is not None) and not section_done and section_status:
                            section_status = False
                            section_done = True
                        if section_status:
                            my_res = re.search('error:', line)
                            if my_res is not None:
                                nb_errors += 1
                                errorandwarnings = {}
                                # Typical diagnostic shape:
                                #   /magma/path/file.c:123: error: message
                                file_name = re.sub(':.*$', '', line.strip())
                                file_name = re.sub('^/magma/', '', file_name)
                                line_nb = '0'
                                warning_msg = re.sub('^.*error: ', '', line.strip())
                                details = re.search(':(?P<linenb>[0-9]+):', line)
                                if details is not None:
                                    line_nb = details.group('linenb')
                                errorandwarnings['kind'] = 'Error'
                                errorandwarnings['file_name'] = file_name
                                errorandwarnings['line_nb'] = line_nb
                                errorandwarnings['warning_msg'] = warning_msg
                                errorandwarnings['warning_type'] = 'fatal'
                                self.errorWarningInfo[idx].append(errorandwarnings)
                            my_res = re.search('warning:', line)
                            if my_res is not None:
                                nb_warnings += 1
                                errorandwarnings = {}
                                file_name = re.sub(':.*$', '', line.strip())
                                file_name = re.sub('^/magma/', '', file_name)
                                line_nb = '0'
                                details = re.search(':(?P<linenb>[0-9]+):', line)
                                if details is not None:
                                    line_nb = details.group('linenb')
                                # Strip the trailing ' [-Wxxx]' flag from the
                                # message and keep the flag as the warning type.
                                warning_msg = re.sub('^.*warning: ', '', line.strip())
                                warning_msg = re.sub(' \[-W.*$', '', warning_msg)
                                warning_type = re.sub('^.* \[-W', '', line.strip())
                                warning_type = re.sub('\].*$', '', warning_type)
                                errorandwarnings['kind'] = 'Warning'
                                errorandwarnings['file_name'] = file_name
                                errorandwarnings['line_nb'] = line_nb
                                errorandwarnings['warning_msg'] = warning_msg
                                errorandwarnings['warning_type'] = warning_type
                                self.errorWarningInfo[idx].append(errorandwarnings)
                            my_res = re.search('note:', line)
                            if my_res is not None:
                                nb_notes += 1
                    logfile.close()
                # Cell colour: green = clean, orange = few warnings,
                # red = errors or too many warnings.
                if nb_warnings == 0 and nb_errors == 0:
                    cell_msg = ' <td bgcolor="LimeGreen"><pre style="border:none; background-color:LimeGreen"><b>'
                elif nb_warnings < MAX_ALLOWED_WARNINGS and nb_errors == 0:
                    cell_msg = ' <td bgcolor="Orange"><pre style="border:none; background-color:Orange"><b>'
                else:
                    cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                if nb_errors > 0:
                    cell_msg += str(nb_errors) + ' errors found in compile log\n'
                cell_msg += str(nb_warnings) + ' warnings found in compile log\n'
                if nb_notes > 0:
                    cell_msg += str(nb_notes) + ' notes found in compile log\n'
                cell_msg += '</b></pre></td>\n'
            else:
                cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                cell_msg += 'KO: logfile (' + log_file_name + ') not found</b></pre></td>\n'
            self.file.write(cell_msg)
def add_copy_to_target_image_row(self):
"""Add the row about start of target image creation."""
self.file.write(' <tr>\n')
self.file.write(' <td bgcolor="lightcyan" >SW libs Installation / Copy from Builder</td>\n')
self.analyze_copy_log('MME')
self.file.write(' </tr>\n')
    def analyze_copy_log(self, nf_type):
        """
        Add the cells reporting the copy of executables/packages to the
        target image, one per build variant log.

        The check passes when the log contains the docker stage start line
        for the variant followed (while still in the stage) by the
        'WORKDIR /magma-mme/bin' line.

        Args:
            nf_type: which build part; only 'MME' is supported here
        """
        if nf_type != 'MME':
            self.file.write(' <td>N/A</td>\n')
            self.file.write(' <td>Wrong NF Type for this Report</td>\n')
            return
        self.file.write(' <td>Target Image</td>\n')
        cwd = os.getcwd()
        log_file_names = [U18_BUILD_LOG_FILE, RHEL8_BUILD_LOG_FILE]
        for log_file_name in log_file_names:
            if os.path.isfile(cwd + '/archives/' + log_file_name):
                # Stage start marker differs between the docker (Ubuntu)
                # and podman (RHEL8) builds.
                if log_file_name == U18_BUILD_LOG_FILE:
                    section_start_pattern = 'FROM ubuntu:bionic as magma-mme$'
                if log_file_name == RHEL8_BUILD_LOG_FILE:
                    section_start_pattern = 'FROM registry.access.redhat.com/ubi8/ubi:latest AS magma-mme$'
                section_end_pattern = 'WORKDIR /magma-mme/bin$'
                section_status = False
                status = False
                with open(cwd + '/archives/' + log_file_name, 'r') as logfile:
                    for line in logfile:
                        my_res = re.search(section_start_pattern, line)
                        if my_res is not None:
                            section_status = True
                        my_res = re.search(section_end_pattern, line)
                        # Reaching the end marker inside the section means
                        # the copy step completed.
                        if (my_res is not None) and section_status:
                            section_status = False
                            status = True
                    logfile.close()
                if status:
                    cell_msg = ' <td bgcolor="LimeGreen"><pre style="border:none; background-color:LimeGreen"><b>'
                    cell_msg += 'OK:\n'
                else:
                    cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                    cell_msg += 'KO:\n'
                cell_msg += '</b></pre></td>\n'
            else:
                cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                cell_msg += 'KO: logfile (' + log_file_name + ') not found</b></pre></td>\n'
            self.file.write(cell_msg)
def add_copy_conf_tools_to_target_mage_row(self):
"""Add the row about copy of configuration/tools."""
self.file.write(' <tr>\n')
self.file.write(' <td bgcolor="lightcyan" >Copy Template Conf / Tools from Builder</td>\n')
self.analyze_copy_conf_tool_log('MME')
self.file.write(' </tr>\n')
    def analyze_copy_conf_tool_log(self, nf_type):
        """
        Add the cells reporting the copy of configuration/tools into the
        target image, one per build variant log.

        The check passes when the 'WORKDIR /magma-mme/bin' line is followed
        (while still in the section) by the final image tag/commit line.

        Args:
            nf_type: which build part; only 'MME' is supported here
        """
        if nf_type != 'MME':
            self.file.write(' <td>N/A</td>\n')
            self.file.write(' <td>Wrong NF Type for this Report</td>\n')
            return
        self.file.write(' <td>Target Image</td>\n')
        cwd = os.getcwd()
        log_file_names = [U18_BUILD_LOG_FILE, RHEL8_BUILD_LOG_FILE]
        for log_file_name in log_file_names:
            if os.path.isfile(cwd + '/archives/' + log_file_name):
                section_start_pattern = 'WORKDIR /magma-mme/bin$'
                # Final marker differs: docker prints 'Successfully tagged',
                # podman prints 'COMMIT'.
                if log_file_name == U18_BUILD_LOG_FILE:
                    section_end_pattern = 'Successfully tagged magma-mme:'
                if log_file_name == RHEL8_BUILD_LOG_FILE:
                    section_end_pattern = 'COMMIT magma-mme:'
                section_status = False
                status = False
                with open(cwd + '/archives/' + log_file_name, 'r') as logfile:
                    for line in logfile:
                        my_res = re.search(section_start_pattern, line)
                        if my_res is not None:
                            section_status = True
                        my_res = re.search(section_end_pattern, line)
                        if (my_res is not None) and section_status:
                            section_status = False
                            status = True
                    logfile.close()
                if status:
                    cell_msg = ' <td bgcolor="LimeGreen"><pre style="border:none; background-color:LimeGreen"><b>'
                    cell_msg += 'OK:\n'
                else:
                    cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                    cell_msg += 'KO:\n'
                cell_msg += '</b></pre></td>\n'
            else:
                cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                cell_msg += 'KO: logfile (' + log_file_name + ') not found</b></pre></td>\n'
            self.file.write(cell_msg)
def add_image_size_row(self):
"""Add the row about image size of target image."""
self.file.write(' <tr>\n')
self.file.write(' <td bgcolor="lightcyan" >Image Size</td>\n')
self.analyze_image_size_log('MME')
self.file.write(' </tr>\n')
    def analyze_image_size_log(self, nf_type):
        """
        Retrieve the final image size from each build log and add a cell.

        After the image tag/commit marker, the log is expected to contain
        the output of a 'docker images'-style listing; the size is captured
        from the column after the 'ago' age field.

        Args:
            nf_type: which build part; only 'MME' is supported here
        """
        if nf_type != 'MME':
            self.file.write(' <td>N/A</td>\n')
            self.file.write(' <td>Wrong NF Type for this Report</td>\n')
            return
        self.file.write(' <td>Target Image</td>\n')
        cwd = os.getcwd()
        log_file_names = [U18_BUILD_LOG_FILE, RHEL8_BUILD_LOG_FILE]
        for log_file_name in log_file_names:
            if os.path.isfile(cwd + '/archives/' + log_file_name):
                if log_file_name == U18_BUILD_LOG_FILE:
                    section_start_pattern = 'Successfully tagged magma-mme'
                    section_end_pattern = 'MAGMA-OAI-MME DOCKER IMAGE BUILD'
                if log_file_name == RHEL8_BUILD_LOG_FILE:
                    section_start_pattern = 'COMMIT magma-mme:'
                    section_end_pattern = 'MAGMA-OAI-MME RHEL8 PODMAN IMAGE BUILD'
                section_status = False
                status = False
                with open(cwd + '/archives/' + log_file_name, 'r') as logfile:
                    for line in logfile:
                        my_res = re.search(section_start_pattern, line)
                        if my_res is not None:
                            section_status = True
                        my_res = re.search(section_end_pattern, line)
                        if (my_res is not None) and section_status:
                            section_status = False
                        if section_status:
                            # Merge requests tag the image 'ci-tmp';
                            # post-merge builds tag it 'master'.
                            if self.git_merge_request:
                                my_res = re.search('magma-mme *ci-tmp', line)
                            else:
                                my_res = re.search('magma-mme *master *', line)
                            if my_res is not None:
                                # Size is the field after the '... ago' age
                                # column, e.g. '2 weeks ago   421MB'.
                                my_res = re.search('ago *([0-9 A-Z]+)', line)
                                if my_res is not None:
                                    size = my_res.group(1)
                                    # status is only set once size is bound,
                                    # so the OK branch below is safe.
                                    status = True
                    logfile.close()
                if status:
                    cell_msg = ' <td bgcolor="LimeGreen"><pre style="border:none; background-color:LimeGreen"><b>'
                    cell_msg += 'OK: ' + size + '\n'
                else:
                    cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                    cell_msg += 'KO:\n'
                cell_msg += '</b></pre></td>\n'
            else:
                cell_msg = ' <td bgcolor="Tomato"><pre style="border:none; background-color:Tomato"><b>'
                cell_msg += 'KO: logfile (' + log_file_name + ') not found</b></pre></td>\n'
            self.file.write(cell_msg)
def add_details(self):
"""Add the compilation warnings/errors details"""
idx = 0
needed_details = False
while (idx < len(self.errorWarningInfo)):
if len(self.errorWarningInfo[idx]) > 0:
needed_details = True
idx += 1
if not needed_details:
return
details = ' <h3>Details</h3>\n'
details += ' <button data-toggle="collapse" data-target="#compilation-details">Details for Compilation Errors and Warnings </button>\n'
details += ' <div id="compilation-details" class="collapse">\n'
idx = 0
while (idx < len(self.errorWarningInfo)):
if len(self.errorWarningInfo[idx]) == 0:
idx += 1
continue
details += ' <h4>Details for ' + self.variant[idx] + '</h4>\n'
details += ' <table class="table-bordered" width = "100%" align = "center" border = "1">\n'
details += ' <tr bgcolor = "#33CCFF" >\n'
details += ' <th>File</th>\n'
details += ' <th>Line Number</th>\n'
details += ' <th>Status</th>\n'
details += ' <th>Kind</th>\n'
details += ' <th>Message</th>\n'
details += ' </tr>\n'
for info in self.errorWarningInfo[idx]:
details += ' <tr>\n'
details += ' <td>' + info['file_name'] + '</td>\n'
details += ' <td>' + info['line_nb'] + '</td>\n'
details += ' <td>' + info['kind'] + '</td>\n'
details += ' <td>' + info['warning_type'] + '</td>\n'
details += ' <td>' + info['warning_msg'] + '</td>\n'
details += ' </tr>\n'
details += ' </table>\n'
idx += 1
details += ' </div>\n'
details += ' <br>\n'
self.file.write(details)
    def append_build_summary(self, mode):
        """
        Insert the build info summary into an existing test results file.

        Rewrites the dsTester HTML report, injecting the build summary
        (from generate_build_summary()) just before the first
        'Deployment Summary' line, then atomically replaces the original
        file with the rewritten one.

        Args:
            mode: which test mode; only 'dsTester' is handled
        """
        cwd = os.getcwd()
        if mode == 'dsTester':
            filename = 'test_results_magma_oai_epc.html'
            if os.path.isfile(cwd + '/' + filename):
                new_test_report = open(cwd + '/new_' + filename, 'w')
                # Latch so the summary is inserted only once.
                build_summary_to_be_done = True
                with open(cwd + '/' + filename, 'r') as original_test_report:
                    for line in original_test_report:
                        my_res = re.search('Deployment Summary', line)
                        if (my_res is not None) and build_summary_to_be_done:
                            summary = self.generate_build_summary()
                            new_test_report.write(summary)
                            build_summary_to_be_done = False
                        new_test_report.write(line)
                    original_test_report.close()
                new_test_report.close()
                # Replace the original report with the augmented copy.
                os.rename(cwd + '/new_' + filename, cwd + '/' + filename)
# --------------------------------------------------------------------------------------------------------
#
# Start of main
#
# --------------------------------------------------------------------------------------------------------
# Command-line parsing. Each '--opt=value' option is matched twice: once in
# the elif test and once to capture the value.
argvs = sys.argv
argc = len(argvs)
HTML = HtmlReport()
while len(argvs) > 1:
    my_argv = argvs.pop(1)
    if re.match('^--help$', my_argv, re.IGNORECASE):
        print('No help yet.')
        sys.exit(0)
    elif re.match('^--job_name=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--job_name=(.+)$', my_argv, re.IGNORECASE)
        HTML.job_name = match.group(1)
    elif re.match('^--job_id=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--job_id=(.+)$', my_argv, re.IGNORECASE)
        HTML.job_id = match.group(1)
    elif re.match('^--job_url=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--job_url=(.+)$', my_argv, re.IGNORECASE)
        HTML.job_url = match.group(1)
    elif re.match('^--git_url=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--git_url=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_url = match.group(1)
    elif re.match('^--git_src_branch=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--git_src_branch=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_src_branch = match.group(1)
    elif re.match('^--git_src_commit=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--git_src_commit=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_src_commit = match.group(1)
    elif re.match('^--git_src_commit_msg=(.+)$', my_argv, re.IGNORECASE):
        # Not Mandatory
        match = re.match('^--git_src_commit_msg=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_src_commit_msg = match.group(1)
    elif re.match('^--git_merge_request=(.+)$', my_argv, re.IGNORECASE):
        # Can be silent: would be false!
        match = re.match('^--git_merge_request=(.+)$', my_argv, re.IGNORECASE)
        if match.group(1) == 'true' or match.group(1) == 'True':
            HTML.git_merge_request = True
    elif re.match('^--git_target_branch=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--git_target_branch=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_target_branch = match.group(1)
    elif re.match('^--git_target_commit=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--git_target_commit=(.+)$', my_argv, re.IGNORECASE)
        HTML.git_target_commit = match.group(1)
    elif re.match('^--mode=(.+)$', my_argv, re.IGNORECASE):
        match = re.match('^--mode=(.+)$', my_argv, re.IGNORECASE)
        if match.group(1) == 'Build':
            HTML.mode = 'build'
        elif match.group(1) == 'TestWithDsTest':
            HTML.mode = 'dsTester'
        else:
            sys.exit('Invalid mode: ' + match.group(1))
    else:
        sys.exit('Invalid Parameter: ' + my_argv)
# Sanity checks: job, git and (for merge requests) target info must be set.
if HTML.job_name == '' or HTML.job_id == '' or HTML.job_url == '' or HTML.mode == '':
    sys.exit('Missing Parameter in job description')
if HTML.git_url == '' or HTML.git_src_branch == '' or HTML.git_src_commit == '':
    sys.exit('Missing Parameter in Git Repository description')
if HTML.git_merge_request:
    if HTML.git_target_commit == '' or HTML.git_target_branch == '':
        sys.exit('Missing Parameter in Git Pull Request Repository description')
# Dispatch on the requested mode.
if HTML.mode == 'build':
    HTML.generate_build_report()
elif HTML.mode == 'dsTester':
    HTML.append_build_summary(HTML.mode)
| StarcoderdataPython |
396307 | import os
import sys
from pathlib import Path
# Make the project root importable when this file is run as a script.
ROOT_PATH = Path(__file__).resolve().parent.parent
if str(ROOT_PATH) not in sys.path:
    sys.path.insert(1, str(ROOT_PATH))
import numpy as np
import re
from frequency_response import FrequencyResponse
from biquad import peaking, low_shelf, high_shelf, digital_coeffs
from argparse import ArgumentParser, SUPPRESS
# Map EqualizerAPO filter type codes to biquad coefficient factories.
fns = {'PK': peaking, 'LS': low_shelf, 'HS': high_shelf}
# Sampling rate (Hz) used when evaluating filter frequency responses.
fs = 48000
def peq2fr(fc, q, gain, filts):
    """
    Build a FrequencyResponse for a parametric EQ definition.

    Args:
        fc: Center frequency/frequencies in Hz (scalar or array-like).
        q: Quality factor(s) (scalar or array-like).
        gain: Gain(s) in dB (scalar or array-like).
        filts: Filter type(s): 'PK', 'LS' or 'HS' (str or sequence).

    Returns:
        FrequencyResponse whose raw curve is the summed response of all the
        given biquad filters, evaluated at sampling rate `fs`.
    """
    # Accept scalars for convenience; normalize everything to arrays.
    # isinstance (instead of the original `type(x) in [float, int]`) also
    # accepts int/float subclasses such as numpy scalar types.
    if isinstance(fc, (int, float)):
        fc = np.array([fc])
    if isinstance(q, (int, float)):
        q = np.array([q])
    if isinstance(gain, (int, float)):
        gain = np.array([gain])
    if isinstance(filts, str):
        # A single type string applies to every filter.
        filts = [filts] * len(fc)
    fr = FrequencyResponse(name='PEG')
    c = np.zeros(fr.frequency.shape)
    for i, filt in enumerate(filts):
        # Each filter contributes additively (dB domain) to the curve.
        a0, a1, a2, b0, b1, b2 = fns[filt](fc[i], q[i], gain[i], fs=fs)
        c += digital_coeffs(fr.frequency, fs, a0, a1, a2, b0, b1, b2)
    fr.raw = c
    return fr
def peq2geq(fc, q, gain, filts, normalize=False):
    """Convert parametric EQ settings into an EqualizerAPO GraphicEQ line."""
    response = peq2fr(fc, q, gain, filts)
    # eqapo_graphic_eq() serializes the equalization curve, so mirror the
    # raw curve into it first.
    response.equalization = response.raw
    return response.eqapo_graphic_eq(normalize=normalize)
def read_eqapo(file_path):
    """
    Parse filter definitions from an EqualizerAPO configuration file.

    Only 'Filter:' lines are understood, e.g.
        Filter: ON PK Fc 15500 Hz Gain -18 dB Q 2
    Comment lines starting with '#' and blank lines are ignored; any other
    line is reported as unsupported.

    Args:
        file_path: Path to the EqualizerAPO config file.

    Returns:
        Tuple (fcs, qs, gains, filts) of parallel lists: center frequencies,
        quality factors, gains and filter type codes ('PK', 'LS' or 'HS').
    """
    with open(file_path) as fh:
        lines = fh.read().strip().split('\n')
    fcs = []
    qs = []
    gains = []
    filts = []
    for line in lines:
        # Skip blank interior lines: the original `line[0]` indexing raised
        # IndexError on an empty line.
        if not line.strip():
            continue
        if line.startswith('#'):  # Comment line
            continue
        tokens = line.split()
        if tokens[0] == 'Filter:':
            fcs.append(float(tokens[tokens.index('Fc') + 1]))
            qs.append(float(tokens[tokens.index('Q') + 1]))
            gains.append(float(tokens[tokens.index('Gain') + 1]))
            filts.append(re.search(r'(PK|LS|HS)', line)[0])
        else:
            print(f'Unsupported EqualizerAPO control type "{line}"')
    return fcs, qs, gains, filts
def main():
    """Parse command-line options and print the resulting GraphicEQ line.

    Filter parameters may come from the --fc/--q/--gain/--type options,
    from an EqualizerAPO config file (--file), or both combined.
    """
    parser = ArgumentParser('Turns parametric equalizer into EqualizerAPO\'s GraphicEQ.\nCan read filter parameters'
                            'from fc, q, gain and type arguments and/or from EqualizerAPO configuration file.\n\n'
                            'EqualizerAPO file has to contain lines such as\n'
                            ' "Filter: ON PK Fc 15500 Hz Gain -18 dB Q 2"\n')
    parser.add_argument('--fc', type=float, nargs='+',
                        help='Center frequencies in Hz, separated by spaces')
    parser.add_argument('--q', type=float, nargs='+',
                        help='Qualities, separated by spaces')
    parser.add_argument('--gain', type=float, nargs='+',
                        help='Gains, separated by spaces')
    parser.add_argument('--type', nargs='+',
                        help='Filter types. PK for peaking, LS for low-shelf and HS for high-shelf')
    parser.add_argument('--file', type=str, default=SUPPRESS,
                        help='Path to EqualizerAPO config file. Supports filters only.')
    parser.add_argument('--normalize', action='store_true', help='Normalize gain?')
    args = parser.parse_args()
    # File-defined filters come first; command-line filters are appended.
    if 'file' in args and args.file:
        fcs, qs, gains, filts = read_eqapo(args.file)
    else:
        fcs, qs, gains, filts = [], [], [], []
    if args.fc:
        fcs += args.fc
    if args.q:
        qs += args.q
    if args.gain:
        gains += args.gain
    if args.type:
        filts += args.type
    # All four parameter lists must be parallel (one entry per filter).
    if not (len(fcs) == len(qs) == len(gains) == len(filts)):
        print('Different number of Fc, Q, gain and filter types')
        return
    print(peq2geq(fcs, qs, gains, filts, normalize=args.normalize))
if __name__ == '__main__':
main()
| StarcoderdataPython |
5067989 | """
Module contains the state machine runner itself.
Implements the following functions:
* __init__ - Constructor accepts path to properties file and
then calls get_properties() to load the YAML.
* get_properties - Read the properties file into the properties attribute.
* run - Iterates over the array of imported ism_core_actions and calls each one's
execute method.
"""
# Standard library imports
import errno
import importlib.resources as pkg_resources
import importlib.util
import inspect
import json
import logging
import os
import threading
import time
import yaml
# Local application imports
from ism.exceptions.exceptions import PropertyKeyNotRecognised, RDBMSNotRecognised, TimestampFormatNotRecognised, \
ExecutionPhaseNotFound, MalformedActionPack
from . import core
from .core.action_check_timers import ActionCheckTimers
from .core.action_normal_shutdown import ActionNormalShutdown
from .core.action_emergency_shutdown import ActionEmergencyShutdown
from .core.action_confirm_ready_to_run import ActionConfirmReadyToRun
from .core.action_confirm_ready_to_stop import ActionConfirmReadyToStop
class ISM:
    """
    Implements an Infinite State Machine
    Attributes
    ----------
    props_file: str
        Fully qualified path to the properties file.
    Methods
    -------
    run()
        Iterates over the array of ism_core_actions and calls each one's
        execute method.
    get_properties()
        Read in the properties file passed into the constructor.
    """
    def __init__(self, *args):
        """
        :param props_file:
            Fully qualified path to the properties file
        """
        # args[0] is a dict holding at least 'properties_file' and,
        # optionally, a 'database' sub-dict with the DB password.
        self.properties_file = args[0]['properties_file']
        self.properties = self.__get_properties()
        # The DB password is not stored in the YAML; it is injected by the
        # caller at construction time.
        self.properties['database']['password'] = args[0].get('database', {}).get('password', None)
        self.properties['database']['db_path'] = None
        self.properties['runtime']['run_timestamp'] = self.__create_run_timestamp()
        self.properties['runtime']['tag'] = self.properties['runtime'].get('tag', 'default')
        self.properties['running'] = False
        self.ism_thread = None
        self.actions = []
        # Order matters: runtime dirs, then logging, then the database.
        self.__create_runtime_environment()
        self.__enable_logging()
        self.logger.info(f'Starting run using user tag ('
                         f'{self.properties["runtime"]["tag"]}) and system tag ('
                         f'{self.properties["runtime"]["run_timestamp"]})')
        self.__create_db(self.properties['database']['rdbms'])
        self.__create_core_schema()
        self.__insert_core_data()
        self.__import_core_actions()
    # Private methods
    def __create_core_schema(self):
        """Create the core schema
        ISM needs a basic core of tables to run. Import the schema from ism.core.schema.json.
        """
        with pkg_resources.open_text(core, 'schema.json') as schema:
            data = json.load(schema)
            # The schema file holds one list of CREATE statements per RDBMS.
            for table in data[self.properties['database']['rdbms'].lower()]['tables']:
                self.dao.execute_sql_statement(table)
    def __create_db(self, rdbms):
        """Route through to the correct RDBMS handler"""
        try:
            # Dispatch table keyed on the lower-cased RDBMS name.
            {
                'sqlite3': self.__create_sqlite3,
                'mysql': self.__create_mysql
            }[rdbms.lower()]()
        except KeyError:
            self.logger.error(f'RDBMS {rdbms} not recognised / supported')
            raise RDBMSNotRecognised(f'RDBMS {rdbms} not recognised / supported')
    def __create_mysql(self):
        """Create the Mysql database for the run.
        TODO Investigate if properties needs to be passed to create_database and if it's used at all in the DAO
        """
        from ism.dal.mysql_dao import MySqlDAO
        # DB name is unique per run: <db_name>_<user tag>_<timestamp>.
        self.properties['database']['run_db'] = \
            f'{self.properties["database"]["db_name"]}_' \
            f'{self.properties["runtime"]["tag"]}_' \
            f'{self.properties["runtime"]["run_timestamp"]}'
        self.dao = MySqlDAO(self.properties)
        self.dao.create_database(self.properties)
        self.logger.info(f'Created MySql database {self.properties["database"]["run_db"]}')
    def __create_sqlite3(self):
        """RDBMS set to SQLITE3
        Create the SQLITE3 database object and record the path to it.
        TODO Investigate if properties needs to be passed to create_database and if it's used at all in the DAO
        """
        from ism.dal.sqlite3_dao import Sqlite3DAO
        # Sqlite3 DB lives in a 'database' sub-dir of the run directory.
        db_dir = f'{self.properties["runtime"]["run_dir"]}{os.path.sep}database'
        self.properties['database']['db_path'] = \
            f'{db_dir}{os.path.sep}{self.properties["database"]["db_name"]}'
        os.makedirs(db_dir)
        self.dao = Sqlite3DAO(self.properties)
        self.dao.create_database(self.properties)
        self.logger.info(f'Created Sqlite3 database {self.properties["database"]["db_path"]}')
    def __create_run_timestamp(self):
        """Create a timestamp for the runtime directories in correct format
        Properties file runtime:stamp_format may be -
            epoch_milliseconds
            epoch_seconds
        """
        tag_format = self.properties['runtime']['sys_tag_format']
        try:
            return {
                'epoch_seconds': int(time.time()),
                'epoch_milliseconds': int(time.time()*1000.0)
            }[tag_format.lower()]
        except KeyError:
            raise TimestampFormatNotRecognised(f'System tag format ({tag_format}) not recognised')
    def __create_runtime_environment(self):
        """Create the runtime environment
        The ISM will create a directory structure for the run. This will
        hold the database, runtime files and directories.
        """
        try:
            # Layout: <root_dir>/<user tag>/<run timestamp>
            self.properties["runtime"]["run_dir"] = f'{self.properties["runtime"]["root_dir"]}' \
                                                    f'{os.path.sep}' \
                                                    f'{self.properties["runtime"]["tag"]}' \
                                                    f'{os.path.sep}' \
                                                    f'{self.properties["runtime"]["run_timestamp"]}'
            os.makedirs(self.properties["runtime"]["run_dir"])
        except OSError as e:
            # An already-existing directory is fine; anything else is fatal.
            if e.errno != errno.EEXIST:
                raise
    def __enable_logging(self):
        """Configure the logging to write to a log file in the run root
        Used this guide to set up logging. It explains how to set up different loggers
        for each module and have them referenced in the log.
        https://docs.python.org/3/howto/logging-cookbook.html
        """
        try:
            log_dir = f'{self.properties["runtime"]["run_dir"]}' \
                      f'{os.path.sep}' \
                      f'log'
            os.makedirs(log_dir)
            # Rewrite the configured file name as a full path under log_dir.
            self.properties["logging"]["file"] = \
                f'{log_dir}' \
                f'{os.path.sep}' \
                f'{self.properties["logging"]["file"]}'
            # Translate the configured level name into a logging constant.
            log_level = {
                'DEBUG': logging.DEBUG,
                'INFO': logging.INFO,
                'WARNING': logging.WARNING,
                'ERROR': logging.ERROR,
                'CRITICAL': logging.CRITICAL
            }[self.properties['logging']['level'].upper()]
        except KeyError:
            raise PropertyKeyNotRecognised()
        # Configure the root logger
        self.root_logger = logging.getLogger()
        self.root_logger.setLevel(log_level)
        # File handler for the root logger
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh = logging.FileHandler(self.properties["logging"]["file"], 'w')
        fh.setFormatter(formatter)
        self.root_logger.addHandler(fh)
        # Suppress propagation to STDOUT?
        self.root_logger.propagate = self.properties.get('logging', {}).get('propagate', False)
        # Now create the ISM logger and inherit from the root logger
        self.logger = logging.getLogger('ism')
    def __get_properties(self) -> dict:
        """Read in the properties file passed into the constructor."""
        logging.info(f'Reading in properties from file ({self.properties_file})')
        with open(self.properties_file) as file:
            return yaml.safe_load(file)
    def __import_core_actions(self):
        """Import the core actions for the ISM"""
        args = {
            "dao": self.dao,
            "properties": self.properties
        }
        # These five actions are the minimum loop: timers, start/stop
        # confirmation and the two shutdown paths.
        self.actions.append(ActionCheckTimers(args))
        self.actions.append(ActionConfirmReadyToRun(args))
        self.actions.append(ActionConfirmReadyToStop(args))
        self.actions.append(ActionEmergencyShutdown(args))
        self.actions.append(ActionNormalShutdown(args))
    def __insert_core_data(self):
        """Insert the run data for the core
        ISM needs a basic core of actions to run. Import the data from ism.core.data.json.
        """
        with pkg_resources.open_text(core, 'data.json') as data:
            inserts = json.load(data)
            for insert in inserts[self.properties['database']['rdbms'].lower()]['inserts']:
                self.dao.execute_sql_statement(insert)
    def __run(self):
        """Iterates over the array of imported actions and calls each one's
        execute method.
        Method executes in its own thread.
        """
        self.properties['running'] = True
        index = 0
        # Round-robin over the actions until stop() clears the flag.
        while self.properties['running']:
            self.actions[index].execute()
            index += 1
            if index >= len(self.actions):
                index = 0
    # Public methods
    def get_database_name(self) -> str:
        """Return the database name"""
        db = {
            'sqlite3': self.__get_sqlite3_db_name,
            'mysql': self.__get_mysql_db_name
        }[self.properties['database']['rdbms'].lower()]()
        return db
    def get_tag(self) -> str:
        """Return the user tag for the runtime directories"""
        return self.properties['runtime']['tag']
    def import_action_pack(self, pack):
        """Import an action pack
        Application can pass in action packs to enable the ISM to express
        specific functionality. For an example of how to call this method,
        see the unit test in tests/test_ism.py (test_import_action_pack).
        Each action pack is a python package containing:
            * At least one action class inheriting from ism.core.BaseAction
            * A data.json file containing at least the insert statements for the
              action in the control DB.
            * Optionally a schema.json file contain the create statements for any
              tables the action needs in the control DB.
        The package should contain nothing else and no sub packages.
        """
        import pkgutil
        action_args = {
            "dao": self.dao,
            "properties": self.properties
        }
        try:
            # Import the package containing the actions
            package = importlib.import_module(pack)
            # Find each action module in the package
            for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
                # Should not be any sub packages in there
                if ispkg:
                    raise MalformedActionPack(
                        f'Passed malformed action pack ({pack}). Unexpected sub packages {modname}'
                    )
                # Import the module containing the action
                module = importlib.import_module(f'{pack}.{importer.find_spec(modname).name}')
                # Get the name of the action class, instantiate it and add to the collection of actions
                for action in inspect.getmembers(module, inspect.isclass):
                    # Skip the abstract base; accept any class whose name
                    # contains 'Action'.
                    if action[0] == 'BaseAction':
                        continue
                    if 'Action' in action[0]:
                        cl_ = getattr(module, action[0])
                        self.actions.append(cl_(action_args))
            # Get the supporting DB file/s
            self.import_action_pack_tables(package)
        except ModuleNotFoundError as e:
            logging.error(f'Module/s not found for argument ({pack})')
            raise
    def import_action_pack_tables(self, package):
        """"An action will typically create some tables and insert standing data.
        If supporting schema file exists, then create the tables. A data.json
        file must exist with at least one insert for the actions table or the action
        execute method will not be able to activate or deactivate..
        """
        inserts_found = False
        path = os.path.split(package.__file__)[0]
        for root, dirs, files in os.walk(path):
            # schema.json is optional: run any CREATE statements it holds.
            if 'schema.json' in files:
                schema_file = os.path.join(root, 'schema.json')
                with open(schema_file) as tables:
                    data = json.load(tables)
                    for table in data[self.properties['database']['rdbms'].lower()]['tables']:
                        self.dao.execute_sql_statement(table)
            # data.json is mandatory: it must register the action rows.
            if 'data.json' in files:
                data = os.path.join(root, 'data.json')
                with open(data) as statements:
                    inserts = json.load(statements)
                    for insert in inserts[self.properties['database']['rdbms'].lower()]['inserts']:
                        self.dao.execute_sql_statement(insert)
                    inserts_found = True
        if not inserts_found:
            raise MalformedActionPack(f'No insert statements found for action pack ({package})')
    def set_tag(self, tag):
        """Set the user tag for the runtime directories"""
        self.properties['runtime']['tag'] = tag
    def start(self, join=False):
        """Start running the state machine main loop in the background
        Caller has the option to run the thread as a daemon or to join() it.
        """
        self.ism_thread = threading.Thread(target=self.__run, daemon=True)
        self.logger.info(f'Starting run() thread {self.ism_thread.name}')
        self.ism_thread.start()
        if join:
            # Block the caller until the loop thread finishes.
            self.ism_thread.join()
    def stop(self):
        """Stop the run in the background thread"""
        # __run() polls this flag on every loop iteration.
        self.properties['running'] = False
    # Test Methods
    def __get_mysql_db_name(self) -> str:
        """Retrieve the MySql db name from the information schema."""
        sql = self.dao.prepare_parameterised_statement(
            'select SCHEMA_NAME from information_schema.schemata WHERE SCHEMA_NAME = ?'
        )
        params = (self.properties.get("database", {}).get("run_db", None),)
        rows = self.dao.execute_sql_query(sql, params)
        return ''.join(rows[0]) if rows else 'Not found'
    def __get_sqlite3_db_name(self) -> str:
        """Return the path to the Sqlite3 database."""
        return self.properties.get('database', {}).get('db_path', 'Not found')
    def get_execution_phase(self) -> str:
        """Get the current active execution phase.
        e.g.
            * RUNNING
            * STARTING
            * EMERGENCY_SHUTDOWN
            * NORMAL_SHUTDOWN
            * STOPPED
        """
        try:
            # Exactly one row in 'phases' is expected to have state = 1.
            return self.dao.execute_sql_query(
                f'SELECT execution_phase FROM phases WHERE state = 1'
            )[0][0]
        except IndexError as e:
            raise ExecutionPhaseNotFound(f'Current execution_phase not found in control database. ({e})')
| StarcoderdataPython |
6456833 | <filename>createfiles.py<gh_stars>0
import os

# Create a set of test files of known sizes by seeking to the last byte and
# writing a single character (sparse on filesystems that support it).
cwd = os.getcwd()
names = ("fivemb", "fiftymb", "fivehundredmb", "onegb", "twogb")
nums = (5, 50, 500, 1024, 2048)
multi = 1024 * 1024
sizes = [num * multi for num in nums]
for name, size in zip(names, sizes):
    with open(name, "wb") as out:
        out.seek(size - 1)
        out.write(b"0")
    # os.path.join is portable: the original hard-coded a Windows '\\'
    # separator, which broke the size check on POSIX systems.
    filesize = os.path.getsize(os.path.join(cwd, name))
    if name.endswith("mb"):
        print(filesize / multi, "MB")
    else:
        print(filesize / (1024 * multi), "GB")
| StarcoderdataPython |
160833 | <reponame>RenaKunisaki/GhidraScripts<filename>FindStruct.py
#Find structs by field type.
#@author Rena
#@category Struct
#@keybinding
#@menupath
#@toolbar
# Shorthand aliases for the Ghidra table-chooser base classes.
StringColumnDisplay = ghidra.app.tablechooser.StringColumnDisplay
AddressableRowObject = ghidra.app.tablechooser.AddressableRowObject
TableChooserExecutor = ghidra.app.tablechooser.TableChooserExecutor
# Program-level services/objects provided by the Ghidra scripting state.
DTM = state.tool.getService(ghidra.app.services.DataTypeManagerService)
AF = currentProgram.getAddressFactory()
DT = currentProgram.getDataTypeManager()
listing = currentProgram.getListing()
mem = currentProgram.getMemory()
def addrToInt(addr):
    """Interpret the address's string form as a hexadecimal integer."""
    text = str(addr)
    return int(text, 16)
def intToAddr(addr):
    """Build a Ghidra Address object from an integer value."""
    hex_text = "0x%08X" % addr
    return AF.getAddress(hex_text)
class Executor(TableChooserExecutor):
    """Table-chooser executor whose button opens the structure editor."""

    def getButtonName(self):
        """Label shown on the table dialog's action button."""
        return "Edit Structure"

    def execute(self, row):
        """Open the structure editor for the selected row; keep the row."""
        DTM.edit(row.struc)  # show the structure editor
        return False  # do not remove row
class StructNameColumn(StringColumnDisplay):
    """Table column showing the structure's display name."""

    def getColumnName(self):
        return "Struct Name"

    def getColumnValue(self, row):
        struct = row.struc
        return struct.displayName
class StructLengthColumn(StringColumnDisplay):
    """Table column showing the structure's size in bytes."""

    def getColumnName(self):
        return "Struct Size"

    def getColumnValue(self, row):
        struct = row.struc
        return struct.length
class StructListResult(AddressableRowObject):
    """Row wrapper for a struct; reuses the address slot for its size."""

    def __init__(self, struc):
        self.struc = struc

    def getAddress(self):
        # The 'address' column is repurposed to show the struct length.
        length = self.struc.length
        return intToAddr(length)
def run():
    """Prompt for field criteria and list every structure matching all of them."""
    # XXX find a way to make this UI better.
    # criteria is eg:
    # B8=*int (a struct with an int* at 0xB8)
    # B8=* (a struct with any pointer at 0xB8)
    # B8=2 (a struct with any field at 0xB8 with length 2)
    # B8=*2 (a struct with a pointer at 0xB8 to something with length 2)
    # B8 (a struct with any field starting at 0xB8)
    params = askString("Find Struct", "Enter search criteria")
    # Criteria are separated by ';' and must all match (logical AND).
    params = params.split(';')
    monitor.initialize(len(params))
    # Start from every structure in the program; each criterion prunes the list.
    candidates = list(DT.allStructures)
    def showResults():
        # Present the surviving candidates in a chooser table with an edit button.
        executor = Executor()
        tbl = createTableChooserDialog("Matching Structs", executor)
        tbl.addCustomColumn(StructNameColumn())
        tbl.addCustomColumn(StructLengthColumn())
        #printf("show %d results\n", len(candidates))
        for res in candidates:
            #printf("%s\n", res.displayName)
            tbl.add(StructListResult(res))
        tbl.show()
        tbl.setMessage("%d results" % len(candidates))
    def removeResult(struc):
        candidates.remove(struc)
        #print("remove", struc.name, "#res", len(candidates))
    def checkComponent(struc, comp, offset, typ):
        # return True if match, False if not.
        # does component match given offset/type?
        if comp.offset != offset: return False
        if typ is None: return True # match any type at this offset
        # if this is a pointer, walk the dataType chain
        # to reach the base type
        tp = typ
        dt = comp.dataType
        while tp.startswith('*'):
            # Each '*' in the criterion must consume one level of pointer.
            if (not hasattr(dt, 'dataType')) or dt.dataType is None:
                #printf("[X] %s.%s @%X type is %s\n", struc.name,
                # comp.fieldName, offset, str(getattr(dt, 'dataType')))
                return False
            dt = dt.dataType
            tp = tp[1:]
        # check the name
        # remove spaces for simplicity
        tp = tp.replace(' ', '')
        nm = dt.name.replace(' ', '')
        if tp.isnumeric():
            # Numeric criterion: match on the base type's size in bytes.
            #printf("[%s] %s.%s @%X size is %d\n",
            # "O" if dt.length == int(tp) else "X",
            # struc.name, comp.fieldName, offset, dt.length)
            if dt.length == int(tp):
                return True
        else:
            # Otherwise match on the (space-stripped) type name.
            #printf("[%s] %s.%s @%X type is %d\n",
            # "O" if nm == tp else "X",
            # struc.name, comp.fieldName, offset, dt.length)
            if nm == tp:
                return True
        #comp.dataType.name, numElements, elementLength, length, dataType
        #comp.fieldName, comment, endOffset, bitFieldComponent, dataType, length, offset, ordinal
        return False
    def evaluateParam(param):
        # Apply one "<hex offset>[=<type>]" criterion, pruning non-matches.
        param = param.split('=')
        offset = int(param[0], 16)
        if len(param) < 2:
            # no type given - find any struct which has a field
            # beginning at this offset.
            typ = None
        else:
            # user specified a type for the field
            typ = param[1]
        #printf("Evaluate '%s', #res=%d\n", param, len(candidates))
        remove = []
        for struc in candidates:
            monitor.checkCanceled()
            #monitor.incrementProgress(1)
            #monitor.setMessage("Checking %s" % struc.displayName)
            #print("check", struc.displayName)
            match = False
            for comp in struc.components:
                if checkComponent(struc, comp, offset, typ):
                    match = True
                    break
            if not match: remove.append(struc)
        # Deferred removal: we cannot mutate `candidates` while iterating it.
        for struc in remove: removeResult(struc)
        #printf("Evaluated '%s', #res=%d\n", param, len(candidates))
    for param in params:
        monitor.checkCanceled()
        monitor.incrementProgress(1)
        monitor.setMessage("Checking %s" % param)
        evaluateParam(param)
        if len(candidates) == 0: break
    #popup("Found %d matches (see console)" % len(candidates))
    showResults()
# Ghidra executes the script body directly, which kicks off the search.
run()
| StarcoderdataPython |
11237158 | <reponame>abrahammurciano/nextcord
from nextcord.ext.abc.context_base import ContextBase
from .id_converter import IDConverter
from .errors import MessageNotFound, ChannelNotFound
from nextcord.abc import MessageableChannel
from typing import Optional
import nextcord
import re
class PartialMessageConverter(IDConverter[nextcord.PartialMessage]):
    """Converts to a :class:`nextcord.PartialMessage`.
    .. versionadded:: 1.7
    The creation strategy is as follows (in order):
    1. By "{channel ID}-{message ID}" (retrieved by shift-clicking on "Copy ID")
    2. By message ID (The message is assumed to be in the context channel.)
    3. By message URL
    """
    @staticmethod
    def _get_id_matches(ctx, argument):
        # Returns (guild_id, message_id, channel_id); any of guild_id/channel_id
        # may be None when the argument does not carry them.
        # Matches "<channel id>-<message id>" or a bare "<message id>".
        id_regex = re.compile(r'(?:(?P<channel_id>[0-9]{15,20})-)?(?P<message_id>[0-9]{15,20})$')
        # Matches a full message URL (guild messages and "@me" DM links).
        link_regex = re.compile(
            r'https?://(?:(ptb|canary|www)\.)?discord(?:app)?\.com/channels/'
            r'(?P<guild_id>[0-9]{15,20}|@me)'
            r'/(?P<channel_id>[0-9]{15,20})/(?P<message_id>[0-9]{15,20})/?$'
        )
        match = id_regex.match(argument) or link_regex.match(argument)
        if not match:
            raise MessageNotFound(argument)
        data = match.groupdict()
        channel_id = nextcord.utils._get_as_snowflake(data, 'channel_id')
        message_id = int(data['message_id'])
        guild_id = data.get('guild_id')
        if guild_id is None:
            # Bare-ID form: fall back to the guild the command ran in, if any.
            guild_id = ctx.guild and ctx.guild.id
        elif guild_id == '@me':
            # DM links carry "@me" instead of a guild id.
            guild_id = None
        else:
            guild_id = int(guild_id)
        return guild_id, message_id, channel_id
    @staticmethod
    def _resolve_channel(ctx, guild_id, channel_id) -> Optional[MessageableChannel]:
        # Resolve the channel the message lives in; None when it cannot be found.
        if guild_id is not None:
            guild = ctx.bot.get_guild(guild_id)
            if guild is not None and channel_id is not None:
                return guild._resolve_channel(channel_id)
            else:
                return None
        else:
            # No guild: global channel lookup, defaulting to the invocation channel.
            return ctx.bot.get_channel(channel_id) if channel_id else ctx.channel
    async def convert(self, ctx: ContextBase, argument: str) -> nextcord.PartialMessage:
        """Parse *argument* into a PartialMessage.

        Raises MessageNotFound when the argument matches no known format and
        ChannelNotFound when the referenced channel cannot be resolved.
        """
        guild_id, message_id, channel_id = self._get_id_matches(ctx, argument)
        channel = self._resolve_channel(ctx, guild_id, channel_id)
        if not channel:
            raise ChannelNotFound(channel_id)
        return nextcord.PartialMessage(channel=channel, id=message_id)
    async def convert_from_id(self, ctx: ContextBase, id: int) -> nextcord.PartialMessage:
        """Build a PartialMessage for *id* in the invocation channel."""
        return nextcord.PartialMessage(channel=ctx.channel, id=id)
9685267 | <filename>pcapkit/all.py
# -*- coding: utf-8 -*-
# pylint: disable=unused-import, unused-wildcard-import, bad-continuation,wildcard-import
"""index for the library
:mod:`pcapkit` has defined various and numerous functions
and classes, which have different features and purposes.
To make a simple index for this library, :mod:`pcapkit.all`
contains all things from :mod:`pcapkit`.
"""
import pcapkit.const as const
#import pcapkit.vendor as vendor
from pcapkit.corekit import *
from pcapkit.dumpkit import *
from pcapkit.foundation import *
from pcapkit.interface import *
from pcapkit.protocols import *
from pcapkit.reassembly import *
from pcapkit.toolkit import *
from pcapkit.utilities import * # pylint: disable=redefined-builtin
# Names re-exported as the public API of :mod:`pcapkit`, grouped by the
# sub-package each one comes from.
__all__ = [
    # pcapkit.const
    'const',
    # # pcapkit.vendor
    # 'vendor',
    # pcapkit.corekit
    'Info', # Info Class
    'ProtoChain', # ProtoChain
    'VersionInfo', # Version
    # pcapkit.dumpkit
    'PCAPIO', # PCAP Dumper
    'NotImplementedIO', # Simulated I/O
    # pcapkit.foundation
    'Extractor', # Extraction
    'analyse2', # Analysis
    'TraceFlow', # Trace Flow
    # pcapkit.interface
    'extract', 'analyse', 'reassemble', 'trace', # Interface Functions
    'TREE', 'JSON', 'PLIST', 'PCAP', # Format Macros
    'LINK', 'INET', 'TRANS', 'APP', 'RAW', # Layer Macros
    'DPKT', 'Scapy', 'PyShark', 'MPServer', 'MPPipeline', 'PCAPKit',
    # Engine Macros
    # pcapkit.protocols
    'LINKTYPE', 'ETHERTYPE', 'TP_PROTO', # Protocol Numbers
    'Header', 'Frame', # PCAP Headers
    'NoPayload', # No Payload
    'Raw', # Raw Packet
    'ARP', 'DRARP', 'Ethernet', 'InARP', 'L2TP', 'OSPF', 'RARP', 'VLAN',
    # Link Layer
    'AH', 'IP', 'IPsec', 'IPv4', 'IPv6', 'IPX', # Internet Layer
    'HIP', 'HOPOPT', 'IPv6_Frag', 'IPv6_Opts', 'IPv6_Route', 'MH',
    # IPv6 Extension Header
    'TCP', 'UDP', # Transport Layer
    'FTP', 'HTTP', # Application Layer
    # pcapkit.reassembly
    'IPv4_Reassembly', 'IPv6_Reassembly', # IP Reassembly
    'TCP_Reassembly', # TCP Reassembly
    # pcapkit.toolkit
    'ipv4_reassembly', 'ipv6_reassembly', 'tcp_reassembly', 'tcp_traceflow',
    # default engine
    'dpkt_ipv6_hdr_len', 'dpkt_packet2chain', 'dpkt_packet2dict',
    'dpkt_ipv4_reassembly', 'dpkt_ipv6_reassembly', 'dpkt_tcp_reassembly', 'dpkt_tcp_traceflow',
    # DPKT engine
    'pyshark_packet2dict', 'pyshark_tcp_traceflow', # PyShark engine
    'scapy_packet2chain', 'scapy_packet2dict',
    'scapy_ipv4_reassembly', 'scapy_ipv6_reassembly', 'scapy_tcp_reassembly', 'scapy_tcp_traceflow',
    # Scapy engine
    # pcapkit.utilities
    'beholder_ng', 'seekset_ng', # Decorators
]
| StarcoderdataPython |
1919897 | <reponame>karpiq24/django-klima-kar
from django.contrib import admin
from apps.invoicing.models import (
SaleInvoice,
SaleInvoiceItem,
Contractor,
RefrigerantWeights,
ServiceTemplate,
CorrectiveSaleInvoice,
)
# Register every invoicing model with the default admin site, in the same
# order as before.
for _model in (
    SaleInvoice,
    SaleInvoiceItem,
    Contractor,
    RefrigerantWeights,
    ServiceTemplate,
    CorrectiveSaleInvoice,
):
    admin.site.register(_model)
| StarcoderdataPython |
6646432 | <filename>src/examples/utils_PyKinectV2.py<gh_stars>0
##############################################################
### Set of useful utilities function related to PyKinectV2 ###
##############################################################
import cv2
import ctypes
import numpy as np
from open3d import *
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectV2
from pykinect2 import PyKinectRuntime
##########################
### Map color to depth ###
##########################
def get_align_color_image(kinect, color_img, color_height=1080, color_width=1920, depth_height=424, depth_width=512):
    """Re-sample the color frame so it aligns pixel-for-pixel with the depth frame.

    Args:
        kinect: PyKinectRuntime instance; its private mapper and depth-frame
            buffers are used to do the mapping.
        color_img: BGRA color frame of shape (color_height, color_width, 4).
        color_height, color_width: dimensions of the color frame.
        depth_height, depth_width: dimensions of the depth frame.

    Returns:
        BGRA image of shape (depth_height, depth_width, 4), where each pixel is
        the color sample that corresponds to that depth pixel.
    """
    # Allocate a ColorSpacePoint buffer, one entry per depth pixel.
    CSP_Count = kinect._depth_frame_data_capacity
    CSP_type = _ColorSpacePoint * CSP_Count.value
    CSP = ctypes.cast(CSP_type(), ctypes.POINTER(_ColorSpacePoint))
    # Ask the SDK to compute, for every depth pixel, its (x, y) in color space.
    kinect._mapper.MapDepthFrameToColorSpace(kinect._depth_frame_data_capacity, kinect._depth_frame_data, CSP_Count,
                                             CSP)
    colorXYs = np.copy(
        np.ctypeslib.as_array(CSP, shape=(depth_height * depth_width,)))  # Convert ctype pointer to array
    colorXYs = colorXYs.view(np.float32).reshape(colorXYs.shape + (
        -1,))  # Convert struct array to regular numpy array https://stackoverflow.com/questions/5957380/convert-structured-array-to-regular-numpy-array
    # Adding 0.5 before truncation rounds positive coordinates to the nearest pixel.
    colorXYs += 0.5
    # Fix: np.int was a deprecated alias of the builtin int and was removed in
    # NumPy 1.24; using the builtin directly is behaviorally identical.
    colorXYs = colorXYs.reshape(depth_height, depth_width, 2).astype(int)
    # Clamp lookups to valid color-image coordinates.
    colorXs = np.clip(colorXYs[:, :, 0], 0, color_width - 1)
    colorYs = np.clip(colorXYs[:, :, 1], 0, color_height - 1)
    align_color_img = np.zeros((depth_height, depth_width, 4), dtype=np.uint8)
    align_color_img[:, :] = color_img[colorYs, colorXs, :]
    return align_color_img
##################################
### Get the joints information ###
##################################
def get_single_joint(joints, jointPoints, jointType):
    """Return one joint's 2-D depth-space position, or zeros when untracked.

    NOTE(review): the untracked branch returns shape (1, 2) while the tracked
    branch returns shape (2,); callers assigning this into a row of a joint
    array rely on broadcasting to make the two shapes interchangeable.
    """
    jointState = joints[jointType].TrackingState;
    # Joint not tracked or not 'really' tracked
    if (jointState == PyKinectV2.TrackingState_NotTracked) or (jointState == PyKinectV2.TrackingState_Inferred):
        return np.zeros((1, 2), dtype=np.int32) # Return zeros
    else:
        return np.array([jointPoints[jointType].x, jointPoints[jointType].y], dtype=np.int32)
def get_joint2D(joints, jointPoints):
    """Collect the 2-D depth-space coordinates of all 25 Kinect joints.

    Returns an int32 array of shape [JointType_Count, 2]; untracked joints
    come back as (0, 0).
    """
    joint_count = PyKinectV2.JointType_Count
    joint2D = np.zeros((joint_count, 2), dtype=np.int32)
    for joint_type in range(joint_count):
        joint2D[joint_type, :] = get_single_joint(joints, jointPoints, joint_type)
    return joint2D
def get_joint3D(joints, jointPoints, depth_img, intrinsics, depth_scale):
    """Back-project all 25 joints from depth-image pixels into 3-D camera space.

    Uses the pinhole model: Z = depth * depth_scale, X = (u - cx) * Z / fx,
    Y = (v - cy) * Z / fy.
    """
    joint3D = np.zeros((PyKinectV2.JointType_Count, 3), dtype=np.float32) # [25, 3] Note: Total 25 joints
    joint2D = get_joint2D(joints, jointPoints)
    # Camera intrinsics: focal lengths and principal point.
    fx = intrinsics.intrinsic_matrix[0, 0]
    fy = intrinsics.intrinsic_matrix[1, 1]
    cx = intrinsics.intrinsic_matrix[0, 2]
    cy = intrinsics.intrinsic_matrix[1, 2]
    # Back project the 2D points to 3D coor
    for i in range(PyKinectV2.JointType_Count):
        u, v = joint2D[i, 0], joint2D[i, 1]
        joint3D[i, 2] = depth_img[v, u] * depth_scale # Z coor
        joint3D[i, 0] = (u - cx) * joint3D[i, 2] / fx # X coor
        joint3D[i, 1] = (v - cy) * joint3D[i, 2] / fy # Y coor
    return joint3D
def get_joint_quaternions(orientations):
    """Extract each joint's orientation quaternion as rows of (w, x, y, z)."""
    quat = np.zeros((PyKinectV2.JointType_Count, 4), dtype=np.float32)
    for joint_type in range(PyKinectV2.JointType_Count):
        q = orientations[joint_type].Orientation
        quat[joint_type] = (q.w, q.x, q.y, q.z)
    return quat
######################
### Draw on OpenCV ###
######################
# Define the BGR color for 6 different bodies
# (OpenCV images are BGR; index i colours the body with tracking slot i).
colors_order = [(0, 0, 255), # Red
                (0, 255, 0), # Green
                (255, 0, 0), # Blue
                (0, 255, 255), # Yellow
                (255, 0, 255), # Magenta
                (255, 255, 0)] # Cyan
def draw_joint2D(img, j2D, color=(0, 0, 255)): # Default red circles
    """Draw a filled circle at every joint position; returns the same image."""
    for x, y in j2D:
        cv2.circle(img, (x, y), 5, color, -1)
    return img
def draw_bone2D(img, j2D, color=(0, 0, 255)): # Default red lines
    """Draw skeleton bones as lines from each joint to its parent joint.

    A bone is skipped whenever either endpoint has a non-positive coordinate
    (the zero value used for untracked joints).
    """
    # Define the kinematic tree where each of the 25 joints is associated to a parent joint
    k = [0, 0, 1, 2, # Spine
         20, 4, 5, 6, # Left arm
         20, 8, 9, 10, # Right arm
         0, 12, 13, 14, # Left leg
         0, 16, 17, 18, # Right leg
         1, # Spine
         7, 7, # Left hand
         11, 11] # Right hand
    for i in range(j2D.shape[0]): # Should loop 25 times
        if j2D[k[i], 0] > 0 and j2D[k[i], 1] > 0 and j2D[i, 0] > 0 and j2D[i, 1] > 0:
            cv2.line(img, (j2D[k[i], 0], j2D[k[i], 1]), (j2D[i, 0], j2D[i, 1]), color)
    return img
def color_body_index(kinect, img):
    """Map a body-index frame to a BGR visualisation, one colour per body."""
    height, width = img.shape
    out = np.zeros((height, width, 3), dtype=np.uint8)
    for body_idx in range(kinect.max_body_count):
        # Boolean-mask assignment colours every pixel belonging to this body.
        out[img == body_idx] = colors_order[body_idx]
    return out
def draw_bodyframe(body_frame, kinect, img):
    """Overlay joints and bones for every tracked body onto *img*."""
    if body_frame is not None:
        for i in range(0, kinect.max_body_count):
            body = body_frame.bodies[i]
            if body.is_tracked:
                joints = body.joints
                joint_points = kinect.body_joints_to_depth_space(joints) # Convert joint coordinates to depth space
                joint2D = get_joint2D(joints, joint_points) # Convert to numpy array format
                # One distinct colour per body slot.
                img = draw_joint2D(img, joint2D, colors_order[i])
                img = draw_bone2D(img, joint2D, colors_order[i])
    return img
################################
### For Open3D visualisation ###
################################
def create_line_set_bones(joints):
    """Build an Open3D LineSet connecting the 25 joints with 24 bone lines."""
    # Draw the 24 bones (lines) connecting 25 joints
    # The lines below is the kinematic tree that defines the connection between parent and child joints
    lines = [[0, 1], [1, 20], [20, 2], [2, 3], # Spine
             [20, 4], [4, 5], [5, 6], [6, 7], [7, 21], [7, 22], # Left arm and hand
             [20, 8], [8, 9], [9, 10], [10, 11], [11, 23], [11, 24], # Right arm and hand
             [0, 12], [12, 13], [13, 14], [14, 15], # Left leg
             [0, 16], [16, 17], [17, 18], [18, 19]] # Right leg
    colors = [[0, 0, 1] for i in range(24)] # Default blue
    line_set = LineSet()
    line_set.lines = Vector2iVector(lines)
    line_set.colors = Vector3dVector(colors)
    line_set.points = Vector3dVector(joints)
    return line_set
def create_color_point_cloud(align_color_img, depth_img,
                             depth_scale, clipping_distance_in_meters, intrinsic):
    """Fuse an aligned color image and a depth image into a colored point cloud.

    Returns the Open3D point and color vectors (not the PointCloud object).
    """
    align_color_img = align_color_img[:, :, 0:3] # Only get the first three channel
    align_color_img = align_color_img[..., ::-1] # Convert opencv BGR to RGB
    rgbd_image = create_rgbd_image_from_color_and_depth(
        Image(align_color_img.copy()),
        Image(depth_img),
        depth_scale=1.0 / depth_scale,
        depth_trunc=clipping_distance_in_meters,
        convert_rgb_to_intensity=False)
    pcd = create_point_cloud_from_rgbd_image(rgbd_image, intrinsic)
    # Point cloud only without color
    # pcd = create_point_cloud_from_depth_image(
    # Image(depth_img),
    # intrinsic,
    # depth_scale=1.0/depth_scale,
    # depth_trunc=clipping_distance_in_meters)
    return pcd.points, pcd.colors
def get_single_joint3D_and_orientation(kinect, body_frame, depth_img, intrinsic, depth_scale):
    """Return (joint3D, orientations) for a tracked body, or zero arrays if none.

    NOTE(review): when several bodies are tracked, each loop iteration
    overwrites the previous result, so the last tracked body's data wins --
    confirm this is intended.
    """
    joint3D = np.zeros((PyKinectV2.JointType_Count, 3), dtype=np.float32)
    orientation = np.zeros((PyKinectV2.JointType_Count, 4), dtype=np.float32)
    if body_frame is not None:
        for i in range(0, kinect.max_body_count):
            body = body_frame.bodies[i]
            if body.is_tracked:
                joints = body.joints
                joint_points = kinect.body_joints_to_depth_space(joints) # Convert joint coordinates to depth space
                joint3D = get_joint3D(joints, joint_points, depth_img, intrinsic,
                                      depth_scale) # Convert to numpy array format
                orientation = get_joint_quaternions(body.joint_orientations)
    # Note: Currently only return single set of joint3D and orientations
    return joint3D, orientation
def transform_geometry_quaternion(joint3D, orientation):
    """Build a 4x4 homogeneous transform from a quaternion and a translation.

    *orientation* supplies (w, x, y, z); *joint3D* supplies the translation.
    Returns the matrix as a nested list of rows.
    """
    w, x, y, z = orientation[0], orientation[1], orientation[2], orientation[3]
    tx, ty, tz = joint3D[0], joint3D[1], joint3D[2]
    # Quaternion-to-rotation-matrix conversion:
    # http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm
    row_x = [1 - 2 * y * y - 2 * z * z, 2 * x * y - 2 * z * w, 2 * x * z + 2 * y * w, tx]
    row_y = [2 * x * y + 2 * z * w, 1 - 2 * x * x - 2 * z * z, 2 * y * z - 2 * x * w, ty]
    row_z = [2 * x * z - 2 * y * w, 2 * y * z + 2 * x * w, 1 - 2 * x * x - 2 * y * y, tz]
    return [row_x, row_y, row_z, [0, 0, 0, 1]]
1763823 | """Unit tests for the SonarQube commented-out code collector."""
from .base import SonarQubeTestCase
class SonarQubeCommentedOutCodeTest(SonarQubeTestCase):
    """Unit tests for the SonarQube commented-out code collector."""
    # Metric type under test; consumed by the SonarQubeTestCase base class.
    METRIC_TYPE = "commented_out_code"
    async def test_commented_out_code(self):
        """Test that the number of lines with commented out code is returned."""
        json = dict(total="2")
        response = await self.collect(get_request_json_return_value=json)
        self.assert_measurement(
            response,
            value="2",
            total="100",
            # The landing URL filters on every SonarQube rule that flags
            # commented-out code, one per language.
            landing_url=f"{self.issues_landing_url}&rules=abap:S125,apex:S125,c:CommentedCode,cpp:CommentedCode,"
            "flex:CommentedCode,csharpsquid:S125,javascript:CommentedCode,javascript:S125,kotlin:S125,"
            "objc:CommentedCode,php:S125,plsql:S125,python:S125,scala:S125,squid:CommentedOutCodeLine,"
            "java:S125,swift:S125,typescript:S125,Web:AvoidCommentedOutCodeCheck,xml:S125",
        )
| StarcoderdataPython |
9705919 | <gh_stars>0
#!/usr/bin/python
# --------------------------------------------------------------------------------------------------
# Convert all Preferences from many separate tables per semester to a
# single table with additional semester Id.
#
#---------------------------------------------------------------------------------------------------
import sys,os,re
import MySQLdb
import Database
#---------------------------------------------------------------------------------------------------
# H E L P E R
#---------------------------------------------------------------------------------------------------
def makePreferencesTable(cursor,execute):
    # test whether requested table exists already and if not make the table
    # NOTE(review): in dry-run mode (execute != "exec") the describe is never
    # executed, so no exception fires and the create branch is never reached.
    # Prepare SQL query to test whether table exists
    sql = "describe Preferences;"
    try:
        # Execute the SQL command
        print " MYSQL> " + sql
        if execute == "exec":
            cursor.execute(sql)
        print ' INFO -- table (Preferences) exists already.\n'
    except:
        print ' INFO - table (Preferences) does not yet exist.\n'
        # Prepare SQL query to create the new table
        # NOTE(review): this string contains two statements separated by ';';
        # a single cursor.execute() typically rejects multi-statement SQL
        # unless the connection enables multi-statement support -- confirm.
        sql = "create table Preferences" +\
              "(Term char(5), Email char(40), Pref1 text, Pref2 text, Pref3 text);" + \
              " alter table Preferences add constraint onePerTerm unique(Term, Email);"
        try:
            # Execute the SQL command
            print " MYSQL> " + sql
            if execute == "exec":
                cursor.execute(sql)
        except:
            print ' ERROR - table creation failed.'
def findAllPreferencesTables(cursor):
tables = []
results = []
sql = "show tables like 'Preferences_____'"
try:
# Execute the SQL command
cursor.execute(sql)
results = cursor.fetchall()
except:
print ' ERROR - select failed: ' + sql
for row in results:
table = row[0]
tables.append(table)
return tables
def convertPreferencesTable(cursor,table,execute):
# convert all entries in the given table
# Prepare SQL query to insert record into the existing table
term = table[-5:]
sql = "select * from " + table + ";"
try:
# Execute the SQL command
cursor.execute(sql)
results = cursor.fetchall()
except:
print ' ERROR - select failed: ' + sql
return
for row in results:
email = row[0]
pref1 = row[1]
pref2 = row[2]
pref3 = row[3]
sql = "insert into Preferences values ('" + \
term + "','" + \
email + "','" + \
pref1 + "','" + \
pref2 + "','" + \
pref3 + "');"
try:
# Execute the SQL command
#print " MYSQL> " + sql
if execute == "exec":
cursor.execute(sql)
except:
print ' ERROR - insert failed: ' + sql
print ' --> %d in %s.\n'%(len(results),table)
return len(results)
#---------------------------------------------------------------------------------------------------
# M A I N
#---------------------------------------------------------------------------------------------------
# Usage banner (built but only shown if printed by hand).
usage = " usage: convertAllPreferences.py [ <execute = no> ]\n\n"
usage += " execute should we execute the insertion into the database\n"
usage += " activate by setting: execute = exec\n\n"
# Read command line arguments
execute = "no"
if len(sys.argv) > 1:
    execute = sys.argv[1]
# Open database connection
db = Database.DatabaseHandle()
# Prepare a cursor object using cursor() method
cursor = db.getCursor()
# Create the new summary table
makePreferencesTable(cursor,execute)
# Find all tables to be converted
tables = findAllPreferencesTables(cursor)
# Loop over all old tables and convert
nEntries = 0
for table in tables:
    #print ' Convert table: ' + table
    # NOTE(review): convertPreferencesTable returns None when its initial
    # select fails, which would make this '+=' raise -- confirm/handle.
    nEntries += convertPreferencesTable(cursor,table,execute)
print '\n Converted %d entries in total.\n'%(nEntries)
# make sure to commit all changes
db.commit()
# disconnect from server
db.disco()
# exit
sys.exit()
| StarcoderdataPython |
4980888 | <gh_stars>1-10
"""
Task using configuration to get the subscriptions then
- Does not collect groups with no alias as they will be deleted
- Collect groups that have an alias not found in AAD
- Collect groups by age that meet certain thresh holds
- 30 days old - initial warning
- 60 days old - second warning
- 90 days old - final warning before delete
NOTE: This code will NOT delete any groups at this time
NOTE: All groups, regardless of alias tag are also tagged
with a lifetime object to tell us when it was first/last seen
and it's age (for the warnings listed above)
"""
import os
from datetime import datetime
import typing
import json
from microsoft.submaintenance.utils import(
Configuration,
AzLoginUtils,
PathUtils,
AzResourceGroupUtils,
AzRolesUtils
)
CONFIGURATION_FILE = "./configuration.json"
class Lifetime:
LIFETIME_TAG = "lifetime"
def __init__(self, existing:str = None):
self.first_seen = None
self.last_seen = None
self.scans = 0
self.age = 0
if existing:
existing = existing.replace("'", '"')
props = json.loads(existing)
if props:
for prop in props:
setattr(self, prop, props[prop])
def get_updated_tag_content(self):
return json.dumps(self.__dict__).replace('"', "'")
def get_lifetime_tag_update_text(self):
return "tags.{}=\"{}\"".format(Lifetime.LIFETIME_TAG, lifetime_obj.get_updated_tag_content())
@staticmethod
def update_group_lifetime_tag(lifetime_data:str) -> typing.Any: # Lifetime object
"""return the value of the lifetime tag to udpate"""
lifetime_obj = (Lifetime() if not lifetime else Lifetime(lifetime))
utc_now = datetime.utcnow()
current_date = "{}-{}-{}".format(utc_now.year, utc_now.month, utc_now.day)
lifetime_obj.scans += 1
lifetime_obj.last_seen = current_date
if not lifetime_obj.first_seen:
lifetime_obj.first_seen = current_date
else:
first = datetime.strptime(lifetime_obj.first_seen, "%Y-%m-%d")
last = datetime.strptime(lifetime_obj.last_seen, "%Y-%m-%d")
lifetime_obj.age = (last-first).days
return lifetime_obj
class ScanResult:
AGE_NO_WARNING = -1
AGE_WARNING_DAYS = 30
AGE_WARNING_2_DAYS = 60
AGE_DELETE_DAYS = 90
def __init__(self):
self.untagged_groups:typing.List[str] = []
self.unknown_alias:typing.List[typing.Dict[str,str]] = []
self.aged_groups:typing.Dict[int, typing.List[typing.Dict[str,str]]] = {}
def add_aged_group_data(self, age_warning:int, group_data:dict):
if age_warning == ScanResult.AGE_NO_WARNING:
return
if age_warning not in self.aged_groups:
self.aged_groups[age_warning] = []
self.aged_groups[age_warning].append(group_data)
@staticmethod
def get_age_warning(age:int) -> int:
if age >= ScanResult.AGE_WARNING_DAYS and age < ScanResult.AGE_WARNING_2_DAYS:
return ScanResult.AGE_WARNING_DAYS
if age >= ScanResult.AGE_WARNING_2_DAYS and age < ScanResult.AGE_DELETE_DAYS:
return ScanResult.AGE_WARNING_2_DAYS
if age >= ScanResult.AGE_DELETE_DAYS:
return ScanResult.AGE_DELETE_DAYS
return ScanResult.AGE_NO_WARNING
# Ensure a login and switch to SP if requested
try:
# SP doesn't have rights to AAD don't use one
AzLoginUtils.validate_login(None)
except Exception as ex:
print(str(ex))
quit()
# Get and validate the minimum on the configuration
configuration = Configuration(CONFIGURATION_FILE)
if not hasattr(configuration, "subscriptions") or len(configuration.subscriptions) == 0:
raise Exception("Update configuration.json with sub ids")
# Perform scan here
complete_results:typing.Dict[str, ScanResult] = {}
for subid in configuration.subscriptions:
print("Processing", subid)
scan_result = ScanResult()
complete_results[subid] = scan_result
groups = AzResourceGroupUtils.get_groups(subid)
for group in groups:
aliases = AzResourceGroupUtils.get_tag_content(group, "alias")
lifetime = AzResourceGroupUtils.get_tag_content(group, Lifetime.LIFETIME_TAG)
# Update/create a lifetime tag content object
lifetime_obj = Lifetime.update_group_lifetime_tag(lifetime)
AzResourceGroupUtils.update_group_tags(subid, group["name"], lifetime_obj.get_lifetime_tag_update_text())
# Update age warnings
age_warning = ScanResult.get_age_warning(lifetime_obj.age)
scan_result.add_aged_group_data(age_warning, { "group" : group["name"], "alias" : aliases})
# Action based on presence of alias tag
if not aliases:
scan_result.untagged_groups.append(group["name"])
else:
# A couple of them have multiple alias' in them so make
# sure we account for all of them (and not flag as unknown)
aliases = aliases.split(' ')
for alias in aliases:
# If the alias is NOT in AD then flag it
object_ret = AzRolesUtils.get_aad_user_info("{}<EMAIL>".<EMAIL>(alias))
if not object_ret:
scan_result.unknown_alias.append({ "group" : group["name"], "alias" : alias})
print("Dumping results....")
# Dump out results....probably to a disk
usable_path = PathUtils.ensure_path("./logs/aged_groups")
for sub_id in complete_results:
outputs = {}
# No need to report on untagged groups as they get deleted.
# if len(complete_results[sub_id].untagged_groups):
# outputs["Untagged Groups"] = complete_results[sub_id].untagged_groups
if len(complete_results[sub_id].unknown_alias):
outputs["Unknown Alias"] = complete_results[sub_id].unknown_alias
if len(complete_results[sub_id].aged_groups):
outputs["Age Warnings"] = complete_results[sub_id].aged_groups
file_path = os.path.join(usable_path, "{}.json".format(sub_id))
with open(file_path, "w") as output_file:
output_file.writelines(json.dumps(outputs, indent=4))
| StarcoderdataPython |
3494025 | import time
import random
import board
import adafruit_pyportal
# Get wifi details and more from a settings.py file
try:
    from secrets import secrets
except ImportError:
    print("WiFi secrets are kept in secrets.py, please add them there!")
    raise
# Set up where we'll be fetching data from
DATA_SOURCE = "https://api.hackster.io/v2/projects?"
DATA_SOURCE += "client_id="+secrets['hackster_clientid']
DATA_SOURCE += "&client_secret="+secrets['hackster_secret']
# JSON paths into the Hackster API response; element [1] is the record index
# and is mutated below to pick a different project each cycle.
IMAGE_LOCATION = ['records', 0, "cover_image_url"]
TITLE_LOCATION = ['records',0, "name"]
HID_LOCATION = ['records', 0, "hid"]
NUM_PROJECTS = 24
# determine the current working directory needed so we know where to find files
cwd = ("/"+__file__).rsplit('/', 1)[0]
pyportal = adafruit_pyportal.PyPortal(url=DATA_SOURCE,
                                      json_path=(TITLE_LOCATION, HID_LOCATION),
                                      image_json_path=IMAGE_LOCATION,
                                      image_position=(0, 0),
                                      image_resize=(320, 240),
                                      status_neopixel=board.NEOPIXEL,
                                      default_bg=cwd+"/hackster_background.bmp",
                                      text_font=cwd+"/fonts/Arial-Bold-12.bdf",
                                      text_position=((5, 5), (5, 200)),
                                      text_color=(0xFF0000, 0xFF0000),
                                      text_wrap=(40, 40))
pyportal.preload_font()
# Main loop: fetch a project, show its URL, then pick another at random.
while True:
    response = None
    try:
        response = pyportal.fetch()
        print("Response is", response)
        # response[1] is the project's "hid" (second json_path entry).
        pyportal.set_text("http://hackster.com/project/"+response[1], 1)
    except (IndexError, RuntimeError, ValueError) as e:
        print("Some error occured, retrying! -", e)
    # next thingy should be random!
    thingy = random.randint(0, NUM_PROJECTS-1)
    HID_LOCATION[1] = TITLE_LOCATION[1] = IMAGE_LOCATION[1] = thingy
    time.sleep(60 * 3) # cycle every 3 minutes
| StarcoderdataPython |
12849485 | <filename>app/gws/server/spool.py
import gws
import importlib
def add(job):
    """Hand *job* off to the uWSGI spooler for background processing."""
    # The uwsgi module is resolved dynamically at call time via importlib
    # rather than with a top-level import.
    spooler = importlib.import_module('uwsgi')
    gws.log.info("SPOOLING", job.uid)
    payload = {b'job_uid': gws.as_bytes(job.uid)}
    spooler.spool(payload)
| StarcoderdataPython |
1675874 | <reponame>rob-opsi/freight<filename>freight/hooks/github.py
from __future__ import absolute_import
__all__ = ['GitHubHooks']
from flask import request, Response
from .base import Hook
class GitHubHooks(Hook):
    """Webhook handler that turns GitHub push events into deploy requests."""
    def ok(self):
        # Empty 200 response: acknowledge the event without acting on it.
        return Response()
    def deploy(self, app, env):
        """Trigger a deploy of *app* to *env* for qualifying push events."""
        payload = request.get_json()
        event = request.headers.get('X-GitHub-Event')
        if event != 'push':
            # Gracefully ignore everything except push events
            return self.ok()
        default_ref = app.get_default_ref(env)
        ref = payload['ref']
        # Only pushes to the environment's default branch trigger a deploy.
        if ref != 'refs/heads/{}'.format(default_ref):
            return self.ok()
        head_commit = payload['head_commit']
        if not head_commit:
            # Deleting a branch is one case, not sure of others
            return self.ok()
        committer = head_commit['committer']
        # If the committer is GitHub and the action was triggered from
        # the web UI, ignore it and use the author instead
        if committer['email'] == '<EMAIL>' and committer['username'] == 'web-flow':
            committer = head_commit['author']
        # Kick off the deploy through the internal deploys API.
        return self.client().post('/api/0/deploys/', data={
            'env': env,
            'app': app.name,
            'ref': head_commit['id'],
            'user': committer['email'],
        })
| StarcoderdataPython |
1951113 | #!/usr/bin/env python3
import json
import os
import math
# Should we run commands that are generated (to escape and convert)
run_commands = True
release = True
workers = 14;
print_commands = True
recolor = False
gen_gif = False
reverse_gif = False;
rotate_gif_degrees = 0
gif_intermediate = "zoom_rotated.gif"
gif_output = "jc_circle_2.gif"
# Zoom / gif parameters
sample_config = {}
sample_config["cutoffs"] = [
    20, 50, 90, 150, 250
]
sample_config["julia_set_param"] = [0.0, 0.0]
sampling_duration = 0
frame_count = 0
gif_frame_delay = 0 # hundredths of a second
view = {}
view["zoom"] = 0.34
view["center"] = [0, 0]
# Release mode: full resolution / long sampling. Otherwise: quick preview.
if release:
    view["width"] = 2200
    view["height"] = 2200
    sample_config["samples"] = 10000
    sample_config["warm_up_samples"] = 10
    sampling_duration = 540
    frame_count = 220
    gif_frame_delay = 5
else:
    view["width"] = 100
    view["height"] = 100
    sample_config["samples"] = 1000
    sample_config["warm_up_samples"] = 100
    sampling_duration = 5
    frame_count = 20
    gif_frame_delay = 12
sample_config["view"] = view
# Sampling Parameters
# File paths and such
draw_config_path = os.getcwd() + "/color.json"
output_dir = os.getcwd()
# One frame per point on the unit circle; each frame samples and renders.
for frame_index in range(0, frame_count):
    angle = ((frame_count - frame_index) / frame_count) * 2 * math.pi
    m_r = math.cos(angle)
    m_i = math.sin(angle)
    # NOTE(review): `message` is built but never used -- confirm dead code.
    message = "Frame: " + str(frame_index) \
        + " angle: " + str(angle)
    working_dir = output_dir + "/frame_" + str(frame_index)
    frame_histogram_path = working_dir + "/histogram.json"
    frame_path = working_dir + "/frame.png"
    # In recolor mode the histograms already exist; skip sampling entirely.
    if not recolor:
        # Make working dir
        os.mkdir(working_dir)
        # In working dir, create sample_config
        sample_config["mandelbrot_param"] = [m_r, m_i]
        # NOTE(review): this json.dumps result is discarded -- confirm dead code.
        json.dumps(sample_config)
        frame_sample_config_path = working_dir + "/sample.json"
        frame_sample_config_file = open(frame_sample_config_path, "x")
        frame_sample_config_file.write(json.dumps(sample_config))
        frame_sample_config_file.close()
        # Run sampling for desired duration
        sample_command = "escape sample" \
            + " -c " + frame_sample_config_path \
            + " -w " + str(workers) \
            + " -d " + str(sampling_duration) \
            + " -o " + frame_histogram_path \
            + " -v off"
        if print_commands:
            print(sample_command)
        if run_commands:
            os.system(sample_command)
    # Render samples
    draw_command = "escape draw" \
        + " -c " + draw_config_path \
        + " -h " + frame_histogram_path \
        + " -o " + frame_path
    if print_commands:
        print(draw_command)
    if run_commands:
        os.system(draw_command)
# Once all frames are done, make a gif using convert
gif_command = "convert" \
    + " -loop 0" \
    + " -delay " + str(gif_frame_delay) \
    + " -dispose previous"
# NOTE(review): this relies on frame_index keeping its last loop value
# (frame_count - 1), so range(0, frame_index) omits the final frame --
# presumably an off-by-one; confirm.
for i in range(0, frame_index):
    frame_path = os.getcwd() + "/frame_" + str(i) + "/frame.png"
    gif_command += " " + frame_path
# Optionally append the frames again in reverse for a back-and-forth loop.
if reverse_gif:
    for i in range(1, frame_index - 1):
        index = frame_index - ( i + 1 )
        frame_path = os.getcwd() + "/frame_" + str(index) + "/frame.png"
        gif_command += " " + frame_path
gif_command += " " + gif_intermediate
if print_commands:
    print(gif_command)
if run_commands:
    os.system(gif_command)
# We may want to rotate the gif
rotate_command = "convert" \
    + " " + gif_intermediate \
    + " -distort SRT " + str(rotate_gif_degrees) \
    + " " + gif_output
# NOTE(review): gen_gif gates only the printing of rotate_command, not its
# execution below -- confirm whether running it should also be gated.
if print_commands and gen_gif:
    print(rotate_command)
if run_commands:
    os.system(rotate_command)
| StarcoderdataPython |
5048148 | from setuptools import find_packages
from setuptools import setup
# Long description shown on PyPI: the README followed by the changelog.
readme = open('README.rst').read()
history = open('CHANGES.txt').read()
long_description = readme + '\n\n' + history

setup(name='Products.mcdutils',
      version='3.3.dev0',
      description=('A Zope product with memcached-backed ZCache and '
                   'Zope session implementations.'),
      long_description=long_description,
      classifiers=[
          'Development Status :: 6 - Mature',
          'Environment :: Web Environment',
          'Framework :: Zope',
          'Framework :: Zope :: 4',
          'Framework :: Zope :: 5',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Zope Public License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Topic :: Internet :: WWW/HTTP :: Session'],
      keywords='session memcache memcached Products',
      # NOTE(review): author/maintainer fields were redacted in the
      # published copy of this file.
      author='<NAME> and contributors',
      author_email='<EMAIL>',
      maintainer='<NAME>',
      maintainer_email='<EMAIL>',
      url='https://mcdutils.readthedocs.io',
      project_urls={
          'Documentation': 'https://mcdutils.readthedocs.io',
          'Issue Tracker': ('https://github.com/dataflake/Products.mcdutils'
                            '/issues'),
          'Sources': 'https://github.com/dataflake/Products.mcdutils',
      },
      license='ZPL 2.1',
      # src-layout; 'Products' is a shared namespace package.
      packages=find_packages('src'),
      package_dir={'': 'src'},
      include_package_data=True,
      namespace_packages=['Products'],
      zip_safe=False,
      python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
      install_requires=[
          'setuptools',
          'six',
          'python-memcached',
          'Zope >4',
      ],
      extras_require={
          'docs': ['repoze.sphinx.autointerface', 'Sphinx'],
      },
      )
| StarcoderdataPython |
1746894 | <reponame>gadgetlabs/reinforcementlearningrobot
#!/usr/bin/env python3
import os
import asyncio
import json
class SensorNotFound(Exception):
    """Raised when a configured sensor class cannot be resolved."""
class ActuatorNotFound(Exception):
    """Raised when a configured actuator class cannot be resolved."""
class BehaviourNotFound(Exception):
    """Raised when the configured behaviour class cannot be resolved."""
def main(config):
    """Wire up the behaviour, sensors and actuators, then run the loop.

    ``config`` is a dict with keys ``Behaviour`` (class name in the
    ``behaviours`` module) and ``Sensors`` / ``Actuators`` (lists of
    ``{"name": ...}`` dicts naming classes in the ``sensors`` /
    ``actuators`` modules).  Exits the process with code -1 when any
    class cannot be resolved; otherwise runs the event loop forever.
    """
    # asyncio.Queue's ``loop`` keyword was removed in Python 3.10, so the
    # loop is created and installed explicitly instead of passed around.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    sensor_queue = asyncio.Queue()
    actuator_queue = asyncio.Queue()

    # __import__/getattr raise ImportError/AttributeError, never the
    # custom *NotFound exceptions the original caught (those clauses were
    # unreachable) — catch the exceptions that actually occur.
    try:
        module = __import__("behaviours")
        behaviour_class = getattr(module, config["Behaviour"])
        loop.create_task(behaviour_class(sensor_queue, actuator_queue))
    except (ImportError, AttributeError):
        exit(-1)

    for sensor in config["Sensors"]:
        # Instantiate the sensor and pass it the sensor queue.
        try:
            module = __import__("sensors")
            sensor_class = getattr(module, sensor["name"])
            loop.create_task(sensor_class(sensor_queue))
        except (ImportError, AttributeError):
            exit(-1)

    for actuator in config["Actuators"]:
        # Instantiate the actuator and pass it the actuator queue.
        try:
            module = __import__("actuators")
            actuator_class = getattr(module, actuator["name"])
            loop.create_task(actuator_class(actuator_queue))
        except (ImportError, AttributeError):
            exit(-1)

    loop.run_forever()
    loop.close()
if __name__ == '__main__':
CONFIG_FILE = "config.json"
if not os.path.exists(CONFIG_FILE):
raise FileExistsError
config = json.load(CONFIG_FILE)
main(config)
| StarcoderdataPython |
3408987 | """
DESAFIO 063: Sequência de Fibonacci v1.0
Escreva um programa que leia um número n inteiro qualquer e mostre
na tela os n primeiros elementos de uma Sequência de Fibonacci.
Ex: 0 → 1 → 1 → 2 → 3 → 5 → 8
"""
"""
# Feito com for
x = 1
y = 0
n = int(input('Digite quantos primeiros elementos da Sequência de Fibonacci você quer exibir: '))
if n > 0:
if n == 1:
print('0')
else:
print('0', end=' → ')
for i in range(1, n):
z = x + y
if i == n - 1:
print(z, end='')
else:
print(z, end=' → ')
x = y
y = z
"""
# Iterative version: print the first n Fibonacci numbers joined by arrows.
n = int(input('Digite quantos primeiros elementos da Sequência de Fibonacci você quer exibir: '))
if n > 0:
    if n == 1:
        print('0')
    else:
        print('0', end=' → ')
        prev, curr = 1, 0
        for pos in range(1, n):
            nxt = prev + curr
            # The final element is printed without a trailing arrow.
            print(nxt, end='' if pos == n - 1 else ' → ')
            prev, curr = curr, nxt
| StarcoderdataPython |
9697744 | <reponame>pkingpeng/-python-
"""
https://stackoverflow.com/questions/36721232/importerror-cannot-import-name-get-column-letter
"""
import openpyxl
from openpyxl.utils import get_column_letter, column_index_from_string
# get_column_letter: 1-based column index -> spreadsheet column letters.
print(get_column_letter(1))
print(get_column_letter(2))
print(get_column_letter(27))
print(get_column_letter(900))

# Inspect the demo workbook's first sheet.
wb = openpyxl.load_workbook('../resource/excel/example.xlsx')
sheet = wb['Sheet1']
print(sheet.max_column)
print(get_column_letter(sheet.max_column))

# column_index_from_string is the inverse mapping (letters -> index).
print(column_index_from_string('A'))
print(column_index_from_string('AA'))

# Expected output of the prints above:
"""
A
B
AA
AHP
3
C
1
27
"""
| StarcoderdataPython |
3526310 | import requests
import json
import logging
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.resource import ResourceManagementClient
from dku_utils.access import _is_none_or_blank
# Azure Instance Metadata Service (IMDS) endpoint — link-local address,
# reachable only from inside an Azure VM.
AZURE_METADATA_SERVICE="http://169.254.169.254"
# Default IMDS API version used by get_instance_metadata().
INSTANCE_API_VERSION = "2019-04-30"
def run_and_process_cloud_error(fn):
    """Call *fn* and re-raise any CloudError with its response body inlined.

    Azure's CloudError hides the useful server message in
    ``e.response.content``; surface it in the exception text.  Any other
    exception propagates unchanged.
    """
    try:
        return fn()
    except CloudError as e:
        raise Exception('%s : %s' % (str(e), e.response.content))
    # The original also had ``except Exception as e: raise e``, which is a
    # no-op (it re-raised the exception unchanged) and has been removed.
def get_instance_metadata(api_version=INSTANCE_API_VERSION):
    """
    Return the Azure IMDS metadata document for this VM as a dict.

    IMDS must be reached directly, so proxies are explicitly disabled.
    Only works from inside an Azure VM.
    """
    metadata_svc_endpoint = "{}/metadata/instance?api-version={}".format(AZURE_METADATA_SERVICE, api_version)
    # Bug fix: the original dict was {"http": None, "http": None} — the
    # duplicated key meant HTTPS traffic was never exempted from proxies.
    req = requests.get(metadata_svc_endpoint,
                       headers={"metadata": "true"},
                       proxies={"http": None, "https": None})
    resp = req.json()
    return resp
def get_subscription_id(connection_info):
    """Resolve the Azure subscription id.

    Uses the explicit ``subscriptionId`` from *connection_info* when the
    identity type is 'default' or 'service-principal' and the id is set;
    otherwise falls back to the VM's instance metadata.
    """
    identity_type = connection_info.get('identityType', None)
    subscription_id = connection_info.get('subscriptionId', None)
    has_explicit_identity = identity_type in ('default', 'service-principal')
    if has_explicit_identity and not _is_none_or_blank(subscription_id):
        return subscription_id
    return get_instance_metadata()["compute"]["subscriptionId"]
def get_vm_resource_id(subscription_id=None,
                       resource_group=None,
                       vm_name=None):
    """Return the full ARM resource ID for a virtual machine name."""
    template = ("/subscriptions/{}/resourceGroups/{}"
                "/providers/Microsoft.Compute/virtualMachines/{}")
    return template.format(subscription_id, resource_group, vm_name)
def get_subnet_id(connection_info, resource_group, vnet, subnet):
    """Map a subnet name to its full ARM resource ID.

    ``vnet`` may be either a bare VNET name or a full resource ID
    (anything starting with "/subscriptions/").
    """
    logging.info(f"Mapping subnet {subnet} to its full resource ID...")
    if vnet.startswith("/subscriptions/"):
        logging.info(f"Vnet is specified by its full resource ID: {vnet}")
        subnet_id = f"{vnet}/subnets/{subnet}"
    else:
        logging.info(f"Vnet is specified by its name: {vnet}")
        subscription_id = get_subscription_id(connection_info)
        subnet_id = (f"/subscriptions/{subscription_id}"
                     f"/resourceGroups/{resource_group}"
                     f"/providers/Microsoft.Network/virtualNetworks/{vnet}"
                     f"/subnets/{subnet}")
    logging.info(f"Subnet {subnet} linked to the resource {subnet_id}")
    return subnet_id
def get_host_network(credentials=None, resource_group=None, connection_info=None, api_version="2019-07-01"):
    """
    Return ``(vnet_name, subnet_id)`` of the VM the DSS host runs on.

    Looks the host VM up through Azure Resource Manager, takes the first
    network interface's first IP configuration and extracts the subnet
    resource id (warning when more than one exists).
    """
    logging.info("Getting instance metadata...")
    vm_name = get_instance_metadata()["compute"]["name"]
    # NOTE(review): this message logs the VM name, not a VNET — confirm
    # the intended wording with the original author.
    logging.info("DSS host is on VNET {}".format(vm_name))
    subscription_id = get_subscription_id(connection_info)
    vm_resource_id = get_vm_resource_id(subscription_id, resource_group, vm_name)
    resource_mgmt_client = ResourceManagementClient(credential=credentials, subscription_id=subscription_id, api_version=api_version)
    vm_properties = resource_mgmt_client.resources.get_by_id(vm_resource_id, api_version=api_version).properties
    vm_network_interfaces = vm_properties["networkProfile"]["networkInterfaces"]
    if len(vm_network_interfaces) > 1:
        # Consistency fix: use logging like the rest of this module
        # instead of bare print().
        logging.warning("more than 1 network interface detected, will use 1st one on list to retrieve IP configuration info")
    network_interface_id = vm_network_interfaces[0]["id"]
    network_interface_properties = resource_mgmt_client.resources.get_by_id(network_interface_id, api_version=api_version).properties
    ip_configs = network_interface_properties["ipConfigurations"]
    if len(ip_configs) > 1:
        logging.warning("more than 1 IP config detected for this interface, will use 1st one on the list to retrieve VNET/subnet info")
    subnet_id = ip_configs[0]["properties"]["subnet"]["id"]
    # Subnet ids look like .../virtualNetworks/<vnet>/subnets/<subnet>.
    vnet = subnet_id.split("virtualNetworks")[1].split('/')[1]
    logging.info("VNET: {}".format(vnet))
    logging.info("SUBNET ID: {}".format(subnet_id))
    return vnet, subnet_id
| StarcoderdataPython |
3340431 | <filename>safe_transaction_service/tokens/clients/zerion_client.py
from dataclasses import dataclass
from typing import List, Optional
from eth_typing import ChecksumAddress
from web3.exceptions import ContractLogicError
from gnosis.eth import EthereumClient
from gnosis.eth.constants import NULL_ADDRESS
@dataclass
class UniswapComponent:
    """One underlying component token of a pool, as reported by the adapter."""
    address: str
    tokenType: str  # `ERC20` by default
    rate: str  # price per full share (1e18)
@dataclass
class ZerionPoolMetadata:
    """ERC20-style metadata of a pool token returned by ``getMetadata``."""
    address: ChecksumAddress
    name: str
    symbol: str
    decimals: int
class ZerionTokenAdapterClient:
    """
    Client for Zerion Token Adapter

    https://github.com/zeriontech/defi-sdk
    """

    # Minimal ABI covering the two read-only adapter functions used below.
    ABI = [
        {
            "inputs": [{"internalType": "address", "name": "token", "type": "address"}],
            "name": "getComponents",
            "outputs": [
                {
                    "components": [
                        {"internalType": "address", "name": "token", "type": "address"},
                        {
                            "internalType": "string",
                            "name": "tokenType",
                            "type": "string",
                        },
                        {"internalType": "uint256", "name": "rate", "type": "uint256"},
                    ],
                    "internalType": "struct Component[]",
                    "name": "",
                    "type": "tuple[]",
                }
            ],
            "stateMutability": "view",
            "type": "function",
        },
        {
            "inputs": [{"internalType": "address", "name": "token", "type": "address"}],
            "name": "getMetadata",
            "outputs": [
                {
                    "components": [
                        {"internalType": "address", "name": "token", "type": "address"},
                        {"internalType": "string", "name": "name", "type": "string"},
                        {"internalType": "string", "name": "symbol", "type": "string"},
                        {"internalType": "uint8", "name": "decimals", "type": "uint8"},
                    ],
                    "internalType": "struct TokenMetadata",
                    "name": "",
                    "type": "tuple",
                }
            ],
            "stateMutability": "view",
            "type": "function",
        },
    ]
    # Deployed adapter contract address; the base class uses the null
    # address and concrete subclasses override this.
    ADAPTER_ADDRESS: ChecksumAddress = ChecksumAddress(NULL_ADDRESS)

    def __init__(
        self,
        ethereum_client: EthereumClient,
        adapter_address: Optional[ChecksumAddress] = None,
    ):
        """Bind a web3 contract for the adapter at ``adapter_address``
        (falls back to the class-level ``ADAPTER_ADDRESS``)."""
        self.ethereum_client = ethereum_client
        self.adapter_address = (
            adapter_address if adapter_address else self.ADAPTER_ADDRESS
        )
        self.contract = ethereum_client.w3.eth.contract(
            self.adapter_address, abi=self.ABI
        )

    def get_components(
        self, token_address: ChecksumAddress
    ) -> Optional[List[UniswapComponent]]:
        """Return the pool's underlying components, or ``None`` when the
        contract call reverts (e.g. an unsupported token)."""
        try:
            return [
                UniswapComponent(*component)
                for component in self.contract.functions.getComponents(
                    token_address
                ).call()
            ]
        except ContractLogicError:
            return None

    def get_metadata(
        self, token_address: ChecksumAddress
    ) -> Optional[ZerionPoolMetadata]:
        """Return the pool token's metadata, or ``None`` when the call reverts."""
        try:
            return ZerionPoolMetadata(
                *self.contract.functions.getMetadata(token_address).call()
            )
        except ContractLogicError:
            return None
return None
class ZerionUniswapV2TokenAdapterClient(ZerionTokenAdapterClient):
    """Adapter client preconfigured with the Uniswap V2 adapter address."""
    ADAPTER_ADDRESS: ChecksumAddress = ChecksumAddress(
        "0x6C5D49157863f942A5E6115aaEAb7d6A67a852d3"
    )
class BalancerTokenAdapterClient(ZerionTokenAdapterClient):
    """Adapter client preconfigured with the Balancer adapter address."""
    ADAPTER_ADDRESS: ChecksumAddress = ChecksumAddress(
        "0xb45c5AE417F70E4C52DFB784569Ce843a45FE8ca"
    )
| StarcoderdataPython |
179701 | # Generated by Django 2.2.1 on 2019-05-16 20:05
from django.db import migrations
class Migration(migrations.Migration):
    """Options-only migration: adjust Meta options on EquipmentFacility.

    Sets default ordering by ``name`` and the plural name; no schema
    change is performed.
    """

    dependencies = [
        ('IoT_DataMgmt', '0069_auto_20190514_1740'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='equipmentfacility',
            options={'ordering': ('name',), 'verbose_name_plural': 'equipment_facilities'},
        ),
    ]
| StarcoderdataPython |
4847176 | <filename>course_project/K33401/Kumpan_Viktor/Django_Note_service/src/api/tests/tests_views.py<gh_stars>1-10
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from notes.models import Note
# Resolve the active user model once (honours a custom AUTH_USER_MODEL).
User = get_user_model()


class NoteViewTest(TestCase):
    """API tests for the Note endpoints: CRUD plus owner-only access.

    NOTE(review): the credential literals below were redacted to
    <EMAIL>/<PASSWORD> in the published copy, so the two test users are
    no longer distinguishable by email as written.
    """

    def setUp(self):
        # test_user1 owns self.n notes; test_user2 owns a single note.
        self.test_user1 = User.objects.create_user(
            email="<EMAIL>",
            password="<PASSWORD>")
        self.test_user2 = User.objects.create_user(
            email="<EMAIL>",
            password="<PASSWORD>")
        self.n = 5
        self.notes = []
        for i in range(self.n):
            self.notes.append(Note.objects.create(
                title=f"Note title {i}",
                body="Note body",
                owner=self.test_user1))
        self.test_user2_note = Note.objects.create(
            title="Note title",
            body="Note body",
            owner=self.test_user2)
        self.client = APIClient()

    def _authenticate(self):
        # Obtain a JWT and attach it to every subsequent client request.
        response = self.client.post('/api/jwt-auth/', {
            "email": "<EMAIL>",
            "password": "<PASSWORD>"
        }, format="json")
        token = response.json()["token"]
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)

    def test_api_can_get_note_list(self):
        # The list endpoint returns only the authenticated user's notes.
        self._authenticate()
        response = self.client.get(reverse('api:note-list'))
        self.assertEquals(len(response.json()), self.n)

    def test_api_can_get_note_detail(self):
        self._authenticate()
        pk = self.notes[0].id
        response = self.client.get(reverse('api:note-detail', kwargs={"pk": pk}))
        note = response.json()
        self.assertEquals(note["id"], self.notes[0].id)
        self.assertEquals(note["title"], self.notes[0].title)
        self.assertEquals(note["body"], self.notes[0].body)

    def test_api_can_create_note(self):
        self._authenticate()
        response = self.client.post(reverse('api:create'),
                                    {
                                        "title": "New note title",
                                        "body": "New note body",
                                        "tags": ['123'],
                                    }, format="json")
        note = Note.objects.filter(owner=self.test_user1).last()
        self.assertEquals(Note.objects.filter(owner=self.test_user1).count(),
                          self.n + 1)
        self.assertEquals(note.title, "New note title")
        self.assertEquals(note.body, "New note body")

    def test_api_can_update_note(self):
        # NOTE(review): this uses the 'api:reload' route while other
        # tests use 'api:note-detail' — confirm against the URL conf.
        self._authenticate()
        pk = self.notes[0].id
        response = self.client.put(reverse('api:reload', kwargs={"pk": pk}),
                                   {
                                       "title": "Note title updated",
                                       "body": "Note body updated",
                                       "tags": ["123"],
                                   }, format="json")
        note = Note.objects.get(pk=pk)
        self.assertEquals(note.title, "Note title updated")
        self.assertEquals(note.body, "Note body updated")

    def test_api_can_delete_note(self):
        self._authenticate()
        pk = self.notes[0].id
        response = self.client.delete(reverse('api:note-detail', kwargs={"pk": pk}))
        self.assertEquals(Note.objects.filter(owner=self.test_user1).count(),
                          self.n-1)

    def test_api_only_owner_can_get_note_detail(self):
        # Another user's note must look like it does not exist (404 body).
        self._authenticate()
        response = self.client.get(reverse('api:note-detail',
                                           kwargs={"pk": self.test_user2_note.id}))
        self.assertEquals(response.json()["detail"], "Not found.")

    def test_api_only_owner_can_update_note(self):
        self._authenticate()
        pk = self.test_user2_note.id
        response = self.client.put(reverse('api:note-detail', kwargs={"pk": pk}),
                                   {
                                       "title": "Note title updated",
                                       "body": "Note body updated"
                                   }, format="json")
        note = Note.objects.get(pk=pk)
        self.assertEquals(response.json()["detail"], "Not found.")
        # The note itself must be untouched.
        self.assertEquals(note.title, "Note title")
        self.assertEquals(note.body, "Note body")

    def test_api_only_owner_can_delete_note(self):
        self._authenticate()
        pk = self.test_user2_note.id
        response = self.client.delete(reverse('api:note-detail', kwargs={"pk": pk}))
        self.assertEquals(response.json()["detail"], "Not found.")
        self.assertEquals(Note.objects.filter(owner=self.test_user2).count(), 1)
| StarcoderdataPython |
3547915 | import sys
from awsglue.dynamicframe import DynamicFrame
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
# Names of the five measurement series expected in the ingested JSON.
data_sheets = ['volts', 'amps', 'watts', 'power_factor', 'watt_hours']

# Runtime parameters supplied by the Glue job definition.
args = getResolvedOptions(sys.argv, ['JOB_NAME',
                                     'database',
                                     'ingest_table',
                                     'target_bucket',
                                     'TempDir'])

sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

# Load the raw ingest table from the Glue Data Catalog.
DataSource0 = glueContext.create_dynamic_frame.from_catalog(database=args['database'],
                                                            table_name=args['ingest_table'],
                                                            transformation_ctx="DataSource0")

# Flatten the nested JSON into relational tables ("root" plus one table
# per nested array); staging data goes to the job's temp dir.
relationalize_json = DataSource0.relationalize(root_table_name="root",
                                               staging_path=args['TempDir'])
root_df = relationalize_json.select('root')
root_vals_df = relationalize_json.select('root_values')

root_df = root_df.toDF()
root_vals_df = root_vals_df.toDF()

# Re-attach each root row to its exploded value rows.
joined_data = root_df.join(root_vals_df, (root_df.values == root_vals_df.id),
                           how="left_outer")
joined_data.createOrReplaceTempView("join_table")

# Build one (timestamp_i, <series>) table per measurement series.
# INTEGER-typed values live in `...value.int`, others in `...value.double`.
columns = []
for i, d in enumerate(data_sheets):
    sql = """SELECT `values.val.timestamp` as timestamp_{},
case when `property_type` = 'INTEGER' then `values.val.value.int` else `values.val.value.double` end as {}
FROM join_table
where `name` = '{}'""".format(i, d, d)
    current_table = spark.sql(sql)
    columns.append(current_table)

# Join all five series on their timestamps into one wide table.
final_table = columns[0].join(columns[1], (columns[0].timestamp_0 == columns[1].timestamp_1), how="left_outer")
final_table = final_table.join(columns[2], (final_table.timestamp_0 == columns[2].timestamp_2), how="left_outer")
final_table = final_table.join(columns[3], (final_table.timestamp_0 == columns[3].timestamp_3), how="left_outer")
final_table = final_table.join(columns[4], (final_table.timestamp_0 == columns[4].timestamp_4), how="left_outer")
final_table.createOrReplaceTempView("final_table")

sql = """select `timestamp_0` as timestamp, `volts`, `amps`, `watts`, `power_factor`, `watt_hours`
from final_table
order by `timestamp_0` desc"""
final_table = spark.sql(sql)

# Back to a DynamicFrame, coalesced to one partition so the sink writes
# exactly one CSV file to the target bucket.
final_frame = DynamicFrame.fromDF(final_table, glueContext, "final_table")
final_frame_repartitioned = final_frame.repartition(1)

DatasinkFinal = glueContext.write_dynamic_frame.from_options(frame=final_frame_repartitioned,
                                                             format_options={'withHeader': True,
                                                                             'writeHeader': True},
                                                             connection_type="s3",
                                                             format="csv",
                                                             connection_options={'path': args['target_bucket'],
                                                                                 'partitionKeys': []},
                                                             transformation_ctx="DataSinkFinal")
job.commit()
85414 | # Django settings for example project.
import os
from django.contrib.messages import constants as message_constants
# Parent directory of this settings package and the package's own name,
# derived from this file's location on disk.
PROJECT_DIR, PROJECT_MODULE_NAME = os.path.split(
    os.path.dirname(os.path.abspath(__file__))
)
def env(name, default):
    """Look up *name* in the process environment, falling back to *default*."""
    value = os.environ.get(name)
    return default if value is None else value
DEBUG = True

# NOTE(review): the committed value was redacted to `<KEY>` (a syntax
# error as published).  A placeholder string is kept here so the module
# parses; override it from the environment or a local settings file in
# any real deployment — never ship a hard-coded secret.
SECRET_KEY = '<KEY>'

SENDFILE_BACKEND = 'sendfile.backends.development'
THUMBNAIL_DEBUG = True

ALLOWED_HOSTS = []

SITE_ID = 1

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'bootstrapform',
    'example',
    'submit',
    'example.tasks',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'example.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.request',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'submit_example',
        'USER': 'submit_example',
        'PASSWORD': '',
    },
}

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'sk-SK'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Bratislava'

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

LOCALE_PATHS = (
    os.path.join(PROJECT_DIR, PROJECT_MODULE_NAME, 'locale'),
)

# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
MEDIA_URL = '/media/'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_DIR, PROJECT_MODULE_NAME, 'static')

AUTH_USER_MODEL = 'auth.User'

# Bootstrap classes for messages
MESSAGE_TAGS = {
    message_constants.DEBUG: 'alert-debug',
    message_constants.INFO: 'alert-info',
    message_constants.SUCCESS: 'alert-success',
    message_constants.WARNING: 'alert-warning',
    message_constants.ERROR: 'alert-danger',
}

# Task statements
TASKS_DEFAULT_SUBMIT_RECEIVER_TEMPLATE = 'source'

# Submit app: dotted paths to the project's submit-policy hooks.
SUBMIT_IS_SUBMIT_ACCEPTED = 'example.submit_configuration.is_submit_accepted'
SUBMIT_FORM_SUCCESS_MESSAGE = 'example.submit_configuration.form_success_message'
SUBMIT_PREFETCH_DATA_FOR_SCORE_CALCULATION = 'example.submit_configuration.prefetch_data_for_score_calculation'
SUBMIT_DISPLAY_SCORE = 'example.submit_configuration.display_score'
JUDGE_DEFAULT_INPUTS_FOLDER_FOR_RECEIVER = 'example.submit_configuration.default_inputs_folder_at_judge'
SUBMIT_CAN_POST_SUBMIT = 'example.submit_configuration.can_post_submit'
SUBMIT_TASK_MODEL = 'tasks.Task'

# Judge connection; all overridable via environment variables.
SUBMIT_PATH = env('SUBMIT_PATH', os.path.join(PROJECT_DIR, 'submit'))
JUDGE_INTERFACE_IDENTITY = env('JUDGE_INTERFACE_IDENTITY', 'EXAMPLE')
JUDGE_ADDRESS = env('JUDGE_ADDRESS', '127.0.0.1')
JUDGE_PORT = int(env('JUDGE_PORT', 12347))

# Debug toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INSTALLED_APPS += (
    'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
    ('debug_toolbar.middleware.DebugToolbarMiddleware',) +
    MIDDLEWARE_CLASSES
)
INTERNAL_IPS = ('127.0.0.1',)
| StarcoderdataPython |
4895356 | <reponame>Tomcli/kfp-tekton
# Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp.components import func_to_container_op
# Stabilizing the test output: deterministic replacement for kfp's
# random loop-id generator.
class StableIDGenerator:
    """Yields "0...01", "0...02", ... zero-padded to kfp's loop-code width."""

    def __init__(self):
        self._index = 0

    def get_next_id(self):
        self._index += 1
        width = kfp.dsl._for_loop.LoopArguments.NUM_CODE_CHARS
        return '{code:0{width}d}'.format(code=self._index, width=width)
# Install the deterministic generator in place of kfp's default so the
# compiled pipeline YAML does not change between runs.
# NOTE(review): relies on a private kfp attribute — may break on upgrade.
kfp.dsl.ParallelFor._get_unique_id_code = StableIDGenerator().get_next_id
@func_to_container_op
def produce_str() -> str:
    """Component producing a constant string output."""
    return "Hello"
@func_to_container_op
def produce_list_of_dicts() -> list:
    """Component producing two dicts (the tuple serializes as a list)."""
    return ({"aaa": "aaa1", "bbb": "bbb1"}, {"aaa": "aaa2", "bbb": "bbb2"})
@func_to_container_op
def produce_list_of_strings() -> list:
    """Component producing a pair of strings."""
    return ("a", "z")
@func_to_container_op
def produce_list_of_ints() -> list:
    """Component producing a pair of large integers."""
    return (1234567890, 987654321)
@func_to_container_op
def consume(param1):
    """Component that just logs its single input."""
    print(param1)
@kfp.dsl.pipeline()
def parallelfor_item_argument_resolving():
    """Pipeline exercising ParallelFor item-argument resolution.

    Fans out over upstream-produced lists of strings, ints and dicts,
    consuming both whole list outputs and individual loop items.
    """
    produce_str_task = produce_str()
    produce_list_of_strings_task = produce_list_of_strings()
    produce_list_of_ints_task = produce_list_of_ints()
    produce_list_of_dicts_task = produce_list_of_dicts()

    with kfp.dsl.ParallelFor(produce_list_of_strings_task.output) as loop_item:
        consume(produce_list_of_strings_task.output)
        consume(loop_item)
        consume(produce_str_task.output)

    with kfp.dsl.ParallelFor(produce_list_of_ints_task.output) as loop_item:
        consume(produce_list_of_ints_task.output)
        consume(loop_item)

    with kfp.dsl.ParallelFor(produce_list_of_dicts_task.output) as loop_item:
        consume(produce_list_of_dicts_task.output)
        # consume(loop_item) # Cannot use the full loop item when it's a dict
        consume(loop_item.aaa)
if __name__ == '__main__':
    # Compile the pipeline to Tekton YAML next to this source file.
    from kfp_tekton.compiler import TektonCompiler
    TektonCompiler().compile(parallelfor_item_argument_resolving, __file__.replace('.py', '.yaml'))
| StarcoderdataPython |
5023256 | #entrada
# URI judge problem: read two integers per line and print their sum as
# computed "Mofiz style" — binary addition with the carry discarded,
# which is exactly the bitwise XOR of the two numbers.
while True:
    try:
        entrada = input().split()
        # Pad both operands to 32 bits with leading zeros.
        bin_a = bin(int(entrada[0]))[2:].zfill(32)
        bin_b = bin(int(entrada[1]))[2:].zfill(32)
        # Carry-less sum: a result bit is 1 iff exactly one input bit is 1.
        result = ''.join('1' if a != b else '0' for a, b in zip(bin_a, bin_b))
        print(int(result, 2))
    # Bug fix: the original wrote ``except IndexError or EOFError``,
    # which evaluates to just IndexError, so end-of-input crashed with an
    # uncaught EOFError.  A tuple catches both termination conditions.
    except (IndexError, EOFError):
        break
| StarcoderdataPython |
4853385 | from pos_parameters import filename_parameter, value_parameter, \
string_parameter, list_parameter,\
vector_parameter
import pos_wrappers
class preprocess_slice_volume(pos_wrappers.generic_wrapper):
    """Command-line wrapper for ``pos_slice_volume``: extracts a range of
    slices from a volume along a given plane into per-slice images."""

    # Template of the shell command; placeholders are filled from
    # ``_parameters`` by the generic_wrapper machinery.
    _template = """pos_slice_volume \
-i {input_image} \
-o "{output_naming}" \
-s {slicing_plane} \
-r {start_slice} {end_slice} {step} \
{shift_indexes}"""

    _parameters = { \
        'input_image' : filename_parameter('input_image', None),
        'output_naming' : filename_parameter('output_naming', None),
        'slicing_plane' : value_parameter('slicing_plane', 1),
        'start_slice' : value_parameter('start_slice', None),
        'end_slice' : value_parameter('end_slice', None),
        'step' : value_parameter('step', 1),
        # Rendered as "--output-filenames-offset <value>" when set.
        'shift_indexes' : value_parameter('output-filenames-offset', None, str_template="--{_name} {_value}"),
        'output_dir' : string_parameter('output_dir', None),
    }
class blank_slice_deformation_wrapper(pos_wrappers.generic_wrapper):
    """Wrapper around Convert3D (``c2d``/``c3d``): produces an all-zero
    multi-component deformation image shaped like the input."""

    _template = """c{dimension}d {input_image} -scale 0 -dup -omc {dimension} {output_image}"""

    _parameters = {\
        'dimension' : value_parameter('dimension', 2),
        'input_image' : filename_parameter('input_image', None),
        'output_image' : filename_parameter('output_image', None),
    }
class convert_slice_parent(pos_wrappers.generic_wrapper):
    """Abstract base for slice-conversion wrappers: holds the shared
    parameter set; subclasses supply the real command template."""

    _template = """ -- stub -- """

    _parameters = {
        'dimension' : value_parameter('dimension', 2),
        'input_image' : filename_parameter('input_image', None),
        'output_image' : filename_parameter('output_image', None),
        'scaling' : value_parameter('scaling', None, "-scale {_value}"),
        'spacing' : vector_parameter('spacing', None, '-spacing {_list}mm')
    }
class convert_slice_image(convert_slice_parent):
    """Respace/rescale a multi-component (colour) slice image via c2d/c3d."""

    _template = """c{dimension}d -mcs {input_image}\
-foreach {spacing} {scaling} -endfor \
-omc {dimension} {output_image}"""
class convert_slice_image_grayscale(convert_slice_parent):
    """Respace/rescale a single-component (grayscale) slice image."""

    _template = """c{dimension}d {input_image}\
{spacing} {scaling}\
-o {output_image}"""
| StarcoderdataPython |
1653716 | <filename>tests/test_users.py<gh_stars>10-100
# pylint: disable=redefined-outer-name,unused-variable
from unittest import mock
import pytest
from tinkoff.invest.services import UsersService
@pytest.fixture()
def users_service():
    """Auto-specced UsersService mock (records calls, checks signatures)."""
    return mock.create_autospec(spec=UsersService)
def test_get_accounts(users_service):
    """get_accounts() is invoked exactly once."""
    response = users_service.get_accounts()  # noqa: F841
    users_service.get_accounts.assert_called_once()
def test_get_margin_attributes(users_service):
    """get_margin_attributes() accepts an account id and is called once."""
    response = users_service.get_margin_attributes(  # noqa: F841
        account_id=mock.Mock(),
    )
    users_service.get_margin_attributes.assert_called_once()
def test_get_user_tariff(users_service):
    """get_user_tariff() is invoked exactly once."""
    response = users_service.get_user_tariff()  # noqa: F841
    users_service.get_user_tariff.assert_called_once()
def test_get_info(users_service):
    """get_info() is invoked exactly once."""
    response = users_service.get_info()  # noqa: F841
    users_service.get_info.assert_called_once()
| StarcoderdataPython |
3594433 | import subprocess
def audiveris(input_path, output_path):
    """Run the Audiveris OMR engine on *input_path* via Docker.

    Mounts *input_path* at /input and *output_path* at /output inside the
    ``toprock/audiveris`` container and returns the container exit code
    (0 on success).  Requires a local Docker daemon and passwordless
    ``sudo``.
    """
    # subprocess.run supersedes the legacy subprocess.call and exposes the
    # exit code on the returned CompletedProcess.
    result = subprocess.run([
        "sudo", "docker", "run", "--rm",
        "-v", output_path + ":/output",
        "-v", input_path + ":/input",
        "toprock/audiveris",
    ])
    return result.returncode
5199467 | <gh_stars>0
from scipy.spatial import distance
import imutils
from imutils import face_utils
import dlib
import cv2 as cv
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmarks.

    ``eye`` is a sequence of six (x, y) points: indices 0/3 are the
    horizontal corners, 1&5 and 2&4 the two vertical pairs.  The ratio
    drops toward 0 as the eye closes.
    """
    vertical_a = distance.euclidean(eye[1], eye[5])
    vertical_b = distance.euclidean(eye[2], eye[4])
    horizontal = distance.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
# EAR threshold below which an eye is treated as closed, and the number
# of consecutive closed-eye frames that triggers the alert overlay.
thresh = 0.25
frame_check = 60

# dlib face detector + 68-point landmark predictor (the .dat model file
# must be present in the working directory).
detect = dlib.get_frontal_face_detector()
predict = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# Landmark index ranges of the two eyes in the 68-point layout.
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS['left_eye']
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS['right_eye']

cap = cv.VideoCapture(0)  # default webcam
flag = 0  # consecutive frames with EAR below the threshold
while True:
    ret, frame = cap.read()
    frame = imutils.resize(frame, width=600)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    subjects = detect(gray, 0)
    for subject in subjects:
        shape = predict(gray, subject)
        shape = face_utils.shape_to_np(shape)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # Average of both eyes smooths out winks and landmark noise.
        ear = (leftEAR + rightEAR) / 2.0
        # Draw the eye outlines for visual feedback.
        leftEyeHull = cv.convexHull(leftEye)
        rightEyeHull = cv.convexHull(rightEye)
        cv.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        if ear < thresh:
            flag += 1
            print(flag)
            if flag >= frame_check:
                # Eyes closed long enough: draw the alert banners.
                cv.putText(frame, "*********************************ALERT!!*****************************************", (10, 30),
                           cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                cv.putText(frame, "*********************************ALERT!!*****************************************",(10, 425),
                           cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        else:
            flag = 0
    cv.imshow("Frame", frame)
    # Quit on 'q'.
    key = cv.waitKey(1) & 0xFF
    if key == ord("q"):
        break
cv.destroyAllWindows()
| StarcoderdataPython |
3424732 | # Desenvolva um programa que leia as duas notas de um aluno e calcule e mostre sua média
# Read a student's name and two grades, then show the average.
# (Prompts and output are in Portuguese, as written for the exercise.)
Nome = (input('Digite seu nome:'))
nota1 = float(input('Digite o valor da nota 1:'))
nota2 = float(input('Digite o valor da nota 2:'))
# Arithmetic mean of the two grades.
media = (nota1 + nota2) / 2
# \033[..m sequences are ANSI colours (green name, cyan average).
print('A média do aluno \033[32m{}\033[m \033[36m{}\033[m'.format(Nome, media))
| StarcoderdataPython |
6600643 | <reponame>braincodercn/OpenStock<filename>RoadMap/graph.py
import graphviz
from graphviz import Digraph
from absl import app
from absl import flags
from absl import logging
# Module-level handle to absl's global flag registry.
FLAGS = flags.FLAGS
def make_graph():
    """Build the roadmap digraph and open it in the system viewer.

    Side effects: graphviz renders the graph to a file and ``view()``
    launches the platform's default viewer.
    """
    g = Digraph("Roadmap_of_Task_based_Framework")
    g.node('System')
    g.node('Data Sources')
    g.node('Manager')
    g.node('App Layer')
    # NOTE(review): no edges are added yet — the "roadmap" is nodes only.
    g.view()
def main(argv):
    """absl entry point; ignores argv and renders the roadmap graph."""
    del argv
    make_graph()
if __name__ == '__main__':
    # Let absl parse command-line flags and invoke main().
    app.run(main)
| StarcoderdataPython |
106471 | <reponame>dpopadic/ml-res<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import re
from nltk import PorterStemmer
def plotData(X, y):
    """Scatter-plot 2-D training data: '+' for positive, 'o' for negative.

    X is an (m, 2) array; y holds 0/1 labels (0/1, not -1/+1).
    """
    # PLOTDATA(x,y) plots the data points with + for the positive examples
    # and o for the negative examples. X is assumed to be a Mx2 matrix.
    # Note: This was slightly modified such that it expects y = 1 or y = 0

    # Find Indices of Positive and Negative Examples
    y = y.flatten()
    pos = y==1
    neg = y==0

    # Plot Examples
    plt.plot(X[:,0][pos], X[:,1][pos], "k+", markersize=10)
    plt.plot(X[:,0][neg], X[:,1][neg], "yo", markersize=10)
    plt.show(block=False)
def linearKernel(x1, x2):
    """Linear kernel: the inner product of x1 with x2 transposed."""
    return np.dot(x1, x2.T)
def gaussianKernel(x1, x2, sigma=0.1):
    """Gaussian (RBF) kernel between vectors x1 and x2.

    Returns exp(-||x1 - x2||^2 / (2 * sigma^2)). Both inputs are flattened
    to 1-D first so row and column vectors are handled uniformly.

    Args:
        x1, x2: array-likes of equal size.
        sigma: kernel bandwidth (default 0.1).

    Returns:
        float similarity in (0, 1]; 1.0 when x1 == x2.
    """
    x1 = x1.flatten()
    x2 = x2.flatten()
    # The original placeholder assignment `sim = 0` was dead code and is gone.
    diff = x1 - x2
    return np.exp(-np.sum(np.power(diff, 2)) / float(2 * (sigma ** 2)))
def gaussianKernelGramMatrix(X1, X2, K_function=gaussianKernel, sigma=0.1):
"""(Pre)calculates Gram Matrix K"""
gram_matrix = np.zeros((X1.shape[0], X2.shape[0]))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
gram_matrix[i, j] = K_function(x1, x2, sigma)
return gram_matrix
def svmTrain(X, y, C, kernelFunction, tol=1e-3, max_passes=-1, sigma=0.1):
    """Trains an SVM classifier.

    When kernelFunction is "gaussian", a precomputed Gram matrix built with
    gaussianKernelGramMatrix is used; any other value (e.g. "linear", "rbf")
    is passed straight through to sklearn's SVC.
    """
    labels = y.flatten()  # 1-D labels prevent sklearn shape warnings
    if kernelFunction == "gaussian":
        clf = svm.SVC(C=C, kernel="precomputed", tol=tol, max_iter=max_passes, verbose=2)
        gram = gaussianKernelGramMatrix(X, X, sigma=sigma)
        return clf.fit(gram, labels)
    clf = svm.SVC(C=C, kernel=kernelFunction, tol=tol, max_iter=max_passes, verbose=2)
    return clf.fit(X, labels)
def visualizeBoundaryLinear(X, y, model):
    """Plot the linear decision boundary w·x + b = 0 learned by the SVM,
    then overlay the training data."""
    weights = model.coef_[0]
    intercept = model.intercept_[0]
    xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
    # Solve w0*x + w1*y + b = 0 for y along the sampled x range.
    ys = -(weights[0] * xs + intercept) / weights[1]
    plt.plot(xs, ys, 'b-')
    plotData(X, y)
def visualizeBoundary(X, y, model, varargin=0):
    """Plot a non-linear SVM decision boundary over the training data.

    The boundary is drawn as the zero-level contour of the model's
    predictions evaluated on a 100x100 grid spanning the data range.
    varargin is unused (kept for parity with the MATLAB original).
    """
    # Plot the training data on top of the boundary
    plotData(X, y)
    # Make classification predictions over a grid of values
    x1plot = np.linspace(X[:,0].min(), X[:,0].max(), 100).T
    x2plot = np.linspace(X[:,1].min(), X[:,1].max(), 100).T
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)
    # Predict one grid column at a time to bound the Gram-matrix size.
    for i in range(X1.shape[1]):
        this_X = np.column_stack((X1[:, i], X2[:, i]))
        # NOTE(review): the Gram matrix is built with gaussianKernelGramMatrix's
        # default sigma (0.1); if the model was trained with a different sigma
        # the plotted boundary will not match the classifier — confirm.
        vals[:, i] = model.predict(gaussianKernelGramMatrix(this_X, X))
    # Plot the SVM boundary (zero-level contour)
    plt.contour(X1, X2, vals, colors="blue", levels=[0,0])
    plt.show(block=False)
def dataset3Params(X, y, Xval, yval):
    """Grid-search C and sigma for the Gaussian-kernel SVM.

    Trains on (X, y) for every (sigma, C) pair drawn from the standard
    Coursera candidate grid, scores each model on the cross-validation set
    (Xval, yval), and returns the pair with the lowest classification error.

    Returns:
        Tuple (C, sigma) minimizing the cross-validation error.
    """
    candidate_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    # One row per (sigma, C) pair: [cv error, sigma, C].
    predictionErrors = np.zeros((len(candidate_values) ** 2, 3))
    predictionsCounter = 0
    # iterate over values of sigma and C
    for sigma in candidate_values:
        for C in candidate_values:
            # BUG FIX: the original `print(sigma + " " + C)` raised
            # TypeError (float + str); format the values instead.
            print('{0} {1}'.format(sigma, C))
            # train model on training corpus with current sigma and C
            model = svmTrain(X, y, C, "gaussian", sigma=sigma)
            # compute predictions on cross-validation set
            predictions = model.predict(gaussianKernelGramMatrix(Xval, X))
            # store prediction error plus the (sigma, C) that produced it
            predictionErrors[predictionsCounter, 0] = np.mean((predictions != yval).astype(int))
            predictionErrors[predictionsCounter, 1] = sigma
            predictionErrors[predictionsCounter, 2] = C
            predictionsCounter = predictionsCounter + 1
    print(predictionErrors)
    # Index of the row with the smallest cross-validation error (column 0);
    # that row carries the winning sigma and C.
    best_row = predictionErrors[:, 0].argmin()
    print(predictionErrors[best_row, 1])
    print(predictionErrors[best_row, 2])
    sigma = predictionErrors[best_row, 1]
    C = predictionErrors[best_row, 2]
    return C, sigma
def readFile(filename):
    """Return the entire contents of *filename*, or '' if it cannot be read.

    Failures to open/read the file are reported on stdout rather than
    raised, preserving the original best-effort contract.
    """
    try:
        with open(filename, 'r') as openFile:
            return openFile.read()
    except OSError:
        # Narrowed from a bare `except:` so programming errors (NameError,
        # KeyboardInterrupt, ...) are no longer silently swallowed.
        print('Unable to open {:s}'.format(filename))
        return ''
def getVocabList():
    """Read the fixed vocabulary file and return a dict mapping word -> index."""
    vocab = {}
    with open('ml/ex6/data/vocab.txt', 'r') as vocab_file:
        # Each line is "<index> <word>".
        for line in vocab_file:
            index, word = line.split()
            vocab[word] = int(index)
    return vocab
def processEmail(email_contents):
    """Preprocess an email body and map its words to vocabulary indices.

    Normalizes the text (lowercase; strip HTML; canonicalize numbers, URLs,
    email addresses and dollar signs), tokenizes it, stems each token with
    the Porter stemmer, and returns the list of vocabulary indices for the
    tokens found in the vocabulary. Processed tokens are echoed to stdout.
    """
    # Load Vocabulary (word -> index; 0 is reserved for "not found")
    vocabList = getVocabList()
    # Init return value
    word_indices = []
    # ========================== Preprocess Email ===========================
    # Find the Headers ( \n\n and remove )
    # Uncomment the following lines if you are working with raw emails with the
    # full headers
    # hdrstart = email_contents.find("\n\n")
    # if hdrstart:
    #     email_contents = email_contents[hdrstart:]
    # Lower case
    email_contents = email_contents.lower()
    # Strip all HTML: any <...> tag without nested angle brackets -> space
    email_contents = re.sub('<[^<>]+>', ' ', email_contents)
    # Handle Numbers: one or more digits -> the token 'number'
    email_contents = re.sub('[0-9]+', 'number', email_contents)
    # Handle URLS: strings starting with http:// or https:// -> 'httpaddr'
    email_contents = re.sub('(http|https)://[^\s]*', 'httpaddr', email_contents)
    # Handle Email Addresses: strings with @ in the middle -> 'emailaddr'
    email_contents = re.sub('[^\s]+@[^\s]+', 'emailaddr', email_contents)
    # Handle $ sign
    email_contents = re.sub('[$]+', 'dollar', email_contents)
    # ========================== Tokenize Email ===========================
    # Output the email to screen as well
    print('\n==== Processed Email ====\n\n')
    # Running width of the current printed output line
    l = 0
    # Slightly different order from matlab version
    # Split and also get rid of any punctuation
    # regex may need further debugging...
    email_contents = re.split(r'[@$/#.-:&\*\+=\[\]?!(){},\'\'\">_<;%\s\n\r\t]+', email_contents)
    for token in email_contents:
        # Remove any non alphanumeric characters
        token = re.sub('[^a-zA-Z0-9]', '', token)
        # Stem the word
        token = PorterStemmer().stem(token.strip())
        # Skip the word if it is too short
        if len(token) < 1:
            continue
        # 0 marks "not in vocabulary"
        idx = vocabList[token] if token in vocabList else 0
        # only add entries which are in vocabList
        # i.e. those with ind ~= 0,
        # given that ind is assigned 0 if str is not found in vocabList
        if idx > 0:
            word_indices.append(idx)
        # Print to screen, ensuring that the output lines are not too long
        if l + len(token) + 1 > 78:
            print("")
            l = 0
        # NOTE(review): the trailing comma is a Python 2 holdover; under
        # Python 3 it builds a throwaway tuple and each token prints on its
        # own line — confirm which Python this is run on.
        print('{:s}'.format(token)),
        l = l + len(token) + 1
    print('\n\n=========================\n')
    return word_indices
def emailFeatures(word_indices):
    """Convert a list of vocabulary indices into a binary (n, 1) feature vector.

    Entry [idx] is 1 when idx appears in word_indices (duplicates collapse),
    0 otherwise.
    """
    n = 1899  # total number of words in the dictionary
    features = np.zeros((n, 1))
    indices = list(word_indices)
    if indices:
        features[indices, 0] = 1
    return features
| StarcoderdataPython |
3380309 | <reponame>pathtoknowhere/warden
import requests
import os
from flask import (Blueprint, flash, redirect, render_template, request,
url_for, current_app)
from flask_login import current_user, login_required, login_user
from werkzeug.security import generate_password_hash
from forms import RegistrationForm, UpdateAccountForm
from models import User, AccountInfo, Trades
from utils import update_config
user_routes = Blueprint('user_routes', __name__)
@user_routes.route("/initial_setup", methods=["GET", "POST"])
# First run setup
def initial_setup():
if current_user.is_authenticated:
return redirect(url_for("warden.warden_page"))
page = request.args.get("page")
# initial setup will cycle through different pages
if page is None or page == 'welcome' or page == '1':
# Generate a random API key for Alphavantage
import secrets
key = secrets.token_hex(15)
current_app.settings['API']['alphavantage'] = key
update_config()
return render_template("warden/welcome.html",
title="Welcome to the WARden")
if page == '2' or page == 'register':
form = RegistrationForm()
if form.validate_on_submit():
hash = generate_password_hash(form.password.data)
user = User(username=form.username.data,
password=hash)
current_app.db.session.add(user)
current_app.db.session.commit()
login_user(user, remember=True)
flash(f"Account created for {form.username.data}. User Logged in.", "success")
return redirect("/initial_setup?page=3&setup=True")
return render_template("warden/register.html",
title="Welcome to the WARden | Register",
form=form)
if page == '3' or page == 'specter_connect':
# First let's check where we can connect with Tor
tor_ports = ['9050', '9150']
session = requests.session()
# Use DuckDuckGo Onion address to test tor
url = 'https://3g2upl4pq6kufc4m.onion'
failed = True
for PORT in tor_ports:
session.proxies = {
"http": "socks5h://0.0.0.0:" + PORT,
"https": "socks5h://0.0.0.0:" + PORT,
}
try:
session.get(url)
session.close()
failed = False
except Exception:
failed = True
if not failed:
current_app.settings['TOR']['port'] = PORT
update_config()
break
if failed:
flash("Tor does not seem to be running in any ports...", "warning")
# Maybe Specter is already running?
try:
if current_app.specter.home_parser()['alias_list'] != []:
flash(f"Succesfuly connected to Specter Server at {current_app.specter.base_url}")
except Exception:
pass
return redirect(url_for("warden.warden_page"))
@user_routes.route("/account", methods=["GET", "POST"])
@login_required
def account():
form = UpdateAccountForm()
if request.method == "POST":
if form.validate_on_submit():
hash = generate_password_hash(form.password.data)
user = User.query.filter_by(username=current_user.username).first()
user.password = <PASSWORD>
current_app.db.session.commit()
flash(f"Account password updated for user {current_user.username}", "success")
return redirect(url_for("warden.warden_page"))
flash("Password Change Failed. Something went wrong. Try Again.", "danger")
return render_template("warden/account.html",
title="Account",
form=form,
current_app=current_app)
@user_routes.route("/tor_services", methods=["GET", "POST"])
@login_required
def tor_services():
action = request.args.get("action")
if action == 'start':
current_app.settings['SERVER']['onion_server'] = 'True'
update_config()
from stem.control import Controller
from urllib.parse import urlparse
current_app.tor_port = current_app.settings['SERVER'].getint('onion_port')
current_app.port = current_app.settings['SERVER'].getint('port')
from warden_modules import home_path
toraddr_file = os.path.join(home_path(), "onion.txt")
current_app.save_tor_address_to = toraddr_file
proxy_url = "socks5h://localhost:9050"
tor_control_port = ""
try:
tor_control_address = urlparse(proxy_url).netloc.split(":")[0]
if tor_control_address == "localhost":
tor_control_address = "127.0.0.1"
current_app.controller = Controller.from_port(
address=tor_control_address,
port=int(tor_control_port)
if tor_control_port
else "default",
)
except Exception:
current_app.controller = None
from tor import start_hidden_service
start_hidden_service(current_app)
flash(f"Started Tor Hidden Services at {current_app.tor_service_id}.onion", "success")
if action == 'stop':
current_app.settings['SERVER']['onion_server'] = 'False'
update_config()
from tor import stop_hidden_services
stop_hidden_services(current_app)
flash("Stopped Tor Hidden Services", "warning")
return render_template("warden/tor.html",
title="Tor Hidden Services",
current_app=current_app)
| StarcoderdataPython |
1864559 | <filename>src/cosmosdb-preview/azext_cosmosdb_preview/commands.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
from azure.cli.core.commands import CliCommandType
from azext_cosmosdb_preview._client_factory import (
cf_db_accounts,
cf_restorable_database_accounts,
cf_restorable_sql_databases,
cf_restorable_sql_containers,
cf_restorable_sql_resources,
cf_restorable_mongodb_databases,
cf_restorable_mongodb_collections,
cf_restorable_mongodb_resources,
cf_sql_resources,
cf_cassandra_cluster,
cf_cassandra_data_center
)
def load_command_table(self, _):
    """Register every cosmosdb-preview command group with the CLI.

    First one CliCommandType binding is built per operations class in the
    vendored azure_mgmt_cosmosdb SDK, then each command group maps CLI verbs
    onto SDK operations (or `cli_*` custom wrappers).
    """
    # --- SDK operation bindings ---------------------------------------------
    cosmosdb_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#DatabaseAccountsOperations.{}',
        client_factory=cf_db_accounts)
    cosmosdb_restorable_database_accounts_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableDatabaseAccountsOperations.{}',
        client_factory=cf_restorable_database_accounts)
    cosmosdb_restorable_sql_databases_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableSqlDatabasesOperations.{}',
        client_factory=cf_restorable_sql_databases)
    cosmosdb_restorable_sql_containers_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableSqlContainersOperations.{}',
        client_factory=cf_restorable_sql_containers)
    cosmosdb_restorable_sql_resources_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableSqlResourcesOperations.{}',
        client_factory=cf_restorable_sql_resources)
    cosmosdb_restorable_mongodb_databases_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableMongodbDatabasesOperations.{}',
        client_factory=cf_restorable_mongodb_databases)
    cosmosdb_restorable_mongodb_collections_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableMongodbCollectionsOperations.{}',
        client_factory=cf_restorable_mongodb_collections)
    cosmosdb_restorable_mongodb_resources_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#RestorableMongodbResourcesOperations.{}',
        client_factory=cf_restorable_mongodb_resources)
    cosmosdb_rbac_sql_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#SqlResourcesOperations.{}',
        client_factory=cf_sql_resources)
    cosmosdb_managed_cassandra_cluster_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#CassandraClusterOperations.{}',
        client_factory=cf_cassandra_cluster)
    cosmosdb_managed_cassandra_datacenter_sdk = CliCommandType(
        operations_tmpl='azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.operations#CassandraDataCenterOperations.{}',
        client_factory=cf_cassandra_data_center)
    # --- command group registrations ----------------------------------------
    with self.command_group('cosmosdb restorable-database-account', cosmosdb_restorable_database_accounts_sdk, client_factory=cf_restorable_database_accounts, is_preview=True) as g:
        g.show_command('show', 'get_by_location')
        g.custom_command('list', 'cli_cosmosdb_restorable_database_account_list')
    with self.command_group('cosmosdb', cosmosdb_sdk, client_factory=cf_db_accounts) as g:
        g.show_command('show', 'get')
        g.custom_command('restore', 'cli_cosmosdb_restore', is_preview=True)
        g.custom_command('create', 'cli_cosmosdb_create')
        g.custom_command('update', 'cli_cosmosdb_update')
        g.custom_command('list', 'cli_cosmosdb_list')
    # Restorable SQL / MongoDB resources only expose `list`.
    with self.command_group('cosmosdb sql restorable-database', cosmosdb_restorable_sql_databases_sdk, client_factory=cf_restorable_sql_databases, is_preview=True) as g:
        g.command('list', 'list')
    with self.command_group('cosmosdb sql restorable-container', cosmosdb_restorable_sql_containers_sdk, client_factory=cf_restorable_sql_containers, is_preview=True) as g:
        g.command('list', 'list')
    with self.command_group('cosmosdb sql restorable-resource', cosmosdb_restorable_sql_resources_sdk, client_factory=cf_restorable_sql_resources, is_preview=True) as g:
        g.command('list', 'list')
    with self.command_group('cosmosdb mongodb restorable-database', cosmosdb_restorable_mongodb_databases_sdk, client_factory=cf_restorable_mongodb_databases, is_preview=True) as g:
        g.command('list', 'list')
    with self.command_group('cosmosdb mongodb restorable-collection', cosmosdb_restorable_mongodb_collections_sdk, client_factory=cf_restorable_mongodb_collections, is_preview=True) as g:
        g.command('list', 'list')
    with self.command_group('cosmosdb mongodb restorable-resource', cosmosdb_restorable_mongodb_resources_sdk, client_factory=cf_restorable_mongodb_resources, is_preview=True) as g:
        g.command('list', 'list')
    # SQL RBAC role definitions/assignments: full CRUD plus `exists`.
    with self.command_group('cosmosdb sql role definition', cosmosdb_rbac_sql_sdk, client_factory=cf_sql_resources) as g:
        g.custom_command('create', 'cli_cosmosdb_sql_role_definition_create')
        g.custom_command('update', 'cli_cosmosdb_sql_role_definition_update')
        g.custom_command('exists', 'cli_cosmosdb_sql_role_definition_exists')
        g.command('list', 'list_sql_role_definitions')
        g.show_command('show', 'get_sql_role_definition')
        g.command('delete', 'delete_sql_role_definition', confirmation=True)
    with self.command_group('cosmosdb sql role assignment', cosmosdb_rbac_sql_sdk, client_factory=cf_sql_resources) as g:
        g.custom_command('create', 'cli_cosmosdb_sql_role_assignment_create')
        g.custom_command('update', 'cli_cosmosdb_sql_role_assignment_update')
        g.custom_command('exists', 'cli_cosmosdb_sql_role_assignment_exists')
        g.command('list', 'list_sql_role_assignments')
        g.show_command('show', 'get_sql_role_assignment')
        g.command('delete', 'delete_sql_role_assignment', confirmation=True)
    # Managed Cassandra clusters and data centers.
    with self.command_group('managed-cassandra cluster', cosmosdb_managed_cassandra_cluster_sdk, client_factory=cf_cassandra_cluster, is_preview=True) as g:
        g.custom_command('create', 'cli_cosmosdb_managed_cassandra_cluster_create')
        g.custom_command('update', 'cli_cosmosdb_managed_cassandra_cluster_update')
        g.custom_command('node-status', 'cli_cosmosdb_managed_cassandra_fetch_node_status')
        g.custom_command('list', 'cli_cosmosdb_managed_cassandra_cluster_list')
        g.show_command('show', 'get')
        g.command('delete', 'delete', confirmation=True)
    with self.command_group('managed-cassandra datacenter', cosmosdb_managed_cassandra_datacenter_sdk, client_factory=cf_cassandra_data_center, is_preview=True) as g:
        g.custom_command('create', 'cli_cosmosdb_managed_cassandra_datacenter_create')
        g.custom_command('update', 'cli_cosmosdb_managed_cassandra_datacenter_update')
        g.command('list', 'list_data_centers_method')
        g.show_command('show', 'get')
        g.command('delete', 'delete', confirmation=True)
| StarcoderdataPython |
1937193 | import time
# Print an ASCII heart: at each grid point, emit a character cycled from the
# word "love" where (u, v) = (x * 0.05, y * 0.1) satisfies the heart-curve
# inequality (u^2 + v^2 - 1)^3 - u^2 * v^3 <= 0, and a space elsewhere.
# BUG FIX: the original iterated x in the outer loop and printed a single
# character per print() call (each call appending its own newline), with the
# space-emitting `else` commented out — so no heart shape was ever visible.
# Rows are now assembled per y value and printed once per row.
for y in range(13, -13, -1):
    row = []
    for x in range(-30, 30):
        if ((x * 0.05) ** 2 + (y * 0.1) ** 2 - 1) ** 3 - (x * 0.05) ** 2 * (y * 0.1) ** 3 <= 0:
            row.append('love'[(x - y) % len('love')])
        else:
            row.append(' ')
    print(''.join(row))
| StarcoderdataPython |
4970986 | """Structure change
Revision ID: bd1b5f1bc8ff
Revises: <PASSWORD>
Create Date: 2019-08-05 09:27:15.358485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd1b5f1bc8ff'
# NOTE(review): the parent revision id was replaced by a "<PASSWORD>"
# redaction placeholder; restore the real down_revision before running
# this migration, or Alembic cannot order the revision chain.
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the schema change: add `categories` and `votes` tables, rework
    `comments` and `pitches` columns, and extend `users`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('categories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('votes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('vote', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('pitches_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['pitches_id'], ['pitches.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # comments: replace title/comment free text with opinion + pitch link.
    op.add_column('comments', sa.Column('opinion', sa.String(length=255), nullable=True))
    op.add_column('comments', sa.Column('pitches_id', sa.Integer(), nullable=True))
    op.add_column('comments', sa.Column('time_posted', sa.DateTime(), nullable=True))
    op.create_foreign_key(None, 'comments', 'pitches', ['pitches_id'], ['id'])
    op.drop_column('comments', 'comment')
    op.drop_column('comments', 'title')
    # pitches: votes move to the `votes` table, category becomes a FK.
    op.add_column('pitches', sa.Column('category_id', sa.Integer(), nullable=True))
    op.add_column('pitches', sa.Column('content', sa.String(), nullable=True))
    op.create_foreign_key(None, 'pitches', 'categories', ['category_id'], ['id'])
    op.drop_column('pitches', 'upvotes')
    op.drop_column('pitches', 'body')
    op.drop_column('pitches', 'author')
    op.drop_column('pitches', 'downvotes')
    op.drop_column('pitches', 'category')
    op.drop_column('pitches', 'title')
    op.add_column('users', sa.Column('bio', sa.String(length=255), nullable=True))
    op.add_column('users', sa.Column('pass_secure', sa.String(length=255), nullable=True))
    op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
    op.drop_index('ix_users_username', table_name='users')
    # ### end Alembic commands ###
def downgrade():
    """Revert the schema change applied by upgrade(), restoring the original
    comments/pitches/users columns and dropping votes and categories."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index('ix_users_username', 'users', ['username'], unique=False)
    op.drop_column('users', 'profile_pic_path')
    op.drop_column('users', 'pass_secure')
    op.drop_column('users', 'bio')
    op.add_column('pitches', sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('category', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('downvotes', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('author', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('body', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('upvotes', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'pitches', type_='foreignkey')
    op.drop_column('pitches', 'content')
    op.drop_column('pitches', 'category_id')
    op.add_column('comments', sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('comments', sa.Column('comment', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.drop_column('comments', 'time_posted')
    op.drop_column('comments', 'pitches_id')
    op.drop_column('comments', 'opinion')
    op.drop_table('votes')
    op.drop_table('categories')
    # ### end Alembic commands ###
| StarcoderdataPython |
1856080 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.cloud import bigquery
from load_benchmark_tools import benchmark_load_table
from generic_benchmark_tools import bucket_util
class LoadTablesProcessor(object):
    """Contains methods for processing and creating load tables for benchmarks.
    Attributes:
        benchmark_name(str): The name of the benchmark test.
        bq_project(str): ID of the project that holds the BigQuery dataset
            and benchmark tables.
        gcs_project(str): ID of the project that holds the GCS bucket
            where the files to be loaded are stored.
        staging_project(str): ID of the project that contains the
            staging tables that the files to be loaded into the benchmark table
            were generated from.
        staging_dataset_id(str): ID of the dataset that contains the
            staging tables that the files to be loaded into the benchmark table
            were generated from.
        dataset_id(str): ID of the dataset that the benchmark tables should
            be loaded into.
        bucket_name(str): Name of the GCS bucket that holds the files that
            should be loaded into the benchmark table.
        bucket_util(load_benchmark_tools.bucket_util.BucketUtil): Helper class for
            interacting with the bucket that holds the files that
            should be loaded into the benchmark table.
        results_table_name(str): Name of the BigQuery table that the
            benchmark table's load results will be inserted into.
        results_table_dataset_id(str): Name of the BigQuery dataset that the
            benchmark table's load results will be inserted into.
        duplicate_benchmark_tables(bool): Boolean value to determine what to
            do if a benchmark table already exists for a given file
            combination. If True, TableProcessor knows to create another
            benchmark table with the same combination to increase the
            number of results for accuracy. If not, TablesProcessor knows
            to only create a benchmark table for a given combination if one
            has not yet been created.
        file_params(dict): Dictionary containing each file parameter and
            its possible values.
        bq_logs_dataset(str): Name of the dataset holding the BQ logs table.
    """
    def __init__(
            self,
            benchmark_name,
            bq_project,
            gcs_project,
            staging_project,
            staging_dataset_id,
            dataset_id,
            bucket_name,
            results_table_name,
            results_table_dataset_id,
            duplicate_benchmark_tables,
            file_params,
            bq_logs_dataset,
    ):
        """Store configuration and build the bucket helper (see class docstring
        for parameter meanings)."""
        self.benchmark_name = benchmark_name
        self.bq_project = bq_project
        self.gcs_project = gcs_project
        self.staging_project = staging_project
        self.staging_dataset_id = staging_dataset_id
        self.dataset_id = dataset_id
        self.bucket_name = bucket_name
        self.bucket_util = bucket_util.BucketUtil(
            bucket_name=self.bucket_name,
            project_id=self.gcs_project,
            file_params=file_params,
        )
        self.results_table_name = results_table_name
        self.results_table_dataset_id = results_table_dataset_id
        self.duplicate_benchmark_tables = duplicate_benchmark_tables
        self.bq_logs_dataset = bq_logs_dataset
    def gather_files_with_benchmark_tables(self):
        """Generates file combinations that already have benchmark tables.
        Creates a set of files that already have been loaded to create
        benchmark tables. Generates list by querying the job.sourceURI field
        from the results table.
        Returns:
            Set of file names that already have been loaded to create
            benchmark tables.
        """
        query = (
            'SELECT loadProperties.sourceURI FROM `{0:s}.{1:s}.{2:s}` '.format(
                self.bq_project,
                self.results_table_dataset_id,
                self.results_table_name,
            )
        )
        query_job = bigquery.Client().query(
            query,
            location='US',
        )
        files_with_benchmark_tables = set()
        for row in query_job:
            # Only URIs that point into our bucket are relevant.
            if row['sourceURI'] and self.bucket_name in row['sourceURI']:
                # Strip the "gs://<bucket>/" prefix and the trailing "/*"
                # wildcard to recover the bare file-combination path.
                uri = row['sourceURI'].split('gs://{0:s}/'.format(
                    self.bucket_name
                ))[1]
                file_name = uri.split('/*')[0]
                files_with_benchmark_tables.add(file_name)
        return files_with_benchmark_tables
    def create_benchmark_tables(self):
        """Creates a benchmark table for each file combination in GCS bucket.
        """
        # Gather files combinations that already have benchmark tables.
        files_with_benchmark_tables = self.gather_files_with_benchmark_tables()
        # When duplicating is allowed, process every combination again;
        # otherwise skip those that already have a benchmark table.
        if self.duplicate_benchmark_tables:
            files_to_skip = set()
        else:
            files_to_skip = files_with_benchmark_tables
        # Gather file combinations that exist in the GCS Bucket.
        existing_paths = self.bucket_util.get_existing_paths()
        # Create a benchmark table for each existing file combination, and
        # load the data from the file into the benchmark table.
        for path in existing_paths:
            # Drop the final path component (the file itself) to get the
            # combination directory.
            path = path.split('/')
            path = '/'.join(path[:len(path) - 1])
            if path not in files_to_skip:
                if path in files_with_benchmark_tables:
                    verb = 'Duplicating'
                else:
                    verb = 'Processing'
                logging.info('{0:s} benchmark table for {1:s}'.format(
                    verb,
                    path,
                ))
                table = benchmark_load_table.BenchmarkLoadTable(
                    benchmark_name=self.benchmark_name,
                    bq_project=self.bq_project,
                    gcs_project=self.gcs_project,
                    staging_project=self.staging_project,
                    staging_dataset_id=self.staging_dataset_id,
                    dataset_id=self.dataset_id,
                    bucket_name=self.bucket_name,
                    path=path,
                    results_table_name=self.results_table_name,
                    results_table_dataset_id=self.results_table_dataset_id,
                    bq_logs_dataset=self.bq_logs_dataset,
                )
                table.create_table()
                table.load_from_gcs()
| StarcoderdataPython |
384649 | from flask import Flask
app = Flask(__name__)  # WSGI application object imported by the server entry point
app.config.from_object('wsgi.settings')  # load the settings module into app.config
# Imported for its side effect of registering routes on `app`; must come after
# `app` exists (deliberate circular-import pattern).
from wsgi import route
| StarcoderdataPython |
8132924 | # -*- coding: utf-8 -*-
# author: itimor
from django.db.models import Q
from dry_rest_permissions.generics import DRYPermissionFiltersBase
from users.models import User
from django.shortcuts import get_object_or_404
class ReportFilterBackend(DRYPermissionFiltersBase):
    """Scope list querysets by the requesting user's role: admins see all,
    group admins see their group's rows, everyone else only their own."""
    def filter_list_queryset(self, request, queryset, view):
        # NOTE(review): request.user (a User object) is used as the username
        # lookup value — presumably User.__str__ yields the username; confirm.
        userinfo = get_object_or_404(User, username=request.user)
        role = userinfo.roles.name
        group = userinfo.group
        # NOTE(review): `request.user == 'admin'` compares a User instance to
        # a string; verify this ever matches (a username comparison may have
        # been intended).
        if request.user == 'admin' or role == 'admin':
            return queryset.all()
        elif role == 'groupadmin':
            # Group admins see every report belonging to their group.
            return queryset.filter(Q(group=group))
        else:
            # Regular users only see reports they own.
            return queryset.filter(Q(owner=request.user))
| StarcoderdataPython |
9727215 | import numpy as np
class DeltaDist:
    """Degenerate (Dirac delta) distribution with all mass at max(vals)."""

    def __init__(self, vals):
        peak = np.max(vals)
        self.val = peak  # location of the point mass
        self.b = peak    # upper support bound (== val)
        self.a = peak    # lower support bound (== val)

    def cdf(self, samples):
        """Step CDF: 0.0 below the point mass, 1.0 at or above it.

        A list/array argument yields a list of per-element values.
        """
        if not isinstance(samples, (list, np.ndarray)):
            return 1.0 if self.val <= samples else 0.0
        return [float(self.val <= k) for k in samples]

    def ppf(self, prob):
        """Every quantile of a delta distribution is the point mass itself."""
        return self.val
6680045 | from tkinter import *
# Minimal Tkinter demo: one window containing a label and a button whose
# callback writes "Test" to stdout. mainloop() blocks until the window closes.
pencere = Tk()

def fonksiyon():
    """Button callback: print a marker to stdout."""
    print("Test")

Label(pencere, text="<NAME>").pack()
Button(pencere, text="Butona Tıkla!", command=fonksiyon).pack()
pencere.mainloop()
6613744 | from pydantic import BaseModel
from tracardi.domain.entity import Entity
class PushOverAuth(BaseModel):
    """Authentication payload for the Pushover API."""
    token: str  # application API token
    user: str  # user (or group) key identifying the message recipient
class PushOverConfiguration(BaseModel):
    """Plugin configuration: where the credentials live and what to send."""
    source: Entity  # NOTE(review): presumably references stored Pushover credentials — confirm
    message: str  # message body to deliver
| StarcoderdataPython |
284218 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from ftplib import FTP
import os
import sys
import time
import socket
class MyFTP:
    """Small FTP client wrapper around ftplib.FTP.

    Supports up/downloading single files and whole directory trees and
    logs all activity to ``log.txt``.  NOTE(review): this module uses
    Python 2 idioms elsewhere in the file; behaviour on Python 3 should
    be confirmed before reuse.
    """

    def __init__(self, host, port=21):
        """Initialise the FTP client (no connection is made yet).

        Args:
            host: server IP address or hostname
            port: server port (default 21)
        """
        # print("__init__()---> host = %s ,port = %s" % (host, port))
        self.host = host
        self.port = port
        self.ftp = FTP()
        # Override the transfer encoding; gbk is presumably for Chinese
        # file names on the server — TODO confirm the server's encoding.
        self.ftp.encoding = 'gbk'
        self.log_file = open("log.txt", "a")
        self.file_list = []

    def login(self, username, password):
        """Connect to the server and log in.

        Args:
            username: account name
            password: account password
        """
        try:
            timeout = 60
            socket.setdefaulttimeout(timeout)
            # 0: active mode, 1: passive mode
            self.ftp.set_pasv(False)
            # Debug level 2 prints the full protocol conversation
            # self.ftp.set_debuglevel(2)
            self.debug_print('开始尝试连接到 %s' % self.host)
            self.ftp.connect(self.host, self.port)
            self.debug_print('成功连接到 %s' % self.host)
            self.debug_print('开始尝试登录到 %s' % self.host)
            self.ftp.login(username, password)
            self.debug_print('成功登录到 %s' % self.host)
            self.debug_print(self.ftp.welcome)
        except Exception as err:
            # deal_error logs the failure and exits the process
            self.deal_error("FTP 连接或登录失败 ,错误描述为:%s" % err)
            pass

    def is_same_size(self, local_file, remote_file):
        """Return 1 when the remote and local file sizes match, else 0.

        A file missing on either side is treated as size -1, so two
        missing files also compare "equal".

        Args:
            local_file: local file path
            remote_file: remote file path
        """
        try:
            remote_file_size = self.ftp.size(remote_file)
        except Exception as err:
            # self.debug_print("is_same_size() 错误描述为:%s" % err)
            remote_file_size = -1
        try:
            local_file_size = os.path.getsize(local_file)
        except Exception as err:
            # self.debug_print("is_same_size() 错误描述为:%s" % err)
            local_file_size = -1
        self.debug_print('local_file_size:%d , remote_file_size:%d' % (local_file_size, remote_file_size))
        if remote_file_size == local_file_size:
            return 1
        else:
            return 0

    def download_file(self, local_file, remote_file):
        """Download a single file, skipping it when sizes already match.

        Args:
            local_file: local file path to write
            remote_file: remote file path to fetch
        """
        self.debug_print("download_file()---> local_path = %s ,remote_path = %s" % (local_file, remote_file))
        if self.is_same_size(local_file, remote_file):
            self.debug_print('%s 文件大小相同,无需下载' % local_file)
            return
        else:
            try:
                self.debug_print('>>>>>>>>>>>>下载文件 %s ... ...' % local_file)
                buf_size = 1024
                file_handler = open(local_file, 'wb')
                self.ftp.retrbinary('RETR %s' % remote_file, file_handler.write, buf_size)
                file_handler.close()
            except Exception as err:
                self.debug_print('下载文件出错,出现异常:%s ' % err)
                return

    def download_file_tree(self, local_path, remote_path):
        """Recursively download a remote directory into a local one.

        Creates the local directory when missing; returns True on success,
        None when the remote directory cannot be entered.

        Args:
            local_path: local directory
            remote_path: remote directory
        """
        print("download_file_tree()---> local_path = %s ,remote_path = %s" % (local_path, remote_path))
        try:
            self.ftp.cwd(remote_path)
        except Exception as err:
            self.debug_print('远程目录%s不存在,继续...' % remote_path + " ,具体错误描述为:%s" % err)
            return
        if not os.path.isdir(local_path):
            self.debug_print('本地目录%s不存在,先创建本地目录' % local_path)
            os.makedirs(local_path)
        self.debug_print('切换至目录: %s' % self.ftp.pwd())
        self.file_list = []
        # ftp.dir() invokes get_file_list once per raw listing line
        self.ftp.dir(self.get_file_list)
        remote_names = self.file_list
        self.debug_print('远程目录 列表: %s' % remote_names)
        for item in remote_names:
            file_type = item[0]
            file_name = item[1]
            local = os.path.join(local_path, file_name)
            if file_type == 'd':
                # 'd' entries are directories: recurse into them
                print("download_file_tree()---> 下载目录: %s" % file_name)
                self.download_file_tree(local, file_name)
            elif file_type == '-':
                # '-' entries are regular files
                print("download_file()---> 下载文件: %s" % file_name)
                self.download_file(local, file_name)
        self.ftp.cwd("..")
        self.debug_print('返回上层目录 %s' % self.ftp.pwd())
        return True

    def upload_file(self, local_file, remote_file):
        """Upload a single file, skipping it when sizes already match.

        Args:
            local_file: local file path to read
            remote_file: remote file path to write
        """
        if not os.path.isfile(local_file):
            self.debug_print('%s 不存在' % local_file)
            return
        if self.is_same_size(local_file, remote_file):
            self.debug_print('跳过相等的文件: %s' % local_file)
            return
        buf_size = 1024
        file_handler = open(local_file, 'rb')
        self.ftp.storbinary('STOR %s' % remote_file, file_handler, buf_size)
        file_handler.close()
        self.debug_print('上传: %s' % local_file + "成功!")

    def upload_file_tree(self, local_path, remote_path):
        """Recursively upload a local directory to the FTP server,
        creating the remote directory chain when it does not exist.

        Args:
            local_path: local directory
            remote_path: remote directory
        """
        if not os.path.isdir(local_path):
            self.debug_print('本地目录 %s 不存在' % local_path)
            return
        """
        创建服务器目录
        """
        # Create the server-side directory chain when cwd fails.
        try:
            self.ftp.cwd(remote_path)  # switch the working directory
        except Exception as e:
            base_dir, part_path = self.ftp.pwd(), remote_path.split('/')
            for p in part_path[1:-1]:
                # NOTE(review): no '/' is inserted between pwd() and the
                # first component unless pwd() already ends with '/';
                # confirm on servers whose pwd() is not '/'.
                base_dir = base_dir + p + '/'  # build up the sub-directory path
                try:
                    self.ftp.cwd(base_dir)  # cwd into the sub-directory; raises when missing
                except Exception as e:
                    print('INFO:', e)
                    self.ftp.mkd(base_dir)  # create the missing sub-directory
            #self.ftp.cwd(remote_path)
        self.debug_print('切换至远程目录: %s' % self.ftp.pwd())
        local_name_list = os.listdir(local_path)
        self.debug_print('本地目录list: %s' % local_name_list)
        #self.debug_print('判断是否有服务器目录: %s' % os.path.isdir())
        for local_name in local_name_list:
            src = os.path.join(local_path, local_name)
            print("src路径=========="+src)
            if os.path.isdir(src):
                try:
                    self.ftp.mkd(local_name)
                except Exception as err:
                    # Directory probably exists already; log and continue.
                    self.debug_print("目录已存在 %s ,具体错误描述为:%s" % (local_name, err))
                self.debug_print("upload_file_tree()---> 上传目录: %s" % local_name)
                self.debug_print("upload_file_tree()---> 上传src目录: %s" % src)
                self.upload_file_tree(src, local_name)
            else:
                self.debug_print("upload_file_tree()---> 上传文件: %s" % local_name)
                self.upload_file(src, local_name)
        self.ftp.cwd("..")

    def close(self):
        """Quit the FTP session and close the log file."""
        self.debug_print("close()---> FTP退出")
        self.ftp.quit()
        self.log_file.close()

    def debug_print(self, s):
        """Record the message *s* via write_log."""
        self.write_log(s)

    def deal_error(self, e):
        """Log an error and terminate the whole process.

        Args:
            e: exception or description to report
        """
        log_str = '发生错误: %s' % e
        self.write_log(log_str)
        # NOTE(review): exits the interpreter on any connect/login error.
        sys.exit()

    def write_log(self, log_str):
        """Print *log_str* and append it, date-stamped, to log.txt.

        Args:
            log_str: message to record
        """
        time_now = time.localtime()
        date_now = time.strftime('%Y-%m-%d', time_now)
        format_log_str = "%s ---> %s \n " % (date_now, log_str)
        print(format_log_str)
        self.log_file.write(format_log_str)

    def get_file_list(self, line):
        """Callback for ftp.dir(): parse and collect one listing entry.

        Args:
            line: one raw LIST line from the server
        """
        file_arr = self.get_file_name(line)
        # Drop the '.' and '..' pseudo-entries
        if file_arr[1] not in ['.', '..']:
            self.file_list.append(file_arr)

    def get_file_name(self, line):
        """Parse one LIST line into [type_char, file_name].

        Scans forward from the last ':' (the time field) past the
        following spaces; the remainder is taken as the file name.
        NOTE(review): assumes Unix-style LIST output and raises
        IndexError on lines without ':' — confirm per server.

        Args:
            line: one raw LIST line
        """
        pos = line.rfind(':')
        while (line[pos] != ' '):
            pos += 1
        while (line[pos] == ' '):
            pos += 1
        file_arr = [line[0], line[pos:]]
        return file_arr
if __name__ == "__main__":
    # Example/driver usage of MyFTP.
    # NOTE(review): host and credentials are hard-coded — move to config.
    my_ftp = MyFTP("192.168.10.3")
    #my_ftp.set_pasv(False)
    my_ftp.login("ftper", "123456")
    # Download a single file (remote path, local path):
    #my_ftp.download_file("/home/123.mp4", "/123.mp4")
    # Download a directory tree:
    # my_ftp.download_file_tree("/tmp/111/", "tmp/111/")
    # Upload a single file:
    # my_ftp.upload_file("/tmp/111/demo.apk", "/tmp/111/demo.apk")
    # Upload a directory tree:
    my_ftp.upload_file_tree("/tmp/java8", "/123/5/")
    my_ftp.close()
| StarcoderdataPython |
1858684 | # Copyright (c) 2005-2013 Simplistix Ltd
#
# This Software is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.html
# See license.txt for more details.
from AccessControl import ModuleSecurityInfo
from App.FactoryDispatcher import FactoryDispatcher
from bdb import Bdb
from cmd import Cmd
from pdb import Pdb
import monkeypatch
import sys
class Zdb(Pdb):
    """A Pdb variant intended to work inside a running Zope server
    (Python 2 code — note the use of has_key below)."""

    def __init__(self):
        # Replicates Pdb's setup by hand via Bdb/Cmd instead of calling
        # Pdb.__init__ — presumably to skip Pdb's ~/.pdbrc processing;
        # TODO confirm.
        Bdb.__init__(self)
        Cmd.__init__(self)
        self.rcLines = []
        self.prompt = '(zdb) '
        self.aliases = {}
        self.mainpyfile = ''
        self._wait_for_mainpyfile = 0
        self.commands_defining = None

    def canonic(self, filename):
        # Filenames present in the monkeypatched Python-Script cache are
        # returned as-is; everything else defers to Pdb's canonicalisation.
        if monkeypatch.ps_fncache.has_key(filename):
            return filename
        return Pdb.canonic(self,filename)

    # Python 2.4's bdb set_trace method
    # This can go away when Python 2.3 is no longer supported
    def set_trace(self, frame=None):
        """Start debugging from `frame`.

        If frame is not specified, debugging starts from caller's frame.
        """
        if frame is None:
            frame = sys._getframe().f_back
        self.reset()
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)
# make us "safe"
ModuleSecurityInfo('Products.zdb').declarePublic('set_trace')
def set_trace():
    """Drop into a Zdb debugging session at the caller's frame."""
    Zdb().set_trace(sys._getframe().f_back)
# recompilation utlitity
def initialize(context):
    """Zope product initialisation hook.

    Attaches debug_compile to the product's FactoryDispatcher (creating
    the dispatcher class when absent) and restricts it to Managers so it
    can be invoked through the web.
    """
    # This horrificness is required because Zope doesn't understand the concept
    # of a Product that doesn't register any classes :-(
    pack=context._ProductContext__pack
    fd=getattr(pack, '__FactoryDispatcher__', None)
    if fd is None:
        class __FactoryDispatcher__(FactoryDispatcher):
            "Factory Dispatcher for a Specific Product"
        fd = pack.__FactoryDispatcher__ = __FactoryDispatcher__
    if not hasattr(pack, '_m'): pack._m=fd.__dict__
    setattr(fd,'debug_compile',debug_compile)
    # Only users with the Manager role may call debug_compile.
    setattr(fd,'debug_compile__roles__',('Manager',))
# utility stuff
def debug_compile(self):
    '''Recompile all Python Scripts'''
    # (In Zope the docstring above also makes this method publishable.)
    base = self.this()
    # Find every Script (Python) object below this folder, recursively.
    scripts = base.ZopeFind(base, obj_metatypes=('Script (Python)',),
                            search_sub=1)
    names = []
    for name, ob in scripts:
        names.append(name)
        ob._compile()
        # Mark the persistent object changed so the recompile is stored.
        ob._p_changed = 1
    if names:
        return 'The following Scripts were recompiled:\n' + '\n'.join(names)
    return 'No Scripts were found.'
| StarcoderdataPython |
6584484 | <gh_stars>0
from hashlib import sha1
import constants
from message import Message, MessageType
"""Handles pieces, which divisions of the file being passed by the torrent."""
class PieceError(Exception):
    """Raised for invalid piece operations: mismatched offsets, overflow,
    completing on an already-complete piece, or a failed hash check."""
    pass


def piece_factory(total_length, piece_length, hashes):
    """Creates the piece divisions for a given length and returns
    a generator object that will yield the pieces until they are all
    done.

    Every piece has *piece_length* bytes except the last, which carries
    the remainder.  Bug fix: the piece count is now computed with ceiling
    division; previously an exact multiple of *piece_length* produced a
    spurious zero-length final piece and indexed one past the end of
    *hashes*.
    """
    # Ceiling division: number of pieces actually needed.
    num_pieces = (total_length + piece_length - 1) // piece_length
    if num_pieces == 0:
        return
    for i in range(num_pieces - 1):
        yield Piece(i, piece_length, hashes[i])
    last_length = total_length - piece_length * (num_pieces - 1)
    yield Piece(num_pieces - 1, last_length, hashes[num_pieces - 1])


class Piece:
    """One piece of the torrent payload.

    Bytes are fed in sequentially via download(); when the expected
    number of bytes has arrived the piece hash-checks itself and flips
    ``completed``.
    """

    def __init__(self, piece_index, length, piece_hash):
        self.index = piece_index   # zero-based position within the torrent
        self.length = length       # expected size of this piece, in bytes
        self.hash = piece_hash     # SHA-1 digest the finished piece must match
        self._downloaded_bytes = b''
        self.completed = False

    @property
    def bytes_downloaded(self):
        """Number of bytes received so far."""
        return len(self._downloaded_bytes)

    def download(self, offset, bytestring):
        """Append *bytestring* at *offset*; offsets must arrive in order.

        Raises:
            PieceError: when *offset* does not equal the bytes already
                downloaded, when the data would overflow the piece, or
                (via _complete) when the finished piece fails its hash.
        """
        if offset != self.bytes_downloaded:
            raise PieceError("Offset Not Matching | {} {}".format(offset, self.bytes_downloaded))
        if len(bytestring) + self.bytes_downloaded > self.length:
            # Bug fix: str.format() was never actually called here, so the
            # error message used to contain literal '{}' placeholders and
            # the diagnostic values were passed as stray exception args.
            raise PieceError(
                'Too Many Bytes for Piece | bytes_len: {} downloaded: {} limit: {}'.format(
                    len(bytestring), self.bytes_downloaded, self.length))
        self._downloaded_bytes += bytestring
        if self.bytes_downloaded == self.length:
            self._complete()

    def get_next_request_message(self):
        """Get the request that will cover the next set of bytes that
        the piece requires

        Format of a request payload:
        <4-byte piece index><4-byte block offset><4-byte length>
        """
        if self.completed:
            raise PieceError("Piece Is Already Completed")
        # Request at most REQUEST_LENGTH bytes, less near the piece end.
        request_length = min(constants.REQUEST_LENGTH, self.length - self.bytes_downloaded)
        index_bytes = int.to_bytes(self.index, length=4, byteorder='big')
        offset_bytes = int.to_bytes(self.bytes_downloaded, length=4, byteorder='big')
        length_bytes = int.to_bytes(request_length, length=4, byteorder='big')
        request_payload = index_bytes + offset_bytes + length_bytes
        return Message(MessageType.REQUEST, request_payload)

    def writeout(self, file):
        """Write the piece's bytes to an already-positioned file object."""
        file.write(self._downloaded_bytes)

    def _complete(self):
        """Hash-check the finished piece and mark it complete."""
        if not self._is_hash_valid():
            raise PieceError("Piece Has a Bad Hash")
        # TODO: When the client receives this error, it should recreate a piece
        # and request it from a different peer.
        # TODO: Consider writing the bytes to a temp_file, which can be pulled back
        # out via writeout. Then deleted.
        self.completed = True

    def _is_hash_valid(self):
        """Compare the SHA-1 of the downloaded bytes to the expected digest."""
        downloaded_hash = sha1(self._downloaded_bytes).digest()
        return downloaded_hash == self.hash

    def __repr__(self):
        return 'Piece With Index {}'.format(self.index)

    def __lt__(self, other):
        # Order pieces by index so they can be sorted for writeout.
        return self.index < other.index
| StarcoderdataPython |
12850031 | <filename>My Tools/Number Reverse/numberReverse.py
# Read an integer and print its decimal digits in reverse order
# (e.g. 123 -> 321; note trailing zeros are dropped: 120 -> 21).
# NOTE(review): negative input misbehaves (Python's % on negatives).
num = int(input("Enter a number: "))
temp = num
reverse = 0
while(temp):
    # Shift the accumulated result left one digit and append the
    # current lowest digit of temp.
    reverse = (reverse * 10) + (temp % 10)
    temp = int(temp / 10)
print("Reversed: " + str(reverse))
4983200 | <gh_stars>1-10
"""The tests for the climate component."""
import asyncio
import pytest
import voluptuous as vol
from homeassistant.components.climate import SET_TEMPERATURE_SCHEMA
from tests.common import async_mock_service
@asyncio.coroutine
def test_set_temp_schema_no_req(hass, caplog):
    """Test the set temperature schema with missing required data."""
    domain = 'climate'
    service = 'test_set_temperature'
    schema = SET_TEMPERATURE_SCHEMA
    calls = async_mock_service(hass, domain, service, schema)

    # Payload lacks the temperature field, so voluptuous must reject it.
    data = {'operation_mode': 'test', 'entity_id': ['climate.test_id']}
    with pytest.raises(vol.Invalid):
        yield from hass.services.async_call(domain, service, data)
    yield from hass.async_block_till_done()

    # The mocked service must never have been invoked.
    assert len(calls) == 0
@asyncio.coroutine
def test_set_temp_schema(hass, caplog):
    """Test the set temperature schema with ok required data."""
    domain = 'climate'
    service = 'test_set_temperature'
    schema = SET_TEMPERATURE_SCHEMA
    calls = async_mock_service(hass, domain, service, schema)

    # Complete, valid payload including the required temperature.
    data = {
        'temperature': 20.0, 'operation_mode': 'test',
        'entity_id': ['climate.test_id']}
    yield from hass.services.async_call(domain, service, data)
    yield from hass.async_block_till_done()

    # Exactly one call, carrying the payload unchanged.
    assert len(calls) == 1
    assert calls[-1].data == data
| StarcoderdataPython |
6533713 | <reponame>yaseralnajjar/hackcyprus-hitup
import datetime
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets, generics, status
from rest_framework.response import Response
from . import models
from . import serializers
from rest_framework.permissions import BasePermission, IsAuthenticated
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class ResendConfirmView(generics.GenericAPIView):
    """POST endpoint that re-sends an email confirmation.

    The serializer validates the payload and performs the actual send in
    its save() method.
    """
    serializer_class = serializers.ResendConfirmSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        # Invalid payloads raise and are returned as HTTP 400.
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response({'detail': "Email confirmation sent"})
class ReviewViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Review objects (no extra permissions)."""
    queryset = models.Review.objects.all()
    serializer_class = serializers.ReviewSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for profiles; a user may only modify their own."""

    class HisOwnProfile(BasePermission):
        """Object-level permission: allow access only to the owner."""

        def has_object_permission(self, request, view, obj):
            return obj.is_owner(request.user)

    permission_classes = (IsAuthenticated, HisOwnProfile)
    queryset = models.Profile.objects.all()
    serializer_class = serializers.ProfileSerializer

    def update(self, request, pk):
        """Partially update the profile identified by *pk*.

        Bug fix: the serializer was bound to the undefined name
        `reservation` (NameError at runtime) instead of the fetched
        profile, and the is_valid() result was ignored before save();
        invalid input now returns HTTP 400.
        """
        profile = self.get_queryset().get(pk=pk)
        serializer = serializers.ProfileSerializer(profile, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)
class HitupViewSet(viewsets.ModelViewSet):
    """Endpoints for "hitups" addressed to the authenticated user."""

    permission_classes = (IsAuthenticated, )
    queryset = models.Hitup.objects.all()

    def get_serializer_class(self):
        # Bug fix: actions other than list/create used to fall through and
        # return None, which breaks serializer instantiation (e.g. for
        # retrieve); default to the read serializer instead.
        if self.action == 'create':
            return serializers.NewHitupSerializer
        return serializers.HitupSerializer

    def get_queryset(self):
        #return models.Hitup.objects.all()
        # Only unexpired hitups addressed to the current user.
        return models.Hitup.objects.filter(hangee__user_id=self.request.user,
                                           expiration__gt=datetime.datetime.now()).all()

    def create(self, request, *args, **kwargs):
        """Create a hitup from the request payload.

        Bug fix: the is_valid() result was ignored, so invalid payloads
        were saved anyway; validation failures now return HTTP 400.
        """
        serializer = serializers.NewHitupSerializer(data=request.data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_201_CREATED)
| StarcoderdataPython |
3475478 | from django.shortcuts import render,redirect
from django.http import HttpResponse, Http404,HttpResponseRedirect
import datetime as dt
from .models import Image, PhotosLetterRecipients
from .forms import PhotosLetterForm,NewImageForm,ProfileUploadForm
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
# Create your views here.
# def welcome(request):
# return render(request, 'all-photos/today-photos.html', {"date": date,})
@login_required(login_url='/accounts/login/')
def photos_today(request):
    """Landing page: today's date, all images, and the newsletter form.

    POST handles the newsletter signup (PhotosLetterForm); on success the
    recipient is saved, a welcome email is sent and the user is
    redirected back to this page.

    Bug fixes: the redirect was built but never returned; the bound
    letter form was clobbered by `form = NewImageForm()`; and a second
    context dict was passed as render()'s `content_type` argument.
    """
    date = dt.date.today()
    all_images = Image.all_images()
    if request.method == 'POST':
        letter_form = PhotosLetterForm(request.POST)
        if letter_form.is_valid():
            name = letter_form.cleaned_data['name']
            email = letter_form.cleaned_data['email']
            recipient = PhotosLetterRecipients(name=name, email=email)
            recipient.save()
            send_welcome_email(name, email)
            return HttpResponseRedirect('photos_today')
    else:
        letter_form = PhotosLetterForm()
    image_form = NewImageForm()
    return render(request, 'all-photos/today-photos.html', {
        "date": date,
        "letterForm": letter_form,
        "ImageForm": image_form,
        "images": all_images,
    })
def past_days_photos(request, past_date):
    """Display the photos for a past day given as 'YYYY-MM-DD' in the URL.

    Raises Http404 for a malformed date; redirects to the today view when
    the requested day is today.

    Bug fix: removed a leftover debug `assert False` that made this view
    always fail with AssertionError.
    """
    try:
        # Converts date from the string URL
        date = dt.datetime.strptime(past_date, '%Y-%m-%d').date()
    except ValueError:
        # Raise 404 error when ValueError is thrown
        raise Http404()
    if date == dt.date.today():
        return redirect(photos_today)
    photos = Image.days_photos(date)
    return render(request, 'all-photos/past-photos.html', {"date": date, "photos": photos})
def search_results(request):
    """Search images by name using the `image` GET parameter."""
    search_term = request.GET.get("image")
    if search_term:
        searched_images = Image.search_by_name(search_term)
        message = f"{search_term}"
        return render(request, 'all-photos/search.html',
                      {"message": message, "images": searched_images})
    # No (or empty) search term supplied.
    message = "You haven't searched for any term"
    return render(request, 'all-photos/search.html', {"message": message})
@login_required(login_url='/accounts/login/')
def image(request, image_id):
    """Detail page for a single image; 404 when the id is unknown.

    Bug fix: the except clause referenced the bare name `DoesNotExist`,
    which is undefined in this module (a NameError would mask the real
    error); the exception lives on the model class.
    """
    try:
        image = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
        raise Http404()
    return render(request, "all-photos/image.html", {"image": image})
@login_required(login_url='/accounts/login/')
def new_image(request):
    """Form view for uploading a new image owned by the current user."""
    current_user = request.user
    title = 'New image'
    if request.method == 'POST':
        form = NewImageForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the uploader before persisting.
            image = form.save(commit=False)
            image.user = current_user
            image.save()
            return redirect('photosToday')

    else:
        form = NewImageForm()
    return render(request, 'new_image.html', {"form": form,"current_user":current_user,"title":title})
@login_required(login_url='/accounts/login/')
def upload_profile(request):
    """Create or update the current user's profile (picture, bio, username).

    NOTE(review): `Profile` is never imported in this module and the
    success paths redirect to the undefined name `profile`; both look
    like bugs — confirm against the app's models and URL names.
    """
    current_user = request.user
    title = 'Upload Profile'
    try:
        requested_profile = Profile.objects.get(user_id = current_user.id)
        if request.method == 'POST':
            form = ProfileUploadForm(request.POST,request.FILES)
            if form.is_valid():
                requested_profile.profile_pic = form.cleaned_data['image']
                requested_profile.bio = form.cleaned_data['bio']
                requested_profile.username = form.cleaned_data['username']
                requested_profile.save_profile()
                return redirect( profile )
        else:
            form = ProfileUploadForm()
    except:
        # NOTE(review): this bare except also swallows the NameError from
        # the missing Profile import, silently falling into "create" mode.
        if request.method == 'POST':
            form = ProfileUploadForm(request.POST,request.FILES)
            if form.is_valid():
                new_profile = Profile(image = form.cleaned_data['image'],bio = form.cleaned_data['bio'],username = form.cleaned_data['username'])
                new_profile.save_profile()
                return redirect( profile )
            else:
                form = ProfileUploadForm()

    return render(request,'upload_profile.html',{"title":title,"current_user":current_user,"form":form})
| StarcoderdataPython |
12848279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper
db = setting.db_web
url = ('/online/batch_job')
# - 批量处理订单
class handler:
    """Batch-order overview: aggregates the shop's paid online orders per
    product (tuan_id) and per province.  (Python 2 code — uses has_key.)"""

    def GET(self):
        if helper.logged(helper.PRIV_USER,'BATCH_JOB'):
            render = helper.create_render()
            #user_data=web.input(start_date='', shop='__ALL__')
            # Look up the shop bound to the logged-in user.
            db_shop = helper.get_shop_by_uid()
            shop_name = helper.get_shop(db_shop['shop'])
            # Tally online orders that are paid but not yet delivered.
            condition = {
                'shop' : db_shop['shop'],
                'status' : {'$in' : ['PAID','DISPATCH','ONROAD']},
                'type' : {'$in' : ['TUAN', 'SINGLE']}, # group-buy related order types only
            }
            db_sale2 = db.order_app.find(condition, {
                'order_id' : 1,
                'paid_time' : 1,
                'cart' : 1,
                'type' : 1,
                'status' : 1,
                'address' : 1,
            })
            # skus: tuan_id -> {'name', 'tuan_id', <province> -> counters}
            skus={}
            for i in db_sale2:
                # Split counts by province (9th component of the address,
                # falling back to "unknown" when absent).
                sheng = i['address'][8].split(',')[0] if len(i['address'])>=9 else u'未知'
                if skus.has_key(i['cart'][0]['tuan_id']):
                    if skus[i['cart'][0]['tuan_id']].has_key(sheng):
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] += 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] += (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] += (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] += (1 if i['status']=='ONROAD' else 0)
                    else:
                        skus[i['cart'][0]['tuan_id']][sheng] = {}
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] = 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] = (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] = (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] = (1 if i['status']=='ONROAD' else 0)
                else:
                    # First order for this product: resolve its display name.
                    r = db.pt_store.find_one({'tuan_id':i['cart'][0]['tuan_id']},{'title':1})
                    if r:
                        title = r['title']
                    else:
                        title = 'n/a'
                    skus[i['cart'][0]['tuan_id']] = {
                        'name' : title,
                        'tuan_id' : i['cart'][0]['tuan_id'],
                    }
                    skus[i['cart'][0]['tuan_id']][sheng]={
                        'num' : 1, # includes complimentary items as well
                        'paid' : 1 if i['status']=='PAID' else 0, # paid, awaiting picking (group-buy)
                        'dispatch' : 1 if i['status']=='DISPATCH' else 0, # paid, awaiting dispatch (group-buy)
                        'onroad' : 1 if i['status']=='ONROAD' else 0, # paid, in delivery (group-buy)
                    }
            # total_sum: province -> status counters summed over all skus.
            total_sum={}
            for i in skus.keys():
                for j in skus[i].keys():
                    if j in ['name','tuan_id']:
                        continue
                    if total_sum.has_key(j):
                        total_sum[j]['paid'] += skus[i][j]['paid']
                        total_sum[j]['dispatch'] += skus[i][j]['dispatch']
                        total_sum[j]['onroad'] += skus[i][j]['onroad']
                    else:
                        total_sum[j] = {}
                        total_sum[j]['paid'] = skus[i][j]['paid']
                        total_sum[j]['dispatch'] = skus[i][j]['dispatch']
                        total_sum[j]['onroad'] = skus[i][j]['onroad']
            return render.batch_job(helper.get_session_uname(), helper.get_privilege_name(),
                skus, shop_name['name'], total_sum)
        else:
            # Not authorised: bounce to the home page.
            raise web.seeother('/')
| StarcoderdataPython |
353022 | from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rdynload import dlopen, dlsym, DLOpenError
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.error import raise_import_error
from pypy.interpreter.error import OperationError, oefmt
from pypy.module._hpy_universal import llapi, handles
from pypy.module._hpy_universal.state import State
from pypy.module._hpy_universal.apiset import API
from pypy.module._hpy_universal.llapi import BASE_DIR
# these imports have side effects, as they call @API.func()
from pypy.module._hpy_universal import (
interp_err,
interp_long,
interp_module,
interp_number,
interp_unicode,
interp_float,
interp_bytes,
interp_dict,
interp_list,
interp_tuple,
interp_builder,
interp_object,
interp_cpy_compat,
interp_type,
interp_tracker,
)
def load_version():
    """Return (__version__, __git_revision__) of the vendored hpy.

    Evaluates the content of _vendored/hpy/devel/version.py in a scratch
    namespace instead of importing it.
    """
    version_py = BASE_DIR.join('version.py').read()
    d = {}
    exec(version_py, d)
    return d['__version__'], d['__git_revision__']
HPY_VERSION, HPY_GIT_REV = load_version()
def create_hpy_module(space, name, origin, lib, initfunc_ptr):
    """Invoke the extension's HPyInit_* entry point and return the wrapped
    module object, consuming the handle the init function returns."""
    state = space.fromcache(State)
    initfunc_ptr = rffi.cast(llapi.HPyInitFunc, initfunc_ptr)
    h_module = initfunc_ptr(state.ctx)
    return handles.consume(space, h_module)
def descr_load_from_spec(space, w_spec):
    """Load an HPy extension from a ModuleSpec-like object, using its
    `name` and `origin` attributes."""
    name = space.text_w(space.getattr(w_spec, space.newtext("name")))
    origin = space.fsencode_w(space.getattr(w_spec, space.newtext("origin")))
    return descr_load(space, name, origin)
@unwrap_spec(name='text', libpath='fsencode')
def descr_load(space, name, libpath):
    """Load the HPy extension module *name* from the shared library at
    *libpath* and return the initialised module.

    Both a dlopen failure and a missing HPyInit_<basename> symbol are
    reported as ImportError carrying the module name and path.
    """
    state = space.fromcache(State)
    state.setup()
    try:
        with rffi.scoped_str2charp(libpath) as ll_libname:
            lib = dlopen(ll_libname, space.sys.dlopenflags)
    except DLOpenError as e:
        w_path = space.newfilename(libpath)
        raise raise_import_error(space,
            space.newfilename(e.msg), space.newtext(name), w_path)

    # The init symbol is derived from the last dotted name component.
    basename = name.split('.')[-1]
    init_name = 'HPyInit_' + basename
    try:
        initptr = dlsym(lib, init_name)
    except KeyError:
        msg = b"function %s not found in library %s" % (
            init_name, space.utf8_w(space.newfilename(libpath)))
        w_path = space.newfilename(libpath)
        raise raise_import_error(
            space, space.newtext(msg), space.newtext(name), w_path)
    return create_hpy_module(space, name, libpath, lib, initptr)
def descr_get_version(space):
    """Return (version, git revision) of the vendored hpy as a wrapped tuple."""
    return space.newtuple(
        [space.newtext(HPY_VERSION), space.newtext(HPY_GIT_REV)]
    )
@API.func("HPy HPy_Dup(HPyContext ctx, HPy h)")
def HPy_Dup(space, ctx, h):
    """Return a new handle referring to the same object as *h*."""
    return handles.dup(space, h)
@API.func("void HPy_Close(HPyContext ctx, HPy h)")
def HPy_Close(space, ctx, h):
    """Release the handle *h*."""
    handles.close(space, h)
| StarcoderdataPython |
3243265 | # Generated by Django 2.1.7 on 2019-03-12 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add OrganizationalStructureOffice.is_default and rebuild the
    model's unique_together constraints."""

    dependencies = [
        ('organizational_area', '0009_auto_20190305_1843'),
    ]

    operations = [
        migrations.AddField(
            model_name='organizationalstructureoffice',
            name='is_default',
            field=models.BooleanField(default=False),
        ),
        # NOTE(review): uniqueness on (organizational_structure, is_active)
        # permits at most one active office per structure — confirm this
        # is intended.
        migrations.AlterUniqueTogether(
            name='organizationalstructureoffice',
            unique_together={('organizational_structure', 'is_active'), ('name', 'organizational_structure')},
        ),
    ]
| StarcoderdataPython |
4219 | <reponame>klemenkotar/dcrl<filename>projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py<gh_stars>10-100
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.tutorials.object_nav_ithor_ppo_one_object import (
ObjectNavThorPPOExperimentConfig,
)
class ObjectNavThorDaggerThenPPOExperimentConfig(ObjectNavThorPPOExperimentConfig):
    """A simple object navigation experiment in THOR.

    Training with DAgger (teacher forcing linearly annealed 1 -> 0) and
    then PPO.
    """

    @classmethod
    def tag(cls):
        """Short human-readable identifier for this experiment."""
        return "ObjectNavThorDaggerThenPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        """Build the two-stage pipeline: imitation (DAgger) for 1e4 steps,
        then PPO for 1e6 steps, with a linearly decaying learning rate."""
        dagger_steps = int(1e4)  # renamed from the original typo `dagger_steos`
        ppo_steps = int(1e6)
        lr = 2.5e-4
        # Smaller batching when no GPU is available.
        num_mini_batch = 2 if not torch.cuda.is_available() else 6
        update_repeats = 4
        num_steps = 128
        metric_accumulate_interval = cls.MAX_STEPS * 10  # Log every 10 max length tasks
        save_interval = 10000
        gamma = 0.99
        use_gae = True
        gae_lambda = 1.0
        max_grad_norm = 0.5
        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={
                "ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
                "imitation_loss": Imitation(),  # We add an imitation loss.
            },
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                # Stage 1: pure imitation with decaying teacher forcing.
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=0.0, steps=dagger_steps,
                    ),
                    max_stage_steps=dagger_steps,
                ),
                # Stage 2: on-policy PPO.
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )
| StarcoderdataPython |
3347400 | from __future__ import division
import numpy as np
import pycuda.driver as drv
from pycuda.compiler import SourceModule
import pycuda.autoinit
# CUDA kernel: scale each (occupied n, virtual m) entry of the nm2v
# matrix by (fn - fm) * [1/(w - (em-en)) - 1/(w + (em-en))] in place.
kernel_code_div_eigenenergy_cuda = """
#include<stdio.h>
#include<stdlib.h>

__global__ void calc_XXVV_gpu(float *nm2v_re, float *nm2v_im, int nm2v_dim1, int nm2v_dim2,
          float *ksn2e, float *ksn2f, int nfermi, int vstart, int ksn2e_dim, double omega_re,
          double omega_im)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x; //nocc
  int j = blockIdx.y * blockDim.y + threadIdx.y; //nvirt
  int m, index;
  float en, em, fn, fm;
  double alpha, beta, a, b;

  if (i < nfermi)
  {
    en = ksn2e[i];
    fn = ksn2f[i];
    if ( (j < ksn2e_dim - i -1))
    {
      m = j + i + 1 - vstart;
      if (m > 0)
      {
        em = ksn2e[i+1+j];
        fm = ksn2f[i+1+j];
        a = (omega_re - (em-en))*(omega_re - (em-en)) + omega_im*omega_im;
        b = (omega_re + (em-en))*(omega_re + (em-en)) + omega_im*omega_im;

        /* alpha + i*beta == 1/(omega - (em-en)) - 1/(omega + (em-en)) */
        alpha = (b*(omega_re - (em-en)) - a*(omega_re + (em-en)))/(a*b);
        beta = omega_im*(a-b)/(a*b);

        index = i*nm2v_dim2 + m;
        /* Bug fix: the imaginary part must be computed from the ORIGINAL
           real part.  The previous code overwrote nm2v_re[index] first
           and then reused the already-updated value in the second line,
           corrupting the complex multiplication. */
        double re_old = nm2v_re[index];
        double im_old = nm2v_im[index];
        nm2v_re[index] = (fn - fm) * (re_old*alpha - im_old*beta);
        nm2v_im[index] = (fn - fm) * (re_old*beta + im_old*alpha);
      }
    }
  }
}
"""
def div_eigenenergy_cuda(ksn2e, ksn2f, nfermi, vstart, comega, nm2v_re, nm2v_im,
        block_size, grid_size):
    """Launch calc_XXVV_gpu: scale the nm2v matrix (split into real and
    imaginary device arrays) in place by the occupation differences and
    the energy-denominator factor at complex frequency *comega*.

    Args:
        ksn2e, ksn2f: device arrays of eigenenergies / occupations (float32)
        nfermi: number of occupied states considered
        vstart: index of the first virtual state kept
        comega: complex frequency omega
        nm2v_re, nm2v_im: device arrays, updated in place
        block_size, grid_size: CUDA launch configuration (two ints each)
    """
    block = (int(block_size[0]), int(block_size[1]), int(1))
    grid = (int(grid_size[0]), int(grid_size[1]))
    # NOTE(review): the kernel source is JIT-compiled on every call;
    # hoisting SourceModule to module level would avoid repeated builds.
    mod = SourceModule(kernel_code_div_eigenenergy_cuda)
    calc_XXVV = mod.get_function("calc_XXVV_gpu")
    calc_XXVV(nm2v_re, nm2v_im, np.int32(nm2v_re.shape[0]),
        np.int32(nm2v_re.shape[1]), ksn2e, ksn2f, np.int32(nfermi),
        np.int32(vstart), np.int32(ksn2e.shape[0]), np.float64(comega.real),
        np.float64(comega.imag), block = block, grid = grid)
| StarcoderdataPython |
5086380 | <filename>scrapers/scrape_matrix.py
#!/usr/bin/env python3
import sys
# This file contains expectations of what data is provided by each scraper.
# It is used by the parser to verify no expected field is missing,
# which would indicate broken parser, or change to a website.
#
# It is to track and detect regressions.
# A per-canton list of extra fields that are expected to be present.
matrix = {
    # Note: Please keep the order of cantons and entries.
    'AG': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU', 'Vent'],
    'AI': ['Confirmed cases', 'Deaths', 'Isolated', 'Quarantined'],
    'AR': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'BE': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU', 'Vent'],
    'BL': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU'],
    'BS': ['Confirmed cases', 'Deaths', 'Released'],
    'FR': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU'],
    'GE': [],  # GE does not always provide the same numbers
    'GL': ['Confirmed cases', 'Deaths', 'Hospitalized'],
    'GR': ['Confirmed cases', 'Deaths', 'Hospitalized'],
    'JU': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'LU': [],  # LU does not always provide the same numbers
    'NE': [],  # NE does not always provide the same numbers
    'NW': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'OW': ['Confirmed cases', 'Deaths', 'Hospitalized'],
    'SG': [],  # SG does not always provides the same numbers
    'SH': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'SO': ['Confirmed cases', 'Deaths'],
    'SZ': ['Confirmed cases', 'Deaths', 'Released'],
    'TG': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'TI': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU', 'Vent'],
    'UR': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized'],
    'VD': ['Confirmed cases', 'Deaths', 'Hospitalized', 'ICU'],
    'VS': ['Deaths', 'Hospitalized', 'ICU', 'Vent'],
    'ZG': ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU'],
    'ZH': ['Confirmed cases', 'Deaths', 'Hospitalized'],
    # 'FL': [], # No scraper.
}

# Every field name that may legitimately appear in the matrix above.
allowed_extras = ['Confirmed cases', 'Deaths', 'Released', 'Hospitalized', 'ICU', 'Vent', 'Isolated', 'Quarantined']

# List of cantons that are expected to have date AND time.
matrix_time = [
    'AG',
    'AI',
    'AR',
    'BE',
    # 'BL', # Not available.
    'BS',
    # 'FR', # Not available.
    # 'GE', # Not available.
    'GL',
    # 'GR', # Not available.
    # 'JU', # Not available in xls
    # 'LU', # Available, but different values are reported at differnt times
    # 'NW', # Not available in xls
    # 'NE', # Not easily available.
    'OW',
    # 'SG', # Not available.
    'SH',
    'SO',
    'SZ',
    # 'TG', # Not available.
    'TI',
    'UR',
    # 'VD', # Not available.
    # 'VS', # Not available
    'ZG',
    'ZH',
    # 'FL', # No scraper.
]


def _warn(warnings, text):
    """Print *text* as a warning on stderr and record it in *warnings*."""
    print(f'WARNING: {text}', file=sys.stderr)
    warnings.append(text)


def check_expected(abbr, date, data):
    """
    Verify that canton `abbr` has expected numbers presents.
    If not, return a non-empty list of expectation violations back to the caller.

    Returns a (violated_expectations, warnings) pair of string lists.
    """
    expected_extras = matrix[abbr]
    violated_expectations = []
    warnings = []

    # Guard against typos in the expectation matrix itself.
    for k in expected_extras:
        if k not in allowed_extras:
            _warn(warnings, f'Unknown extra {k} present (typo?) in expectation matrix[{abbr}]')

    # Map the human-readable field names onto the scraped record's keys.
    cross = {
        'Confirmed cases': data.get('ncumul_conf'),
        'Deaths': data.get('ncumul_deceased'),
        'Hospitalized': data.get('current_hosp'),
        'ICU': data.get('current_icu'),
        'Vent': data.get('current_vent'),
        'Released': data.get('ncumul_released'),
        'Isolated': data.get('current_isolated'),
        'Quarantined': data.get('current_quarantined'),
    }

    # Check for fields that should be there, but aren't
    for k, v in cross.items():
        if v is None and k in expected_extras:
            violated_expectations.append(f'Expected {k} to be present for {abbr}')

    # Check for new fields, that are there, but we didn't expect them
    for k, v in cross.items():
        if v is not None and k not in expected_extras:
            _warn(warnings, f'Not expected {k} to be present for {abbr}. Update scrape_matrix.py file.')

    assert date and "T" in date, f'Date is invalid: {date}'
    date_time = date.split("T", 1)
    assert len(date_time[0]) == 10
    if abbr in matrix_time:
        # These cantons must report a time of day ("HH:MM").
        if len(date_time[1]) != 5:
            violated_expectations.append(f'Expected time of a day to be present for {abbr}. Found none.')
    else:
        if len(date_time[1]) != 0:
            _warn(warnings, f'Not expected time of a day to be present for {abbr}. Found "{date_time[1]}". Update scrape_matrix.py file?')

    return (violated_expectations, warnings)
| StarcoderdataPython |
11306047 | <reponame>drewbrew/advent-of-code-2020
from typing import Deque, List, Set, Tuple
from collections import deque
TEST_INPUT = """Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10""".split(
"\n\n"
)
with open("day22.txt") as infile:
REAL_INPUT = infile.read().split("\n\n")
def build_hands(puzzle_input: List[str]) -> List[Deque[int]]:
    """Parse each player's block ("Player N:" header plus one card value
    per line) into a deque of ints, top of the deck at the left."""
    hands = []
    for block in puzzle_input:
        card_lines = block.splitlines()[1:]  # drop the "Player N:" header
        cards: Deque[int] = deque()
        for line in card_lines:
            for token in line.split():
                cards.append(int(token))
        hands.append(cards)
    return hands
class GameOver(Exception):
    """Raised when a player's deck is empty and the game cannot continue."""
def play_round(hands: List[Deque[int]], part_two: bool = False):
    """Play a single round of Combat, mutating ``hands`` in place.

    Each player draws their top card; the round's winner takes both cards
    (their own card first).  In part two, when both players hold at least
    as many cards as the value they just drew, the round is instead decided
    by a recursive sub-game played on copies of that many cards.

    Raises:
        GameOver: when either player cannot draw.  If player 2 ran out,
            player 1's already-drawn card is put back on top of their deck.
    """
    try:
        p1_card = hands[0].popleft()
    except IndexError as exc:
        raise GameOver() from exc
    try:
        p2_card = hands[1].popleft()
    except IndexError as exc:
        # put the player 1 card back in the deck
        hands[0].appendleft(p1_card)
        raise GameOver() from exc
    if part_two:
        if len(hands[0]) >= p1_card and len(hands[1]) >= p2_card:
            # Recursive sub-game on copies of the next p1_card / p2_card cards.
            new_hands = [
                deque(list(hands[0])[:p1_card]),
                deque(list(hands[1])[:p2_card]),
            ]
            winner_score = play_game(new_hands, True)
            # Identify the winner by matching the returned score against the
            # final sub-game hands (the loser's hand is empty unless the
            # sub-game ended via the repeated-state rule).
            if winner_score == score_hand(new_hands[0]):
                hands[0].append(p1_card)
                hands[0].append(p2_card)
            elif winner_score == score_hand(new_hands[1]):
                hands[1].append(p2_card)
                hands[1].append(p1_card)
            else:
                raise ValueError("unknown winner score")
            return
    # Regular Combat: the higher card wins the round.
    if p1_card > p2_card:
        hands[0].append(p1_card)
        hands[0].append(p2_card)
    elif p2_card > p1_card:
        hands[1].append(p2_card)
        hands[1].append(p1_card)
    else:
        # Decks never contain duplicate values in this puzzle.
        raise ValueError("same card should not be possible")
def score_hand(hand: Deque[int]) -> int:
    """Score a deck: the bottom card counts once, the next up twice, etc."""
    total = 0
    for multiplier, card in enumerate(reversed(hand), start=1):
        total += card * multiplier
    return total
def part_one(puzzle_input: List[str], part_two: bool = False) -> int:
    """Deal the decks described by ``puzzle_input``, play, and return the winning score."""
    return play_game(build_hands(puzzle_input), part_two)
def play_game(hands: List[Deque[int]], part_two: bool = False) -> int:
    """Play rounds of Combat until the game ends; return the winner's score.

    In part two, a game state (exact pair of hands) that has occurred
    before ends the game immediately with player 1 as the winner — the
    puzzle's infinite-loop prevention rule.
    """
    hands_seen: Set[Tuple[Tuple[int]]] = set()
    while True:
        if part_two:
            # Freeze the current state; a repeat means player 1 wins outright.
            active_hands = tuple(tuple(i) for i in hands)
            if active_hands in hands_seen:
                return score_hand(hands[0])
            hands_seen.add(active_hands)
        try:
            play_round(hands, part_two=part_two)
        except GameOver:
            # Exactly one player should still hold cards (non-zero score).
            scores = [score_hand(hand) for hand in hands]
            assert (scores[0] == 0) is not (scores[1] == 0)
            return [i for i in scores if i != 0][0]
def main():
    """Check both parts against the sample input, then print the real answers."""
    sample_part_one = part_one(TEST_INPUT)
    assert sample_part_one == 306, sample_part_one
    print(part_one(REAL_INPUT))
    sample_part_two = part_one(TEST_INPUT, True)
    assert sample_part_two == 291, sample_part_two
    print(part_one(REAL_INPUT, True))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1668514 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Unit tests for shoplift.scrapers
Test the scraping APIs exposed by the different
scraping methods.
'''
import unittest
from shoplift.web import Resource
from shoplift.scrapers import *
class TestScrapers(unittest.TestCase):
    """Tests for the public scraping functions in shoplift.scrapers.

    NOTE(review): these tests fetch real (or cached) web pages, so results
    depend on the recorded page content remaining stable.
    """

    def testResourceInputSupport(self):
        """web.Resource objects should be supported as input"""
        # Each entry: (scraper function, url, lookup key, expected-value regex).
        testcases = (
            (
                amazon_api,
                'http://www.amazon.com/Harry-Potter-Sorcerers-Stone-Book/dp/059035342X/ref=sr_1_1?s=books&ie=UTF8&qid=1403099669&sr=1-1&keywords=harry+potter',
                'title',
                r"Harry Potter and the Sorcerer's Stone \(Harry Potters\)",
            ),
            (
                microdata,
                'http://www.flipkart.com/the-hobbit/p/itmdx5tngyzzpx2u?pid=DGBDG2GFJDGZVNZS&srno=b_15&ref=7ca275d0-da30-45c0-b46e-fc9d5bc28a71',
                '/properties/name/0',
                r"The Hobbit",
            ),
            (
                opengraph,
                'https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096',
                'title',
                r"Songs of Innocence \(Deluxe Edition\) by U2"
            ),
            (
                xpath,
                'http://example.com',
                '//h1//text()',
                r"Example Domain",
            )
        )
        for scraper_method, url, key, expected in testcases:
            try:
                result = scraper_method(Resource(url), key)
            except Exception as e:
                self.fail(scraper_method.__name__ + " unexpectedly raised an exception")
            else:
                self.assertIsNotNone(result, scraper_method.__name__ + " unexpectedly returned None")
                # NOTE(review): assertRegexpMatches is deprecated in favor of
                # assertRegex on modern Python versions.
                self.assertRegexpMatches(result, expected)

    def testAmazonApiScraper(self):
        '''Test for amazon API scraping'''
        # Invalid URLs, keys or types are expected to yield None.
        self.assertEqual(amazon_api('avcd', ''), None)
        self.assertEqual(amazon_api(1234, ''), None)
        self.assertEqual(amazon_api('', 'ayush'), None)
        self.assertEqual(amazon_api('http://www.amazon.com/Harry-Potter-Sorcerers-Stone-Book/dp/059035342X/ref=sr_1_1?s=books&ie=UTF8&qid=1403099669&sr=1-1&keywords=harry+potter', 'title'), "Harry Potter and the Sorcerer's Stone (Harry Potters)")
        self.assertEqual(amazon_api('http://www.amazon.com/Harry-Potter-Sorcerers-Stone-Book/dp/059035342X', 'title'), "Harry Potter and the Sorcerer's Stone (Harry Potters)")
        self.assertEqual(amazon_api('http://www.amazon.com/Harry-Potter-Sorcerers-Stone-Book/dp/059035342X/ref=sr_1_1?s=books&ie=UTF8&qid=1403099669&sr=1-1&keywords=harry+potter', 'rate'), None)
        self.assertEqual(amazon_api('http://www.amazon.com/Harry-Potter-Sorcerers-Stone-Book/dp/059035342X/?ayush=ayush', 'title'), "Harry Potter and the Sorcerer's Stone (Harry Potters)")
        self.assertEqual(amazon_api('ayush/dp/ayush', 'title'), None)
        self.assertEqual(amazon_api('', ''), None)
        # A non-Amazon URL must not resolve through the Amazon API.
        self.assertEqual(amazon_api('http://www.flipkart.com/flippd-men-s-checkered-casual-shirt/p/itmdtsh62kgrczfw', 'title'), None)

    def testMicrodataScraper(self):
        '''Test for microdata_scraper module
        Checking the expected results of microdata...
        scraping for cached web-pages
        '''
        self.assertEqual(microdata('http://www.flipkart.com/the-hobbit/p/itmdx5tngyzzpx2u?pid=DGBDG2GFJDGZVNZS&srno=b_15&ref=7ca275d0-da30-45c0-b46e-fc9d5bc28a71', '/properties/name/0').strip(), "The Hobbit")
        self.assertEqual(microdata('http://www.flipkart.com/the-hobbit/p/itmdx5tngyzzpx2u?pid=DGBDG2GFJDGZVNZS&srno=b_15&ref=7ca275d0-da30-45c0-b46e-fc9d5bc28a71', 123), None)
        self.assertEqual(microdata('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096', 'name'), None)
        self.assertEqual(microdata('', '/properties/name/0'), None)
        self.assertEqual(microdata('http://www.flipkart.com/the-hobbit/p/itmdx5tngyzzpx2u?pid=DGBDG2GFJDGZVNZS&srno=b_15&ref=7ca275d0-da30-45c0-b46e-fc9d5bc28a71', '/properties/name/0').strip(), "The Hobbit")
        self.assertEqual(microdata('', ''), None)
        self.assertEqual(microdata('ayush', 'ayush'), None)
        self.assertEqual(microdata(1234, ''), None)
        self.assertEqual(microdata('', 1234), None)

    def testXpathScraper(self):
        '''Test for xpath_scraper module
        Testing the expected output of xpath ...
        scraping for cached web-pages
        '''
        self.assertEqual(xpath('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096','//div[@class="callout"]/div[@class="left"]//text()'), 'iTunes')
        self.assertEqual(xpath('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096', 'a_string'), None)
        self.assertEqual(xpath('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096', '/ayush'), None)
        self.assertEqual(xpath('', ''), None)
        self.assertEqual(xpath(1234, ''), None)
        self.assertEqual(xpath('ayush', '//span'), None)
        self.assertEqual(xpath('google.com', '*ayush*'), None)
        self.assertEqual(xpath('google.com', ''), None)

    def testOpenGraphScraper(self):
        '''Test for opengraph_scraper module
        Test the expected output for open_graph scraping ...
        for cached web-pages
        '''
        self.assertEqual(opengraph('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096', 'title'), "Songs of Innocence (Deluxe Edition) by U2")
        self.assertEqual(opengraph('https://itunes.apple.com/in/album/songs-innocence-deluxe-edition/id928428096', ''), None)
        self.assertEqual(opengraph('https://itunes.apple.com/us/album/overexposed/id536292140', 'a_string'), None)
        self.assertEqual(opengraph('', ''), None)
        self.assertEqual(opengraph('string', 1234), None)
        self.assertEqual(opengraph('string', ''), None)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
136938 | <gh_stars>0
#!/usr/bin/env python
# (C) Copyright 2016, NVIDIA CORPORATION.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# <NAME> <<EMAIL>>
"""
Generates dispatch functions for EGL.
The list of functions and arguments is read from the Khronos's XML files, with
additional information defined in the module eglFunctionList.
"""
import argparse
import collections
import eglFunctionList
import sys
import textwrap
import os
# Make the shared "new mapi" generator helpers importable before importing them.
NEWAPI = os.path.join(os.path.dirname(__file__), "..", "..", "mapi", "new")
sys.path.insert(0, NEWAPI)
import genCommon
def main():
    """Parse the command line and write the requested generated file to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument("target", choices=("header", "source"),
            help="Whether to build the source or header file.")
    parser.add_argument("xml_files", nargs="+", help="The XML files with the EGL function lists.")
    args = parser.parse_args()

    # Join the Khronos XML data with our hand-maintained dispatch info.
    xmlByName = {f.name: f for f in genCommon.getFunctions(args.xml_files)}
    functions = [
        (xmlByName[name], fixupEglFunc(xmlByName[name], eglFunc))
        for (name, eglFunc) in eglFunctionList.EGL_FUNCTIONS
    ]

    # Sort the function list by name.
    functions.sort(key=lambda entry: entry[0].name)

    generators = {"header": generateHeader, "source": generateSource}
    sys.stdout.write(generators[args.target](functions))
def fixupEglFunc(func, eglFunc):
    """Return a normalized copy of an eglFunctionList entry.

    Fills in a default (empty) prefix, wraps the extension name in a
    ``defined()`` guard expression, validates the dispatch method, and —
    for dispatchable functions that return a value — picks a default
    return value when none was specified.
    """
    result = dict(eglFunc)
    if result.get("prefix") is None:
        result["prefix"] = ""

    extension = result.get("extension")
    if extension is not None:
        result["extension"] = "defined(" + extension + ")"

    method = result["method"]
    if method in ("none", "custom"):
        # Not dispatched here at all, or dispatched by hand-written code.
        return result
    if method not in ("display", "device", "current"):
        raise ValueError("Invalid dispatch method %r for function %r" % (method, func.name))

    if func.hasReturn() and result.get("retval") is None:
        result["retval"] = getDefaultReturnValue(func.rt)
    return result
def generateHeader(functions):
    """Return the text of the generated dispatch-stub header.

    Emits an enum assigning each function a dispatch-table index, plus a
    prototype for every entry whose ``inheader`` flag is set, each wrapped
    in its extension ``#if`` guard where applicable.
    """
    text = textwrap.dedent(r"""
#ifndef G_EGLDISPATCH_STUBS_H
#define G_EGLDISPATCH_STUBS_H
#ifdef __cplusplus
extern "C" {
#endif
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include "glvnd/libeglabi.h"
""".lstrip("\n"))

    # Enum of dispatch indices, one per function, terminated by the count.
    text += "enum {\n"
    for (func, eglFunc) in functions:
        text += generateGuardBegin(func, eglFunc)
        text += " __EGL_DISPATCH_" + func.name + ",\n"
        text += generateGuardEnd(func, eglFunc)
    text += " __EGL_DISPATCH_COUNT\n"
    text += "};\n"

    # Prototypes for the stubs that belong in the public header.
    for (func, eglFunc) in functions:
        if eglFunc["inheader"]:
            text += generateGuardBegin(func, eglFunc)
            text += "{f.rt} EGLAPIENTRY {ex[prefix]}{f.name}({f.decArgs});\n".format(f=func, ex=eglFunc)
            text += generateGuardEnd(func, eglFunc)

    text += textwrap.dedent(r"""
#ifdef __cplusplus
}
#endif
#endif // G_EGLDISPATCH_STUBS_H
""")
    return text
def generateSource(functions):
    """Return the text of the generated dispatch-stub C source file.

    Emits one dispatch function per entry (unless its method is "custom"
    or "none"), then a name table and a function-pointer table indexed by
    the __EGL_DISPATCH_* enum values.  ``functions`` is expected to be
    pre-sorted by name (main() sorts before calling us).
    """
    text = ""
    text += '#include "egldispatchstubs.h"\n'
    text += '#include "g_egldispatchstubs.h"\n'
    text += "\n"

    for (func, eglFunc) in functions:
        if eglFunc["method"] not in ("custom", "none"):
            text += generateGuardBegin(func, eglFunc)
            text += generateDispatchFunc(func, eglFunc)
            text += generateGuardEnd(func, eglFunc)

    text += "\n"
    text += "const char * const __EGL_DISPATCH_FUNC_NAMES[__EGL_DISPATCH_COUNT + 1] = {\n"
    for (func, eglFunc) in functions:
        text += generateGuardBegin(func, eglFunc)
        text += ' "' + func.name + '",\n'
        text += generateGuardEnd(func, eglFunc)
    text += " NULL\n"
    text += "};\n"

    text += "const __eglMustCastToProperFunctionPointerType __EGL_DISPATCH_FUNCS[__EGL_DISPATCH_COUNT + 1] = {\n"
    for (func, eglFunc) in functions:
        text += generateGuardBegin(func, eglFunc)
        if eglFunc["method"] != "none":
            text += " (__eglMustCastToProperFunctionPointerType) " + eglFunc.get("prefix", "") + func.name + ",\n"
        else:
            # Undispatched functions still occupy a slot so indices stay aligned.
            text += " NULL, // " + func.name + "\n"
        text += generateGuardEnd(func, eglFunc)
    text += " NULL\n"
    text += "};\n"
    return text
def generateGuardBegin(func, eglFunc):
    """Return an ``#if`` line opening the extension guard, or "" when unguarded."""
    ext = eglFunc.get("extension")
    return "" if ext is None else "#if " + ext + "\n"
def generateGuardEnd(func, eglFunc):
    """Return the ``#endif`` matching generateGuardBegin, or "" when unguarded."""
    return "#endif\n" if eglFunc.get("extension") is not None else ""
def generateDispatchFunc(func, eglFunc):
    """Return the C source for a single dispatch stub.

    The stub looks up the real entrypoint through libglvnd — by current
    context, by an EGLDisplay argument, or by an EGLDeviceEXT argument
    depending on ``eglFunc["method"]`` — and forwards the call, falling
    back to ``eglFunc["retval"]`` when no implementation is found.
    """
    text = ""
    if eglFunc.get("static"):
        text += "static "
    elif eglFunc.get("public"):
        text += "PUBLIC "
    text += textwrap.dedent(
        r"""
{f.rt} EGLAPIENTRY {ef[prefix]}{f.name}({f.decArgs})
{{
typedef {f.rt} EGLAPIENTRY (* _pfn_{f.name})({f.decArgs});
""").lstrip("\n").format(f=func, ef=eglFunc)
    if func.hasReturn():
        # Default value returned when dispatch fails to find an implementation.
        text += " {f.rt} _ret = {ef[retval]};\n".format(f=func, ef=eglFunc)

    text += " _pfn_{f.name} _ptr_{f.name} = (_pfn_{f.name}) ".format(f=func)
    if eglFunc["method"] == "current":
        text += "__eglDispatchFetchByCurrent(__EGL_DISPATCH_{f.name});\n".format(f=func)
    elif eglFunc["method"] in ("display", "device"):
        if eglFunc["method"] == "display":
            lookupFunc = "__eglDispatchFetchByDisplay"
            lookupType = "EGLDisplay"
        else:
            assert eglFunc["method"] == "device"
            lookupFunc = "__eglDispatchFetchByDevice"
            lookupType = "EGLDeviceEXT"

        # Dispatch on the first parameter of the lookup type.
        lookupArg = None
        for arg in func.args:
            if arg.type == lookupType:
                lookupArg = arg.name
                break
        if lookupArg is None:
            raise ValueError("Can't find %s argument for function %s" % (lookupType, func.name,))

        text += "{lookupFunc}({lookupArg}, __EGL_DISPATCH_{f.name});\n".format(
            f=func, lookupFunc=lookupFunc, lookupArg=lookupArg)
    else:
        raise ValueError("Unknown dispatch method: %r" % (eglFunc["method"],))

    text += " if(_ptr_{f.name} != NULL) {{\n".format(f=func)
    text += " "
    if func.hasReturn():
        text += "_ret = "
    text += "_ptr_{f.name}({f.callArgs});\n".format(f=func)
    text += " }\n"

    if func.hasReturn():
        text += " return _ret;\n"
    text += "}\n"
    return text
def getDefaultReturnValue(typename):
    """Return the default (error) return value literal for an EGL return type.

    Pointer types map to NULL, EGL handle types map to their EGL_NO_*
    constants, EGLBoolean maps to EGL_FALSE, and anything else (integer
    types) maps to 0.
    """
    if typename.endswith("*"):
        return "NULL"
    # Table of special-cased EGL types (replaces the previous elif chain,
    # which also carried a stray trailing semicolon).
    defaults = {
        "EGLDisplay": "EGL_NO_DISPLAY",
        "EGLContext": "EGL_NO_CONTEXT",
        "EGLSurface": "EGL_NO_SURFACE",
        "EGLBoolean": "EGL_FALSE",
    }
    return defaults.get(typename, "0")
if __name__ == "__main__":
main()
| StarcoderdataPython |
11348657 | import math
import torch
from torch import nn
from torch.nn import functional as F
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """Bias-free 3x3 convolution; padding equals dilation so spatial size is kept at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convolutions (no batch norm) plus a shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        # The first conv may downsample (stride); the second keeps the size.
        self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.conv2(self.relu(self.conv1(x)))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """Dilated ResNet backbone (no batch norm) producing stride-8 features.

    layer3/layer4 keep stride 1 but use dilation 2/4, so the output is 1/8
    of the input resolution instead of the usual 1/32 — the standard trick
    for segmentation backbones.  The first conv takes a single input channel.
    """

    def __init__(self, block, layers=(3, 4, 23, 3)):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        # He initialization for convs.  The BatchNorm2d branch is currently
        # dead code: no BN layers are created anywhere in this model.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2./n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # 1x1 conv shortcut when the shape changes (again, no batch norm).
        downsample = None
        if stride != 1 or self.inplanes != planes*block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False)
            )
        # NOTE(review): the first block of each layer is built without the
        # ``dilation`` argument (only the remaining blocks receive it) —
        # possibly an intentional transition, but worth confirming.
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
class PSPModule(nn.Module):
    """Pyramid Scene Parsing pooling module.

    Pools the input feature map to several grid sizes (``bins``), projects
    each pooled map to ``feat_dim // len(bins)`` channels with a 1x1 conv,
    upsamples each back to the input resolution, and concatenates everything
    (including the original features) along the channel dimension, so the
    output has ``feat_dim + len(bins) * (feat_dim // len(bins))`` channels.
    """

    def __init__(self, feat_dim, bins=(1, 2, 3, 6)):
        super(PSPModule, self).__init__()
        # Each pyramid branch contributes an equal share of the channels.
        self.reduction_dim = feat_dim // len(bins)
        # (Removed a dead ``self.stages = []`` assignment that was
        # immediately overwritten by the ModuleList below.)
        self.stages = nn.ModuleList([self._make_stage(feat_dim, size) for size in bins])

    def _make_stage(self, feat_dim, size):
        """Build one pyramid branch: adaptive average pool -> 1x1 conv -> ReLU."""
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(feat_dim, self.reduction_dim, kernel_size=1, bias=False)
        relu = nn.ReLU(inplace=True)
        return nn.Sequential(prior, conv, relu)

    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        priors = [feats]
        for stage in self.stages:
            # Upsample each pooled branch back to the input resolution.
            priors.append(F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True))
        return torch.cat(priors, 1)
class PSPUpsample(nn.Module):
    """Doubles the spatial resolution, then applies a 3x3 conv + PReLU."""

    def __init__(self, in_channels, out_channels):
        super(PSPUpsample, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.PReLU(),
        )

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class PSPNet(nn.Module):
    """Pyramid Scene Parsing network on a dilated ResNet-18 backbone.

    The backbone downsamples by 8x, PSP pooling doubles the channel count
    (512 -> 1024), and three 2x upsample stages restore the input
    resolution; a final 1x1 conv produces a 32-channel output map.
    """

    def __init__(self, bins=(1, 2, 3, 6), backend='resnet18'):
        super(PSPNet, self).__init__()
        if backend == 'resnet18':
            self.feats = ResNet(BasicBlock, [2, 2, 2, 2])
            feat_dim = 512
        else:
            raise NotImplementedError
        self.psp = PSPModule(feat_dim, bins)
        self.drop = nn.Dropout2d(p=0.15)
        self.up_1 = PSPUpsample(1024, 256)
        self.up_2 = PSPUpsample(256, 64)
        self.up_3 = PSPUpsample(64, 64)
        self.final = nn.Conv2d(64, 32, kernel_size=1)

    def forward(self, x):
        f = self.feats(x)
        p = self.psp(f)
        # e.g. psp: torch.Size([32, 1024, 24, 24]) for a 192x192 input
        p = self.up_1(p)
        # up1: torch.Size([32, 256, 48, 48])
        p = self.drop(p)
        p = self.up_2(p)
        # up2: torch.Size([32, 64, 96, 96])
        p = self.drop(p)
        p = self.up_3(p)
        # up3: torch.Size([32, 64, 192, 192])
        return self.final(p)
| StarcoderdataPython |
1977134 | <gh_stars>0
"""Enumerations.py: NeoPixel Indicator Module."""
from enum import Enum, auto, unique
from typing import Tuple
# An (R, G, B) triple with 0-255 channel values.
ColorType = Tuple[int, int, int]
class Color:
    """Named RGB color constants (0-255 per channel)."""

    # Color.BLACK is used in npin module code, and should not be modified.
    BLACK = (0, 0, 0)
    # WHITE, RED, GREEN and BLUE should probably be left alone for the sake of clarity.
    WHITE = (255, 255, 255)
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    BLUE = (0, 0, 255)
    # Everything else is up for grabs - add and customize to your heart's content!
    ORANGE = (255, 128, 0)
    CYAN = (0, 128, 255)
@unique
class Value(Enum):
    """Selects whether an operation refers to the current or the target value."""

    CURRENT = auto()
    TARGET = auto()
class Script:
    """Namespace grouping the script-related enumerations."""

    @unique
    class Type(Enum):
        """How a script runs: a single pass or repeating forever."""

        ONCE = auto()
        LOOP = auto()

    @unique
    class Command(Enum):
        """Kinds of steps a script can contain."""

        COLOR = auto()
        BRIGHTNESS = auto()
        WAIT = auto()

    @unique
    class Run(Enum):
        """When a script should start: immediately or after the current one."""

        NOW = auto()
        AFTER = auto()
| StarcoderdataPython |
1773925 | <filename>deferpy/defer.py<gh_stars>1-10
from functools import partial, update_wrapper
import wrapt
def defer(name='_'):
    """Decorator factory wrapping a function in DeferDecorated.

    ``name`` is the global name under which the wrapped function's
    (proxied) return value is published into the function's module globals.
    """
    return partial(DeferDecorated, return_name=name)
class DeferDecorated():
    """Go-style ``defer`` support for a decorated function.

    Calling the decorated function runs it inside a context that, on exit,
    pops and executes every deferred call in LIFO order.  The function's
    return value is published into its module globals (under
    ``return_name``) through a wrapt proxy, so deferred callables can refer
    to it.
    """

    def __init__(self, f, return_name='_'):
        update_wrapper(self, f)
        # Proxy later pointed at the wrapped function's actual result.
        self.underscore = wrapt.ObjectProxy(None)
        f.__globals__[return_name] = self.underscore
        self.func = f
        self.stack = []

    def defer(self, func, *args, **kwargs):
        """Schedule ``func(*args, **kwargs)`` to run after the wrapped call returns."""
        self.stack.append((func, args, kwargs))

    def __call__(self, *args, **kwargs):
        with self:
            self.underscore.__wrapped__ = self.func(*args, **kwargs)
        return self.underscore.__wrapped__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Run deferred calls in LIFO order; a failing deferred call must not
        # prevent the remaining ones from running.
        while self.stack:
            func, args, kwargs = self.stack.pop()
            try:
                func(*args, **kwargs)
            except Exception:
                # Best-effort cleanup: deliberately swallow errors from
                # deferred calls.  (Narrowed from a bare ``except:`` so
                # KeyboardInterrupt/SystemExit still propagate.)
                pass
| StarcoderdataPython |
4859001 | <gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystoneauth1 import adapter
from openstack.tests.unit import base
from otcextensions.sdk.dcaas.v2 import virtual_interface
# Representative API response body for a DCaaS virtual interface, consumed
# by the tests below.
EXAMPLE = {
    "name": "test-vi",
    "tenant_id": "6fbe9263116a4b68818cf1edce16bc4f",
    "description": "Virtual interface description",
    "direct_connect_id": "456e9263116a4b68818cf1edce16bc4f",
    "vgw_id": "56d5745e-6af2-40e4-945d-fe449be00148",
    "type": "public",
    "service_type": "vpc",
    "vlan": 100,
    "bandwidth": 10,
    "local_gateway_v4_ip": "192.168.3.11/24",
    "remote_gateway_v4_ip": "172.16.17.32/24",
    "route_mode": "static",
    "bgp_asn": 5,
    "bgp_md5": "",
    "remote_ep_group_id": "8ube9263116a4b68818cf1edce16bc4f",
    "service_ep_group_id": "6fbe9263116a4b68818cf1edce16bc4f",
    "create_time": "2016-01-28T16:14:09.466Z",
    "delete_time": "2016-01-28T16:25:27.690Z",
    "admin_state_up": True,
    "rate_limit": False,
    "status": "PENDING_CREATE"
}
class TestVirtualInterface(base.TestCase):
    """Unit tests for the dcaas VirtualInterface resource definition."""

    def setUp(self):
        super(TestVirtualInterface, self).setUp()
        # Session mock; not used directly here but available to subclasses.
        self.sess = mock.Mock(spec=adapter.Adapter)
        self.sess.put = mock.Mock()

    def test_basic(self):
        """Resource metadata: keys, base path, and allowed operations."""
        sot = virtual_interface.VirtualInterface()
        self.assertEqual('virtual_interface', sot.resource_key)
        self.assertEqual('virtual_interfaces', sot.resources_key)
        self.assertEqual('/dcaas/virtual-interfaces', sot.base_path)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_list)
        self.assertTrue(sot.allow_fetch)
        self.assertTrue(sot.allow_commit)
        self.assertTrue(sot.allow_delete)

    def test_make_it(self):
        """Every EXAMPLE field must map onto the corresponding attribute."""
        sot = virtual_interface.VirtualInterface(**EXAMPLE)
        self.assertEqual(EXAMPLE['name'], sot.name)
        # ``tenant_id`` is exposed under the SDK-standard name ``project_id``.
        self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
        self.assertEqual(EXAMPLE['description'], sot.description)
        self.assertEqual(EXAMPLE['direct_connect_id'], sot.direct_connect_id)
        self.assertEqual(EXAMPLE['vgw_id'], sot.vgw_id)
        self.assertEqual(EXAMPLE['type'], sot.type)
        self.assertEqual(EXAMPLE['service_type'], sot.service_type)
        self.assertEqual(EXAMPLE['vlan'], sot.vlan)
        self.assertEqual(EXAMPLE['bandwidth'], sot.bandwidth)
        self.assertEqual(EXAMPLE['local_gateway_v4_ip'],
                         sot.local_gateway_v4_ip)
        self.assertEqual(EXAMPLE['remote_gateway_v4_ip'],
                         sot.remote_gateway_v4_ip)
        self.assertEqual(EXAMPLE['route_mode'], sot.route_mode)
        self.assertEqual(EXAMPLE['bgp_asn'], sot.bgp_asn)
        self.assertEqual(EXAMPLE['bgp_md5'], sot.bgp_md5)
        self.assertEqual(EXAMPLE['remote_ep_group_id'],
                         sot.remote_ep_group_id)
        self.assertEqual(EXAMPLE['service_ep_group_id'],
                         sot.service_ep_group_id)
        self.assertEqual(EXAMPLE['create_time'], sot.create_time)
        self.assertEqual(EXAMPLE['delete_time'], sot.delete_time)
        self.assertEqual(EXAMPLE['admin_state_up'],
                         sot.admin_state_up)
        self.assertEqual(EXAMPLE['rate_limit'], sot.rate_limit)
        self.assertEqual(EXAMPLE['status'], sot.status)
| StarcoderdataPython |
8030063 | <gh_stars>10-100
#!/usr/bin/env python
#
# Title: docker-mount.py
# Author: <NAME>
# Date: 2021-01-07
# Version: 1.0.2
#
# Purpose: Allow the mounting of the AUFS layered/union filesystem from
# a docker container to be mounted (read-only) for the purposes
# of forensic examination
#
# Copyright (c) 2016-2021 AT&T Open Source. All rights reserved.
#
from sys import *
import os
import argparse
from subprocess import call
import json
def aufs_mount():
    """Mount the container's AUFS layer stack read-only at args.mntpnt.

    Reads the parent-layer list for the container's top layer (the script
    chdir'd into the aufs/layers directory, so ``open(layerid)`` resolves
    there), puts the top layer first, and mounts the union with one branch
    per layer ``diff`` directory.  Relies on the module-level ``args`` and
    ``layerid`` globals; requires privileges to run /bin/mount.
    """
    layers = open(layerid).read().split("\n")
    # Drop the trailing empty entry produced by the file's final newline.
    layers = layers[:-1]
    layers.insert(0, layerid)
    elements = [args.root + args.path + "/" + args.storage + "/diff/" + s for s in layers]
    f = ":".join(elements)
    call(["/bin/mount", "-t", "aufs", "-r", "-o", "br:" + f, "none", args.mntpnt], shell=False)
    return ()
def overlay2_mount():
    """Mount the container's overlay2 filesystem read-only at args.mntpnt.

    Uses the top layer's ``lower`` file as the lowerdir chain and mounts an
    overlay with the layer's diff/work directories.  Paths inside ``lower``
    are relative to the storage directory, hence the chdir into
    ``dockerpath``.  Relies on the module-level ``args``, ``layerid`` and
    ``dockerpath`` globals; requires privileges to run /bin/mount.
    """
    lowerdir = open(args.root + args.path + "/" + args.storage + "/" + layerid + "/lower").read().rstrip()
    os.chdir(dockerpath)
    call(
        [
            "/bin/mount",
            "-t",
            "overlay",
            "overlay",
            "-r",
            "-o",
            "lowerdir=" + lowerdir + ",upperdir=" + layerid + "/diff,workdir=" + layerid + "/work",
            args.mntpnt,
        ],
        shell=False,
    )
    return ()
__version_info__ = (1, 0, 2)
__version__ = ".".join(map(str, __version_info__))

# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(
    prog="docker-mount", description="Mount docker container filesystem for forensic examination"
)
parser.add_argument("container", help="container id (sha256 hex string)")
parser.add_argument("mntpnt", help="mount point where read-only filesystem will be mounted")
# parser.add_argument('--verbose','-v', action='store_true', help='verbose',)
parser.add_argument(
    "-V", "--version", action="version", help="print version number", version="%(prog)s v" + __version__
)
parser.add_argument(
    "--root",
    "-r",
    help="root of filesystem containing forensic image (should include trailing /, e.g. /mnt/image/) [default: none]",
    default="",
)  # e.g., /mnt/image/
parser.add_argument("--path", "-p", help="path to docker files [default: /var/lib/docker]", default="/var/lib/docker")
parser.add_argument(
    "--storage", "-s", help="storage driver, currently aufs and overlay2 are supported [default: aufs]", default="aufs"
)
args = parser.parse_args()

dockerroot = args.root + args.path
# The config file name distinguishes docker metadata versions:
# v1 uses config.json, v2 uses config.v2.json.
config1 = dockerroot + "/containers/" + args.container + "/config.json"
config2 = dockerroot + "/containers/" + args.container + "/config.v2.json"
if os.path.isfile(config1):
    dockerversion = 1
elif os.path.isfile(config2):
    dockerversion = 2
else:
    raise Exception("Unknown docker version or invalid container id, check and try again")

# Resolve the container's top (mount) layer id.
if dockerversion == 1:
    layerid = args.container
else:
    layerid = open(
        args.root + args.path + "/image/" + args.storage + "/layerdb/mounts/" + args.container + "/mount-id"
    ).read()
# image/aufs/layerdb/mounts/516ae11fdca97f3228dac2dea2413c6d34a444e24b1d8b2a47cd54fbca091905/mount-id

# Storage-driver specific directory layout.
if args.storage == "aufs":
    dockerpath = args.root + args.path + "/" + args.storage + "/layers"
elif args.storage == "overlay2":
    dockerpath = args.root + args.path + "/" + args.storage
os.chdir(dockerpath)

# Perform the mount with the selected driver.
if args.storage == "aufs":
    aufs_mount()
elif args.storage == "overlay2":
    overlay2_mount()
| StarcoderdataPython |
3259370 | from netaddr import *
from scapy.all import *
# Vendor OUI prefixes found in WPS vendor-specific IEs, mapped to fixed
# 8-character vendor tags; used to detect WPS support in get_security().
WPS_QUERY = {
    b"\x00\x10\x18": "Broadcom", # Broadcom */
    b"\x00\x03\x7f": "AtherosC", # Atheros Communications */
    b"\x00\x0c\x43": "RalinkTe", # Ralink Technology, Corp. */
    b"\x00\x17\xa5": "RalinkTe", # Ralink Technology Corp */
    b"\x00\xe0\x4c": "RealtekS", # Realtek Semiconductor Corp. */
    b"\x00\x0a\x00": "Mediatek", # Mediatek Corp. */
    b"\x00\x0c\xe7": "Mediatek", # Mediatek MediaTek Inc. */
    b"\x00\x1c\x51": "CelenoCo", # Celeno Communications */
    b"\x00\x50\x43": "MarvellS", # Marvell Semiconductor, Inc. */
    b"\x00\x26\x86": "Quantenn", # Quantenna */
    b"\x00\x09\x86": "LantiqML", # Lantiq/MetaLink */
    b"\x00\x50\xf2": "Microsof"
}
# wps_attributes = {
# 0x104A : {'name' : 'Version ', 'type' : 'hex'},
# 0x1044 : {'name' : 'WPS State ', 'type' : 'hex'},
# 0x1057 : {'name' : 'AP Setup Locked ', 'type' : 'hex'},
# 0x1041 : {'name' : 'Selected Registrar ', 'type' : 'hex'},
# 0x1012 : {'name' : 'Device Password ID ', 'type' : 'hex'},
# 0x1053 : {'name' : 'Selected Registrar Config Methods', 'type' : 'hex'},
# 0x103B : {'name' : 'Response Type ', 'type' : 'hex'},
# 0x1047 : {'name' : 'UUID-E ', 'type' : 'hex'},
# 0x1021 : {'name' : 'Manufacturer ', 'type' : 'str'},
# 0x1023 : {'name' : 'Model Name ', 'type' : 'str'},
# 0x1024 : {'name' : 'Model Number ', 'type' : 'str'},
# 0x1042 : {'name' : 'Serial Number ', 'type' : 'str'},
# 0x1054 : {'name' : 'Primary Device Type ', 'type' : 'hex'},
# 0x1011 : {'name' : 'Device Name ', 'type' : 'str'},
# 0x1008 : {'name' : 'Config Methods ', 'type' : 'hex'},
# 0x103C : {'name' : 'RF Bands ', 'type' : 'hex'},
# 0x1045 : {'name' : 'SSID ', 'type' : 'str'},
# 0x102D : {'name' : 'OS Version ', 'type' : 'str'}
# }
def get_rssi(decoded):
    """Extract the RSSI (in dBm) from the tail bytes of a decoded frame.

    The signal strength is stored as an unsigned byte near the end of the
    frame and converted to a negative dBm value.  Returns -1 when the value
    is implausibly weak (below -100 dBm).
    """
    try:
        rssi = -(256 - ord(decoded[-2:-1]))
    except TypeError:
        # Slice was empty (frame too short) -- fall back to an earlier offset.
        # (Narrowed from a bare ``except:``; ord() raises TypeError here.)
        rssi = -(256 - ord(decoded[-4:-3]))
    if rssi < -100:
        return -1
    return rssi
def get_ssid(p):
    """Decode an SSID from its raw bytes.

    SSIDs containing NUL bytes (hidden networks broadcast NUL padding) are
    rendered as "< len: N >" instead of text; undecodable bytes are dropped.
    """
    # chr(x) (not str(x), which stringifies the integer) so the NUL check
    # actually inspects the characters of the SSID.
    if p and "\x00" not in "".join(chr(x) if x < 128 else "" for x in p):
        try:
            return p.decode("utf-8")
        except UnicodeDecodeError:
            # Drop bytes that are not valid UTF-8 (vendor junk, emoji
            # fragments).  Replaces the Python 2-only ``unicode(...)`` call,
            # which raised NameError on Python 3.
            return p.decode("utf-8", errors="ignore")
    else:
        return (("< len: {0} >").format(len(p)))
def get_channel(packet):
    """Return the AP's channel from a beacon/probe-response packet.

    Prefers the DS Parameter Set IE (ID 3); falls back to extracting the
    primary-channel byte from the HT information element (ID 61).
    """
    try:
        return str(ord(packet.getlayer(Dot11Elt, ID=3).info))
    except:
        dot11elt = packet.getlayer(Dot11Elt, ID=61)
        # First byte of the HT IE body is the primary channel.
        # NOTE(review): this branch returns an int while the branch above
        # returns a str -- confirm callers tolerate both.
        return ord(dot11elt.info[-int(dot11elt.len):-int(dot11elt.len)+1])
def get_security(packet):
    """Determine the security configuration advertised by a beacon/probe response.

    Returns ``(security, cipher)`` where ``security`` is "OPEN", "WEP",
    "WPA" or "WPA2" (with "/WPS" appended per matching WPS vendor OUI) and
    ``cipher`` is the WPA2 pairwise cipher ("CCMP", "TKIP" or "CCMP/TKIP"),
    otherwise None.
    """
    cap = packet.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
                         "{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split("+")
    sec = ""
    cipher = None
    p_layer = ""
    try:
        # An RSN information element (ID 48) means WPA2.
        p_layer = packet.getlayer(Dot11Elt, ID=48).info
        sec = "WPA2"
    except AttributeError:
        # No RSN IE; a Microsoft vendor IE of type 1 means legacy WPA.
        p_layer = packet.getlayer(Dot11Elt, ID=221).info
        if p_layer.startswith(b"\x00P\xf2\x01\x01\x00"):
            sec = "WPA"
    if not sec:
        # Check for wep
        if "privacy" in cap:
            sec = "WEP"
        else:
            return "OPEN", None
    if sec == "WPA2" and p_layer:
        # RSN IE body: version(2) + group cipher(4) + pairwise count(2) +
        # pairwise suites(4 each).  Suite 00-0f-ac:2 = TKIP, 00-0f-ac:4 = CCMP.
        if p_layer[8:12] == b"\x00\x0f\xac\x02":
            # First pairwise suite is TKIP; a second CCMP suite means mixed
            # mode.  (Fixes the previous use of ``temp`` before assignment
            # and an 8-byte slice compared against a 4-byte suite.)
            cipher = "CCMP/TKIP" if p_layer[12:16] == b"\x00\x0f\xac\x04" else "TKIP"
        elif p_layer[8:12] == b"\x00\x0f\xac\x04":
            cipher = "CCMP"
    temp = packet.getlayer(Dot11Elt, ID=221).info
    for key in WPS_QUERY:
        if key in temp:
            sec += "/WPS"
    return sec, cipher
def get_vendor(addr3):
    """Look up the OUI vendor name for a MAC address, or "-" if unregistered."""
    try:
        return (EUI(addr3)).oui.registration().org
    except NotRegisteredError:
        return "-"
class AccessPoint:
    """A discovered wireless access point.

    Instances compare equal to their BSSID string, so membership tests
    like ``bssid in ap_list`` work against lists of AccessPoint objects.
    """

    def __init__(
            self,
            ssid,
            enc,
            cipher,
            ch,
            mac,
            ven,
            sig
    ):
        self.ssid = ssid   # network name
        self.enc = enc     # security string, e.g. "WPA2/WPS"
        self.cip = cipher  # pairwise cipher ("CCMP", "TKIP", ...) or None
        self.ch = ch       # channel
        self.mac = mac     # BSSID
        self.ven = ven     # vendor derived from the OUI
        self.sig = sig     # signal strength (dBm)

    def __eq__(self, other):
        # Fixed: previously compared against a non-existent ``self.mMAC``
        # attribute, which raised AttributeError on every comparison.
        return True if other == self.mac else False
class Client:
    """A wireless client (station) associated with (or probing for) an AP.

    Instances compare equal to their MAC-address string.
    """

    def __init__(
            self,
            mac,
            bssid,
            rssi,
            essid
    ):
        self.mac = mac      # client station MAC
        self.bssid = bssid  # AP the client was seen talking to
        self.sig = rssi     # signal strength (dBm)
        self.essid = essid  # network name, when known

    def __eq__(self, other):
        # Fixed: previously compared against a non-existent ``self.mMac``
        # attribute, which raised AttributeError on every comparison.
        return True if other == self.mac else False
| StarcoderdataPython |
6635446 | <filename>pyzoo/zoo/automl/model/VanillaLSTM.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
import keras
import os
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class VanillaLSTM(BaseModel):
    """Plain two-layer LSTM regression model for time-series forecasting.

    Maps a window of past observations to `future_seq_len` future values.
    All hyper parameters come in through a config dict so the model can be
    driven by the automl search.
    """

    def __init__(self, check_optional_config=True, future_seq_len=1):
        """
        Constructor of Vanilla LSTM model

        :param check_optional_config: whether optional config keys are validated too
        :param future_seq_len: number of future time steps predicted per sample
        """
        self.model = None
        self.check_optional_config = check_optional_config
        self.future_seq_len = future_seq_len

    def _build(self, **config):
        """
        build vanilla LSTM model

        :param config: model hyper parameters
        :return: the compiled keras model
        """
        super()._check_config(**config)
        self.metric = config.get('metric', 'mean_squared_error')
        self.model = Sequential()
        # First LSTM returns the full sequence so it can feed the second LSTM.
        self.model.add(LSTM(
            units=config.get('lstm_1_units', 20),
            return_sequences=True))
        self.model.add(Dropout(config.get('dropout_1', 0.2)))
        self.model.add(LSTM(
            units=config.get('lstm_2_units', 10),
            return_sequences=False))
        self.model.add(Dropout(config.get('dropout_2', 0.2)))
        # One output unit per predicted future step.
        self.model.add(Dense(self.future_seq_len))
        self.model.compile(loss='mse',
                           metrics=[self.metric],
                           optimizer=keras.optimizers.RMSprop(lr=config.get('lr', 0.001)))
        return self.model

    def fit_eval(self, x, y, validation_data=None, **config):
        """
        fit for one iteration

        :param x: 3-d array (no. of samples, past sequence length, feature length)
        :param y: 2-d array (no. of samples, future sequence length) if future
            sequence length > 1, else 1-d array (no. of samples, )
        :param validation_data: optional (x_test, y_test) tuple; when given, the
            validation metric (not the train metric) is returned.
        :param config: optimization hyper parameters
        :return: the resulting metric
        """
        # Build lazily on first call so config hyper parameters are available.
        if self.model is None:
            self._build(**config)
        hist = self.model.fit(x, y,
                              validation_data=validation_data,
                              batch_size=config.get('batch_size', 1024),
                              epochs=config.get('epochs', 20),
                              verbose=0
                              )
        # Report the metric of the *last* epoch.  The original indexed [0],
        # which returned the first epoch's value and mis-reported model
        # quality whenever epochs > 1 (keras History stores one entry per
        # epoch).
        if validation_data is None:
            result = hist.history.get(self.metric)[-1]
        else:
            result = hist.history.get('val_' + str(self.metric))[-1]
        return result

    def evaluate(self, x, y, metric=None):
        """
        Evaluate on x, y

        :param x: input
        :param y: target
        :param metric: a list of metric names; defaults to ['mean_squared_error']
        :return: a list of metric evaluation results
        """
        # Default resolved inside the body to avoid the mutable-default-argument
        # pitfall of the original signature (metric=['mean_squared_error']).
        if metric is None:
            metric = ['mean_squared_error']
        e = Evaluator()
        y_pred = self.predict(x)
        return [e.evaluate(m, y, y_pred) for m in metric]

    def predict(self, x):
        """
        Prediction on x.

        :param x: input
        :return: predicted y
        """
        return self.model.predict(x)

    def save(self, model_path, config_path):
        """
        save model to file.

        :param model_path: the model file.
        :param config_path: the config file
        :return:
        """
        # Write to a temp name first, then move into place.
        # NOTE(review): os.rename fails across filesystems -- assumes the
        # temp file and model_path live on the same device.
        self.model.save("vanilla_lstm_tmp.h5")
        os.rename("vanilla_lstm_tmp.h5", model_path)
        config_to_save = {
            "future_seq_len": self.future_seq_len
        }
        save_config(config_path, config_to_save)

    def restore(self, model_path, **config):
        """
        restore model from file

        :param model_path: the model file
        :param config: the trial config (unused; kept for interface compatibility)
        """
        self.model = keras.models.load_model(model_path)

    def _get_required_parameters(self):
        """Config keys that must be present (none currently)."""
        # The original returned `{}` (an empty *dict*); return an empty set
        # for consistency with _get_optional_parameters.
        return set()

    def _get_optional_parameters(self):
        """Config keys that may be supplied/tuned."""
        return {
            'lstm_1_units',
            'dropout_1',
            'lstm_2_units',
            'dropout_2',
            'metric',
            'lr',
            'epochs',
            'batch_size'
        }
if __name__ == "__main__":
    # Smoke test: train one epoch on the NYC taxi data, then exercise the
    # save/restore round trip.
    model = VanillaLSTM(check_optional_config=False)
    x_train, y_train, x_test, y_test = load_nytaxi_data('../../../../data/nyc_taxi_rolled_split.npz')
    config = {
        'out_units': 1,
        'dummy1': 1,
        'batch_size': 1024,
        'epochs': 1
    }
    print("fit_eval:", model.fit_eval(x_train, y_train, validation_data=(x_test, y_test), **config))
    print("evaluate:", model.evaluate(x_test, y_test))
    print("saving model")
    # Fixed: save() takes (model_path, config_path); the original passed
    # **config, which raised TypeError on the unexpected keyword arguments.
    model.save("testmodel.tmp.h5", "testconfig.tmp.json")
    print("restoring model")
    model.restore("testmodel.tmp.h5", **config)
    print("evaluate after retoring:", model.evaluate(x_test, y_test))
    os.remove("testmodel.tmp.h5")
    os.remove("testconfig.tmp.json")
| StarcoderdataPython |
# JavaScript template used to mount a React component (rendered through
# react-router) into a DOM container.  Filled via str.format(): {var} is the
# JS export namespace, {props} the serialized props object, {container_id}
# the id of the target DOM element; literal JS braces are escaped as {{ }}.
# NOTE(review): the sanity checks probe `{var}.React` / `{var}.router`, but
# the IIFE is invoked with `{var}.ReactDOM` -- confirm which is intended.
MOUNT_JS = \
"""
if (typeof {var}.React === 'undefined') throw new Error('Cannot find `React` variable. Have you added an object to your JS export which points to React?');
if (typeof {var}.router === 'undefined') throw new Error('Cannot find `router` variable. Have you added an object to your JS export which points to a function that returns a react-router.Router?');
if (typeof {var} === 'undefined') throw new Error('Cannot find component variable `{var}`');
(function(React, routes, router, containerId) {{
    var props = {props};
    var element = router(routes, props);
    var container = document.getElementById(containerId);
    if (!container) throw new Error('Cannot find the container element `#{container_id}` for component `{var}`');
    React.render(element, container);
}})({var}.ReactDOM, {var}.routes, {var}.router, '{container_id}');
"""
| StarcoderdataPython |
6646593 | <filename>pyserverlessdb/__main__.py
from pyserverlessdb.db import DB
import json
import textwrap
# ASCII-art banner printed when the interactive shell starts.
BANNER = textwrap.dedent('''
+==============================================+
| ╔═╗┬ ┬╔═╗┌─┐┬─┐┬ ┬┌─┐┬─┐┬ ┌─┐┌─┐┌─┐╔╦╗╔╗ |
| ╠═╝└┬┘╚═╗├┤ ├┬┘└┐┌┘├┤ ├┬┘│ ├┤ └─┐└─┐ ║║╠╩╗ |
| ╩ ┴ ╚═╝└─┘┴└─ └┘ └─┘┴└─┴─┘└─┘└─┘└─┘═╩╝╚═╝ |
+----------------------------------------------+
| A Serverless DB for hobby python projects |
+----------------------------------------------+
~ v0.0.1Beta Author: <NAME> ~
+==============================================+
''')

# Help text printed by the `help` command.
HELP = textwrap.dedent('''
- Commands:
help\t\tprints help menu
exit\t\tclose interpreter
dbs\t\tprint all the created/connected dbs
createdb\tcreates/connects with the db
seldb\t\tselects a db
showdb\t\tshow selected db
printdb\t\tprints db content
tbls\t\tshow tables in selected db
createtbl\tcreate a table in selected db
deltbl\t\tdelete a table from selected db
seltbl\t\tselect a table from selected db
showtbl\t\tprint selected table name from selected db
printtbl\tprint selected table data from selected db
addobj\t\tadd object to selected table in selected db
updateobj\tupdate object from selected table in selected db
- Short forms:
DB\t\tDatabase
tbl\t\tTable
sel\t\tSelect
del\t\tDelete
''')

# Module-level interpreter state, mutated by handle_and_execute():
dbs = []                         # every DB created/connected this session
selected_db:DB = None            # DB targeted by table commands (None until seldb)
selected_table_name:str = None   # table targeted by object commands (None until seltbl)
def dict_to_jsonstr(dictionary: dict):
    '''
    description:
        takes a dictionary and serializes it to a pretty-printed,
        key-sorted json string.
    parameters:
        dictionary (dict): the object to serialize
    returns:
        str : dictionary as json string
        bool: False if the dictionary is not json-serializable
    '''
    try:
        return json.dumps(dictionary, indent=4, sort_keys=True)
    except (TypeError, ValueError):
        # Fixed: json.dumps raises TypeError for unserializable values and
        # ValueError for circular references; the original caught
        # json.JSONDecodeError, which dumps() never raises, so those errors
        # escaped uncaught.
        return False
def jsonstr_to_dict() -> dict:
    '''
    description:
        prompts the user for a JSON object on stdin and parses it.
    parameters:
        None
    returns:
        dict: the parsed object
        bool: False if the input is not valid JSON
    '''
    raw = input("[+] Enter Json Object : \n").strip()
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return False
def select_db(db_name: str) -> bool:
    '''
    description:
        makes the database whose str() equals db_name the active one.
    parameters:
        db_name (str): name of the db to be selected
    returns:
        type (bool): True if a matching db was found in `dbs`, else False
    '''
    global selected_db, dbs
    match = next((db for db in dbs if str(db) == db_name), None)
    if match is None:
        return False
    selected_db = match
    return True
def handle_and_execute(command:str):
    '''
    description:
        parses one shell command line and executes it against the
        module-level state (dbs / selected_db / selected_table_name).
    commands:
        see HELP for the full list (help, createdb, dbs, seldb, showdb,
        printdb, createtbl, tbls, deltbl, seltbl, showtbl, printtbl,
        addobj, updateobj, dump).
    parameters:
        command (str): command to be executed.
    returns:
        result (bool): result after command execution. True if operation is
        successful else False.
    '''
    global HELP, dbs, selected_db, selected_table_name
    # command[0] is the verb; command[1] (when present) is its argument.
    command = command.split(' ')
    words = len(command)
    if command[0] == "help":
        print(HELP)
    elif command[0] == "createdb":
        if words <= 1:
            print("[X] Database name missing. usage: createdb [db_name]")
            return False
        file_path = command[1]
        new_db = DB(file_path)
        dbs.append(new_db)
        print(f"[*] {file_path} DB has been created.")
    elif command[0] == "dbs":
        if len(dbs) == 0:
            print("[!] No database created/connected.")
            return False
        print("[*] Databases :")
        for db in dbs:
            print(db)
    elif command[0] == "seldb":
        if words <= 1:
            print("[X] Database name missing. usage: seldb [db_name]")
            return False
        # DB names carry a .pysdb extension; append it when omitted.
        if not command[1].endswith('.pysdb'):
            command[1] += '.pysdb'
        if select_db(command[1]):
            print(f"[*] {selected_db} DB selected.")
        else:
            print("[X] Invalid DB name. use dbs to view valid db list.")
    elif command[0] == "showdb":
        if selected_db is not None:
            print(selected_db)
        else:
            print("[!] Select a database before accessing.")
            return False
    elif command[0] == "printdb":
        if selected_db:
            print(dict_to_jsonstr(selected_db.get_db_copy()))
        else:
            print("[!] Select a database before accessing.")
            return False
    elif command[0] == "createtbl":
        if words <= 1:
            print("[X] table name missing. usage: createtbl [table_name]")
            return False
        if selected_db is None:
            print("[X] Select DB before creating a table. use createdb command to create db.")
            return False
        if selected_db.create_table(command[1]):
            print(f"[*] {command[1]} table created in {selected_db}.")
        else:
            print(f"[!] {command[1]} table was not created. Table might already exist.")
    elif command[0] == "tbls":
        if selected_db is None:
            print("[X] Select DB before viewing tables. use createdb command to create db.")
            return False
        table_names = selected_db.get_table_names()
        if len(table_names) != 0:
            print("[*] Tables :")
            for tbl in table_names:
                print(tbl)
    elif command[0] == "deltbl":
        if words <= 1:
            # NOTE(review): usage text says "deletetbl" but the verb is "deltbl".
            print("[X] table name missing. usage: deletetbl [table_name]")
            return False
        if selected_db is None:
            print("[X] Select DB before deleting a table. use createdb command to create db.")
            return False
        if selected_db.delete_table(command[1]):
            print(f"[*] {command[1]} was deleted successfully from {selected_db} db.")
            return True
        else:
            print(f"[*] {command[1]} wasn't deleted. {command[1]} might be absent in {selected_db} db.")
            return False
    elif command[0] == "seltbl":
        if words <= 1:
            print("[X] table name missing. usage: seltbl [table_name]")
            return False
        # NOTE(review): crashes with AttributeError when no db is selected
        # (selected_db is None), and prints nothing on an unknown table name.
        if command[1] in selected_db.get_table_names():
            selected_table_name = command[1]
            print(f"[*] {selected_table_name} table selected from {selected_db} db.")
            return True
    elif command[0] == "showtbl":
        if selected_db is None:
            print("[X] Select DB before selecting tables. use createdb command to create db.")
            return False
        if selected_table_name is None:
            print("[!] No table selected. select a table using seltbl [table_name].")
            return False
        else:
            print(selected_table_name)
            return True
    elif command[0] == "printtbl":
        if selected_db is None:
            print("[X] Select DB before using printtbl. use createdb command to create db.")
            return False
        if selected_table_name is None:
            print("[!] No table selected. select a table using seltbl [table_name].")
            return False
        else:
            table_enteries = selected_db.get_table(selected_table_name)
            for index in range(len(table_enteries)):
                print(f"{index} : {dict_to_jsonstr(table_enteries[index])}")
            return True
    elif command[0] == "addobj":
        if words <= 1:
            # Fall back to the currently selected table when none is given.
            print("[X] table name missing. usage: addobj [table_name]. Using selected table.")
            command.append(selected_table_name)
        if selected_db is None:
            print("[X] Select DB before adding object to the table. use createdb command to create db.")
            return False
        json_data = jsonstr_to_dict()
        if json_data:
            # NOTE(review): always writes to selected_table_name, even when an
            # explicit table argument was supplied in command[1].
            if selected_db.add_in_table(selected_table_name, json_data):
                print(f"[*] json object added to {command[1]}")
                return True
            else:
                print(f"[X] Operation was unsuccessful. Table might not exist or data is already present.")
        else:
            print("[X] invalid json object.")
    elif command[0] == "updateobj":
        if words <= 1:
            print("[X] table name missing. usage: addobj [table_name]. Using selected table.")
            command.append(selected_table_name)
        if selected_db is None:
            print("[X] Select DB before adding object to the table. use createdb command to create db.")
            return False
        try:
            index = int(input('[+] Enter index : ').strip())
        except ValueError:
            print("[X] invalid index integer.")
            return False
        json_data = jsonstr_to_dict()
        if json_data:
            if selected_db.update_in_table(selected_table_name, index, json_data):
                print(f"[*] json object updated to {command[1]}")
                return True
            else:
                print(f"[X] Operation was unsuccessful. Table might not exist or index is invalid/out of range.")
        else:
            print("[X] invalid json object.")
    elif command[0] == "dump":
        # NOTE(review): crashes with AttributeError when no db is selected.
        if selected_db.dump_data():
            print("[*] Data saved successfully.")
            return True
        else:
            print('[X] Error while Dumping data.')
            return False
    else:
        print("[X] Invalid Command. use help to view commands list.")
        return False
    # Branches that do not return explicitly fall through to success.
    return True
if __name__ == "__main__":
    # Simple REPL: read a line, dispatch it, repeat until `exit`.
    print(BANNER)
    is_running = True
    while is_running:
        # NOTE(review): lower() makes db/table names case-insensitive on
        # input -- confirm that is intended for stored names.
        command = input('> ').lower().strip()
        if command == "exit":
            is_running = False
        else:
            handle_and_execute(command)
    print("see you later!!")
3392996 | <reponame>CarbonDDR/al-go-rithms
def squareRoot(n):
    """Approximate the square root of a non-negative number.

    Uses the Babylonian (Heron's) method: repeatedly average the current
    estimate x with n/x until the two agree to within a small tolerance.

    :param n: non-negative number
    :return: approximation of sqrt(n)
    :raises ValueError: if n is negative
    """
    if n < 0:
        # The original silently returned n itself for negative input.
        raise ValueError("squareRoot() requires a non-negative number")
    if n == 0:
        return 0.0
    x = n
    y = 1
    e = 0.000001
    # abs() makes the loop converge for 0 < n < 1 as well; there x - y starts
    # negative, so the original `x - y > e` test exited immediately and
    # returned n unchanged.
    while abs(x - y) > e:
        x = (x + y) / 2
        y = n / x
    return x
print(squareRoot(50)) | StarcoderdataPython |
6635213 | <reponame>gerryjenkinslb/cs22-slides-and-py-files
import turtle
# Create a turtle and a 300x300 window positioned near the top-left corner.
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
size = 300
myWin.setup(width=size, height=size, startx=30, starty=30)
myTurtle.speed(10)  # speed from 1 to 10 (10 is fastest)
def drawSpiral(myTurtle, lineLen):
    """Recursively draw a square spiral, shortening each side by 5 pixels.

    Recursion stops once the side length reaches zero or below.
    """
    if lineLen <= 0:
        return
    myTurtle.forward(lineLen)
    myTurtle.right(90)
    drawSpiral(myTurtle, lineLen - 5)
figsize = 100
print(myTurtle.position())
# Lift the pen while repositioning so no stray line is drawn.
myTurtle.penup()
myTurtle.goto(-figsize/2, figsize/2)
myTurtle.pendown()
drawSpiral(myTurtle, 100)  # draw the spiral recursively
myWin.exitonclick()  # remember to click on the window to close it
| StarcoderdataPython |
4921156 | #!/usr/bin/env python
from datetime import date, time
from sagescrape.dpw.timemanagement import TimeManagement
if __name__ == "__main__":
    # time(9, 23) constructs a *fixed* wall-clock time of 09:23 -- the
    # original comment wrongly claimed it returns the current time.
    my_enter = time(9, 23)
    my_exit = time(17, 42)
    tm = TimeManagement()
    tm.launch()
    # Fill today's entry/exit times on the DPW time-management page,
    # then close the browser session.
    tm.fill_times(date.today(), my_enter, my_exit)
    tm.shutdown()
| StarcoderdataPython |
3404743 | <filename>app.py
from datetime import datetime
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- acceptable for a tutorial, but load
# it from the environment before deploying anywhere real.
app.config['SECRET_KEY'] = 'this is secret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
@app.route('/')
@app.route('/index')
def index():
    """Home page: render the post list."""
    return render_template('index.html', posts=Post.query.all())
@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html', title='About')
class User(db.Model):
    """Application user; owns posts and stores a hashed password."""

    # Fixed: SQLAlchemy's declarative attribute is `__tablename__`; the
    # original `__table_name__` was silently ignored (the default table name
    # derived from the class name happens to be 'user' as well, so existing
    # databases are unaffected).
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(100), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Stores the salted hash produced by set_password, never plaintext.
    password = db.Column(db.String(100), nullable=False)
    profile_image = db.Column(db.String(100), default='default.png')
    posts = db.relationship('Post', backref='author', lazy=True)

    def __init__(self, username, email, password):
        self.username = username
        self.email = email
        self.set_password(password)

    def __repr__(self):
        return f"<User('{self.username}', '{self.email}')>"

    def set_password(self, password):
        """Hash *password* and store the hash."""
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password, password)
class Post(db.Model):
    """A blog post authored by a User."""

    # Fixed: `__tablename__`, not `__table_name__` (see User).
    __tablename__ = 'post'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120), unique=True, nullable=False)
    content = db.Column(db.Text)
    # Fixed: pass the *callable* so the timestamp is computed per insert.
    # The original `default=datetime.utcnow()` was evaluated once at import
    # time, stamping every post with the server start time.
    date_posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return f"<Post('{self.id}', '{self.title}')>"
| StarcoderdataPython |
3353012 | <filename>scripts/prepare_megadepth_valid_list.py
import os
import json
import tables
from tqdm import tqdm
import numpy as np
def read_all_imgs(base_dir):
    """Collect (sorted) paths of all files under directories whose path contains 'imgs'.

    Note: the 'imgs' test is a substring match on the directory path, so any
    ancestor directory containing 'imgs' also matches.
    """
    found = []
    for cur, dirs, files in os.walk(base_dir):
        if 'imgs' not in cur:
            continue
        found.extend(os.path.join(cur, f) for f in files)
    found.sort()
    return found
def filter_semantic_depth(imgs):
    """Keep only images whose depth map contains no negative values.

    For each image path the matching depth file is expected at
    ../depths/<image stem>.h5 relative to the image's directory.  Returned
    paths are made relative to the dataset root (four levels up) and sorted.
    """
    valid = []
    for img_path in tqdm(imgs):
        stem = os.path.splitext(os.path.basename(img_path))[0]
        depths_dir = os.path.abspath(os.path.join(os.path.dirname(img_path), '../depths'))
        depth_h5 = tables.open_file(os.path.join(depths_dir, stem + '.h5'), mode='r')
        _depth = np.array(depth_h5.root.depth)
        if _depth.min() >= 0:
            prefix = os.path.abspath(os.path.join(img_path, '../../../../')) + '/'
            valid.append(img_path.replace(prefix, ''))
        depth_h5.close()
    valid.sort()
    return valid
if __name__ == "__main__":
    # Scan the local MegaDepth_v1 tree and write the list of images with
    # clean (non-negative) depth maps to megadepth_valid_list.json.
    MegaDepth_v1 = '/media/jiangwei/data_ssd/MegaDepth_v1/'
    assert os.path.isdir(MegaDepth_v1), 'Change to your local path'
    all_imgs = read_all_imgs(MegaDepth_v1)
    valid_imgs = filter_semantic_depth(all_imgs)
    with open('megadepth_valid_list.json', 'w') as outfile:
        json.dump(valid_imgs, outfile, indent=4)
| StarcoderdataPython |
8186339 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# This resource is only available on C7000 enclosures
config = {
    "ip": "<oneview_ip>",
    "credentials": {
        "userName": "<username>",
        "password": "<password>"
    }
}

# A Logical Switch Group, the Switches IP address/host name, and credentials must be set to run this example
logical_switch_group_name = '<lsg_name>'
switch_ip_1 = '<switch_ip_or_hostname>'
switch_ip_2 = '<switch_ip_or_hostname>'
ssh_username = '<user_name_for_switches>'
ssh_password = '<<PASSWORD>>'

# To run the scope patch operations in this example, a scope name is required.
scope_name = "<scope_name>"

# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)

# Check for existance of Logical Switch Group specified, otherwise stops execution
# NOTE(review): get_by(...)[0] raises IndexError when no group matches, so the
# else branch below is unreachable -- confirm intended.
logical_switch_group = oneview_client.logical_switch_groups.get_by('name', logical_switch_group_name)[0]
if logical_switch_group:
    print('Found logical switch group "%s" with uri: %s' % (logical_switch_group['name'], logical_switch_group['uri']))
else:
    raise Exception('Logical switch group "%s" was not found on the appliance.' % logical_switch_group_name)

# SSH credential payload reused for both switches below.
switch_connection_properties = {
    "connectionProperties": [{
        "propertyName": "SshBasicAuthCredentialUser",
        "value": ssh_username,
        "valueFormat": "Unknown",
        "valueType": "String"
    }, {
        "propertyName": "SshBasicAuthCredentialPassword",
        "value": ssh_password,
        "valueFormat": "SecuritySensitive",
        "valueType": "String"
    }]
}

options = {
    "logicalSwitch": {
        "name": "Test Logical Switch",
        "logicalSwitchGroupUri": logical_switch_group['uri'],
        "switchCredentialConfiguration": [
            {
                "snmpV1Configuration": {
                    "communityString": "public"
                },
                "logicalSwitchManagementHost": switch_ip_1,
                "snmpVersion": "SNMPv1",
                "snmpPort": 161
            }, {
                "snmpV1Configuration": {
                    "communityString": "public"
                },
                "logicalSwitchManagementHost": switch_ip_2,
                "snmpVersion": "SNMPv1",
                "snmpPort": 161
            }
        ]
    },
    "logicalSwitchCredentials": [switch_connection_properties, switch_connection_properties]
}

# Create a Logical Switch
logical_switch = oneview_client.logical_switches.create(options)
print("\nCreated Logical Switch '{name}' successfully.\n uri = '{uri}'".format(**logical_switch))

# Find the recently created Logical Switch by name
logical_switch = oneview_client.logical_switches.get_by('name', 'Test Logical Switch')[0]
print("\nFound Logical Switch by name: '{name}'.\n uri = '{uri}'".format(**logical_switch))

# Update the name of the Logical Switch
options_update = {
    "logicalSwitch": {
        "name": "Renamed Logical Switch",
        "uri": logical_switch['uri'],
        "switchCredentialConfiguration": logical_switch['switchCredentialConfiguration'],
        "logicalSwitchGroupUri": logical_switch_group['uri'],
        "consistencyStatus": "CONSISTENT"
    },
    "logicalSwitchCredentials": [switch_connection_properties, switch_connection_properties]
}
logical_switch = oneview_client.logical_switches.update(options_update)
print("\nUpdated Logical Switch successfully.\n uri = '{uri}'".format(**logical_switch))
print(" with attribute name = {name}".format(**logical_switch))

# Get scope to be added
print("\nGet the scope named '%s'." % scope_name)
scope = oneview_client.scopes.get_by_name(scope_name)

# Performs a patch operation on the Logical Switch
if scope:
    print("\nPatches the logical switch assigning the '%s' scope to it." % scope_name)
    logical_switch = oneview_client.logical_switches.patch(logical_switch['uri'],
                                                           'replace',
                                                           '/scopeUris',
                                                           [scope['uri']])
    pprint(logical_switch)

# Get all, with defaults
print("\nGet all Logical Switches")
logical_switches = oneview_client.logical_switches.get_all()
for logical_switch in logical_switches:
    print(' Name: %s' % logical_switch['name'])

# Get by URI
print("\nGet a Logical Switch by URI")
logical_switch = oneview_client.logical_switches.get(logical_switch['uri'])
pprint(logical_switch)

# Reclaim the top-of-rack switches in the logical switch
print("\nReclaim the top-of-rack switches in the logical switch")
logical_switch = oneview_client.logical_switches.refresh(logical_switch['uri'])
print(" Done.")

# Delete the Logical Switch
oneview_client.logical_switches.delete(logical_switch)
print("\nLogical switch deleted successfully.")
| StarcoderdataPython |
127291 | import gym
from gym.wrappers import TimeLimit
# Wrap the custom POMDP mountain-car env so episodes end after 15 steps.
env = TimeLimit(gym.make('gym_custom:pomdp-mountain-car-episodic-easy-v0'), max_episode_steps=15)
print(env.observation_space.shape)
print(env.action_space.shape)
print(env.action_space.high)
rewards = []  # NOTE(review): never filled below -- appears to be leftover scaffolding
for i in range(1):
    state = env.reset()
    done = False
    episode_length = 0
    while not done:
        # Print the last observation component each step.
        print(state[-1])
        action = env.action_space.sample()
        next_state, reward, done, _ = env.step(action)
        episode_length += 1
        state = next_state
    print(episode_length)
# Greet the user by name and age (both read as strings from stdin).
name = input("Enter your name: ")
age = input("Enter your age: ")
print("User name is", name, "and your age is", age)
# Second example: average of two integers.
print("\t Calculate average")
num1 = int(input("Enter the first number "))  # int() raises ValueError on non-numeric input
num2 = int(input("Enter the second number "))
average = (num1 + num2) / 2
print("Average is =", average)
11230423 | # Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Utilities to make logging consistent and easy for any any subprocess
operations.
"""
from __future__ import absolute_import
from functools import partial
__all__ = ['SubprocessLogger']
class SubprocessLogger(object):
    """Provides a limited set of log methods that :mod:`slimta` packages may
    use. This prevents free-form logs from mixing in with standard, machine-
    parseable logs.

    :param log: :py:class:`logging.Logger` object to log through.

    """

    def __init__(self, log):
        from slimta.logging import logline
        # Pre-bind the debug logger and the 'pid' log type so each method
        # only needs to pass the pid and the operation details.
        self.log = partial(logline, log.debug, 'pid')

    def popen(self, process, args):
        """Log the start of *process* together with its command arguments."""
        self.log(process.pid, 'popen', args=args)

    def stdio(self, process, stdin, stdout, stderr):
        """Log one stdin/stdout/stderr exchange with *process*."""
        self.log(process.pid, 'stdio',
                 stdin=stdin,
                 stdout=stdout,
                 stderr=stderr)

    def exit(self, process):
        """Log the termination of *process* and its return code."""
        self.log(process.pid, 'exit', returncode=process.returncode)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| StarcoderdataPython |
5119115 | import numpy as np
import sys
import numpy as np
from numpy import float32, int32, uint8, dtype, genfromtxt
N = len( sys.argv )
direction = sys.argv[ 1 ]
cx = float(sys.argv[ 2 ])
cx = float(sys.argv[ 2 ])
cy = float(sys.argv[ 3 ])
cz = float(sys.argv[ 4 ])
t=np.array([[1.0,0.0,0.0,cx],[0.0,1.0,0.0,cy],[0.0,0.0,1.0,cz],[0.0,0.0,0.0,1.0]])
sqrt2=0.70710678118
def translation_inv(t):
    """Return a copy of homogeneous transform *t* with its translation column negated.

    The input matrix is left untouched; only entries [0..2, 3] change sign.
    """
    inverted = np.copy(t)
    inverted[:3, 3] *= -1
    return inverted
ti = translation_inv(t)
# 45-degree rotations about the z axis (clockwise / counter-clockwise).
# NOTE(review): a proper z-axis rotation needs [0, 0, 1, 0] as the third row;
# the all-zero third row here collapses the z coordinate -- confirm intended.
rotcw = np.array([[sqrt2,sqrt2,0.0,0.0],[-sqrt2,sqrt2,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,1.0]])
rotccw = np.array([[sqrt2,-sqrt2,0.0,0.0],[sqrt2,sqrt2,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,1.0]])
# Anything other than CW/cw falls through to counter-clockwise.
if (direction == 'CW' or direction == 'cw'):
    rot = rotcw
else:
    rot = rotccw
# Concatenate: translate pivot to origin, rotate, translate back.
xfm = np.matmul(t, np.matmul(rot, ti))
print(xfm)
| StarcoderdataPython |
3599078 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 11:54:58 2019
@author: <NAME>
If you are using free search api then it only has access to 7 days old data so you might get nothing on older tweets
But using this method you can make a better reply network
"""
import tweepy
from tweepy import OAuthHandler
# Insert Twitter API keys here (app-only auth is used below).
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''
#auth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
#auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
auth = tweepy.AppAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Root of the reply tree: the account and the tweet id whose replies we walk.
screen_name = 'BarcaWorldwide'
id_str = '1195425989540663303'
replies_id = []   # tweet objects that are direct replies somewhere in the tree
name = []         # screen names still to be (or already) scanned, BFS order
id_parse = []     # tweet ids paired index-for-index with `name`
count = 0
replies = 1       # number of unprocessed entries left in `name`
name.append(screen_name)
id_parse.append(id_str)
name_ind = 0
# Breadth-first walk: each pass searches recent tweets addressed to one
# account and keeps those that reply to the matching tweet id.  The invariant
# replies == len(name) - name_ind keeps the indexing in range.
while replies > 0:
    for tweet in tweepy.Cursor(api.search, q='to:'+name[name_ind], result_type='recent', timeout=999999).items(1000):
        if hasattr(tweet, 'in_reply_to_status_id_str'):
            if (tweet.in_reply_to_status_id_str == id_parse[name_ind]):
                print(tweet.text)
                count += 1
                name.append(tweet.user.screen_name)
                replies += 1
                id_parse.append(tweet.id_str)
                replies_id.append(tweet)
    replies -= 1
    name_ind += 1
flag = 0  # NOTE(review): unused -- appears to be leftover
print("Replies Count: ")
print(count)
print("Repliers screen_names: ")
print(name)
389875 | <reponame>amirhossein-bayati/pre-processing-dibets-dataset<filename>Final.py
# Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from fitter import Fitter, get_common_distributions
# Read File Function
def read_file(name):
    """Load the Excel workbook *name* into a pandas DataFrame."""
    return pd.read_excel(name)
# Write And Save New Data On Excel
def re_write_file(data, path='new-release.xlsx'):
    """Write *data* to an Excel file.

    :param data: DataFrame to save
    :param path: destination file (defaults to 'new-release.xlsx' for
        backward compatibility with existing callers)
    """
    data.to_excel(path)
# Read File And Save On Data
data = read_file("pima-indians-diabetes.v1.xlsx")
# Delete Empty Records
data.dropna(inplace=True)
# Remove Wrong Formats
for i in data.index:
try:
data.loc[i] = pd.to_numeric(data.loc[i])
except:
data.drop(i, inplace=True)
# Remove Duplicated Items
data.drop_duplicates(inplace=True)
re_write_file(data)
data = read_file("new-release.xlsx")
data.pop('Unnamed: 0')
# Remove Outlier Data using Tukey fences:
# keep rows inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for every column.
for col in data:
    stats = data[col].describe()
    Q1 = stats["25%"]
    Q3 = stats["75%"]
    # Fixed: the original took describe()[5] -- the *median* -- as the IQR;
    # the interquartile range is Q3 - Q1.  (Label-based access also avoids
    # the fragile positional indexing into describe()'s output.)
    IQR = Q3 - Q1
    lower = Q1 - 1.5 * IQR
    upper = Q3 + 1.5 * IQR
    for row in data.index:
        item = data.loc[row, col]
        if item < lower or item > upper:
            data.drop(row, inplace=True)
# Remove Wrong Data on output and zero-valued measurement columns.
for x in data.index:
    if data.loc[x, "output"] > 1 or data.loc[x, "output"] < 0:
        # output is the binary label; anything outside [0, 1] is invalid.
        data.drop(x, inplace=True)
    elif data.loc[x, "A4"] == 0 or data.loc[x, "A2"] == 0 or data.loc[x, "A3"] == 0 or data.loc[x, "A5"] == 0:
        # Fixed: this was a second independent `if`; re-reading a row that
        # the first branch had just dropped raised KeyError.
        data.drop(x, inplace=True)
# Reset Data Index (row labels become 0..n-1 after all the drops above)
data = data.reset_index(drop=True)
# Calculate Mathematical Operations: summary statistics per column.
res = []
for column in data:
    row = {
        "minimum": data[column].min(),
        "maximum": data[column].max(),
        "median": data[column].median(),
        "average": data[column].mean(),
        "standard-deviation": data[column].std(),
        "variance": data[column].var()
    }
    res.append(row)
resDf = pd.DataFrame(res, index=[col for col in data])
# Draw Distribution: fit the common distributions to each column.
for col in data:
    f = Fitter(data[col], distributions=get_common_distributions())
    f.fit()
    f.summary()
    plt.show()
    plt.clf()
# Draw Histogram for every column.
for column in data:
    plt.hist(data[column], rwidth=0.9)
    plt.xlabel(column)
    plt.ylabel("count")
    plt.show()
# Normalize Data to [0, 1] (min-max scaling), then recompute the statistics.
normalized_df = (data-data.min())/(data.max()-data.min())
res = []
for column in normalized_df:
    row = {
        "minimum": normalized_df[column].min(),
        "maximum": normalized_df[column].max(),
        "median": normalized_df[column].median(),
        "average": normalized_df[column].mean(),
        "standard-deviation": normalized_df[column].std(),
        "variance": normalized_df[column].var()
    }
    res.append(row)
normalized_result = pd.DataFrame(res, index=[col for col in data])
# Draw the correlation heatmap ("Convolution" in the original comment).
sn.heatmap(data.corr(), annot=True, fmt=".2f", cmap="Blues")
plt.show()
# Draw Scatter Plots of selected feature pairs colored by the label.
plt.clf()
sn.scatterplot(data=data, x="A1", y="A8", hue="output")
plt.show()
plt.clf()
sn.scatterplot(data=data, x="A2", y="A5", hue="output")
plt.show()
plt.clf()
sn.scatterplot(data=data, x="A4", y="A6", hue="output")
plt.show()
plt.clf()
sn.scatterplot(data=data, x="A3", y="A8", hue="output")
plt.show()
| StarcoderdataPython |
1762337 | from surveytoolbox.config import EASTING, NORTHING, ELEVATION, BEARING
# Import functions
from surveytoolbox.SurveyPoint import NewSurveyPoint
from surveytoolbox.bdc import bearing_distance_from_coordinates
from surveytoolbox.fmt_dms import format_as_dms
# Two named survey points with easting/northing/elevation vertices.
point_1 = NewSurveyPoint("JRR")
point_2 = NewSurveyPoint("JayArghArgh")
point_1.set_vertex(
    {
        EASTING: 100,
        NORTHING: 100,
        ELEVATION: 30
    }
)
point_2.set_vertex(
    {
        EASTING: 200,
        NORTHING: 300,
        ELEVATION: 30
    }
)
# Calculate and print the bearing and distance between the two points.
target_loc = bearing_distance_from_coordinates(point_1.get_vertex(), point_2.get_vertex())
print(
    target_loc
)
# Print the bearing formatted as degrees/minutes/seconds.
print(format_as_dms(target_loc[BEARING]))
| StarcoderdataPython |
1911348 | <reponame>SimenKH/DataDrivenModelling<filename>core/__init__.py
# this lstm core module implementation provides an implementation
# of time series prediction using a lstm approach. It is provided
# as is with no warranties or support.
# Package metadata.
__author__ = "<NAME>"
# Fixed: the attribute was misspelled `__altered_by___` (three trailing
# underscores), so tooling looking for `__altered_by__` never found it.
__altered_by__ = "<NAME>"
__copyright__ = "<NAME> 2018"
__version__ = "2.5.0"
__license__ = "MIT"

import warnings
# NOTE(review): silencing *all* warnings at import time hides issues for
# every consumer of this package; consider narrowing to the specific numpy
# warning category this was added for.
warnings.filterwarnings("ignore")  # ignore messy numpy warnings
| StarcoderdataPython |
14298 | """
Entry point for the CLI
"""
import logging
import click
from samcli import __version__
from .options import debug_option
from .context import Context
from .command import BaseCommand
logger = logging.getLogger(__name__)
# Root logging config for the CLI: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

# Decorator that injects the shared Context object into command callbacks.
pass_context = click.make_pass_decorator(Context)
def common_options(f):
    """
    Common CLI options used by all commands. Ex: --debug

    :param f: Callback function passed by Click
    :return: Callback function
    """
    return debug_option(f)
@click.command(cls=BaseCommand)
@common_options
@click.version_option(version=__version__, prog_name="SAM CLI")
@pass_context
def cli(ctx):
    """
    AWS Serverless Application Model (SAM) CLI

    The AWS Serverless Application Model extends AWS CloudFormation to provide a simplified way of defining the
    Amazon API Gateway APIs, AWS Lambda functions, and Amazon DynamoDB tables needed by your serverless application.
    You can find more in-depth guide about the SAM specification here:
    https://github.com/awslabs/serverless-application-model.
    """
    # The group itself does nothing; subcommands are resolved by BaseCommand.
    # (The docstring doubles as click's --help text, so it is left unchanged.)
    pass
| StarcoderdataPython |
64391 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.web.views import WPDecorated, WPJinjaMixin
class WPNews(WPJinjaMixin, WPDecorated):
    """Public-facing news page, rendered from the news/ template folder."""
    template_prefix = 'news/'
    title = _('News')

    def _get_body(self, params):
        # Delegate body rendering to WPJinjaMixin's template machinery.
        return self._get_page_content(params)
class WPManageNews(WPAdmin):
    """Admin-area page for managing news items; uses the 'news/' templates."""
    template_prefix = 'news/'
| StarcoderdataPython |
12860463 | from django.apps import AppConfig
class ConnectConfig(AppConfig):
    """Django application configuration for the 'Connect' app."""
    # Full Python path to the application (also its default label).
    name = 'Connect'
| StarcoderdataPython |
6415686 | # -*- coding: utf-8 -*-
from mimes.image import IMAGE_MIMES
from mimes.audio import AUDIO_MIMES
from mimes.video import VIDEO_MIMES
| StarcoderdataPython |
11221190 | # coding: utf-8
from abc import ABCMeta, abstractmethod
##################################################
# 学習・評価・予測 実行クラスの基底クラス
##################################################
class AbsRunner(metaclass=ABCMeta):
    """Abstract base class for train / evaluate / predict runners.

    Attributes:
        _run_name (str): Name of the run.
        _model (AbsModel): Model used by the run.
        _params (dict): Parameters of the run.
    """

    def __init__(self, run_name, model, params):
        """Store the run name, model and parameter dict on the instance."""
        self._run_name = run_name
        self._model = model
        self._params = params

    @abstractmethod
    def run_train_fold(self, fold):
        """Train and evaluate on the single specified fold."""
        raise NotImplementedError()

    @abstractmethod
    def run_train_cv(self):
        """Train and evaluate using cross-validation."""
        raise NotImplementedError()

    @abstractmethod
    def run_predict_cv(self):
        """Predict by averaging the per-fold models trained via cross-validation."""
        raise NotImplementedError()

    @abstractmethod
    def run_train_all(self):
        """Train on the entire training data set."""
        raise NotImplementedError()

    @abstractmethod
    def run_predict_all(self):
        """Predict on the test data with the model trained on all training data."""
        raise NotImplementedError()
| StarcoderdataPython |
6563570 | <reponame>JordanMilne/Redhawk<filename>redhawk/test/test_common_xml_writer.py<gh_stars>0
#!/usr/bin/env python
import redhawk.common.writers.xml_writer as X
from . import common_test_utils as T
import nose.tools
import random
import itertools
import tempfile
import os
class TestXMLWriter:
    """Writes ASTs to XML files in a fresh temp directory, one per counter value."""

    def __init__(self):
        self.counter = itertools.count(0)
        self.temp_dir = tempfile.mkdtemp(prefix='xml')

    def GetFilename(self):
        """Return the next sequentially numbered path inside the temp directory."""
        return os.path.join(self.temp_dir, str(next(self.counter)))

    def FunctionTestXML(self, ast):
        """Serialize *ast* to the next available numbered '.xml' file."""
        X.WriteToFile(ast, filename=self.GetFilename() + '.xml')
def TestGenerator():
    """ Testing XML Writer. """
    # Spot-check: pick 5 random ASTs and yield (test-fn, ast) pairs for nose.
    num_picks = 5
    writer = TestXMLWriter()
    asts = list(T.GetAllLASTs())
    for _ in range(num_picks):
        yield writer.FunctionTestXML, asts[random.randrange(0, len(asts))]
# Disable the test by default.
@nose.tools.nottest
def TestAllPrograms():
    """ Testing XML Writer (all programs) """
    # Iterate the ASTs directly: the previous version materialized them into a
    # list and enumerated them, but the index was never used.
    c = TestXMLWriter()
    for ast in T.GetAllLASTs():
        yield c.FunctionTestXML, ast
| StarcoderdataPython |
165658 | from services.recommendation import Recommendation
import logging
import sys
# NOTE(review): basicConfig at import time configures the root logger
# (stdout, INFO level) for every consumer of this module.
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
log = logging.getLogger(__name__)
def start_recommendation(elk_rec=False, **kwargs):
    """This function decides which type of recommendation is made."""
    # Guard: extract and validate the request payload up front.
    try:
        parameters = kwargs['parameters']
        source = parameters['source']
        source_type = parameters['source_type']
        recommendation_type = parameters['recommendation_type']
    except Exception as e:
        log.error(e)
        log.error('Data provided are not in proper format')
        return {"Error": 'Data provided not in proper format'}, 400

    # The only implemented combination: recommend courses from a CV's skills.
    if source_type == 'cv' and recommendation_type == 'courses':
        cv_skills = [skill['label'] for skill in source['skills']]
        recommender = Recommendation()
        if elk_rec:
            response = recommender.elk_recommend(cv_skills=cv_skills)
        else:
            response = recommender.recommend(cv_skills=cv_skills)
        # Release DB connections held by the recommender before returning.
        recommender.pg_client.engine.dispose()
        recommender.pg_client.qualichain_db_engine.dispose()
        return response, 200

    # Every other (source_type, recommendation_type) combination is not
    # implemented yet and falls through, returning None.
    return None
| StarcoderdataPython |
class Solution:
    def trap(self, height: list) -> int:
        """Return the total units of rain water trapped between the bars.

        Classic "Trapping Rain Water": the water held above bar i is bounded
        by the tallest bar on each side, i.e.
        min(max_left[i], max_right[i]) - height[i].

        :param height: Bar heights (non-negative ints).
        :return: Total trapped water; 0 for empty or monotonic profiles.
        """
        from itertools import accumulate  # local import keeps module deps unchanged

        if not height:
            return 0
        # Running maxima from the left and from the right, each in O(n).
        max_from_left = list(accumulate(height, max))
        max_from_right = list(accumulate(reversed(height), max))[::-1]
        # Sum the water column above every bar.
        return sum(
            min(left, right) - h
            for left, right, h in zip(max_from_left, max_from_right, height)
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.