hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfc9bc553f33ab7c4ec3f6504bde18eeb37b64d
| 876
|
py
|
Python
|
nvtabular/ops/internal/__init__.py
|
bschifferer/NVTabular
|
5f8c040b6a42b366d2ac70e8c5087729cdeb95cf
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ops/internal/__init__.py
|
bschifferer/NVTabular
|
5f8c040b6a42b366d2ac70e8c5087729cdeb95cf
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ops/internal/__init__.py
|
bschifferer/NVTabular
|
5f8c040b6a42b366d2ac70e8c5087729cdeb95cf
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# alias submodules here to avoid breaking everything with moving to submodules
# flake8: noqa
from .concat_columns import ConcatColumns
from .identity import Identity
from .selection import SelectionOp
from .subset_columns import SubsetColumns
from .subtraction import SubtractionOp
| 36.5
| 78
| 0.784247
|
acfc9bd29feae2d0863a2b5ed5ec99356a863117
| 11,808
|
py
|
Python
|
tests/test_ignite.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ignite.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ignite.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for IGFS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import pytest
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow import dtypes # pylint: disable=wrong-import-position
from tensorflow import errors # pylint: disable=wrong-import-position
from tensorflow import test # pylint: disable=wrong-import-position
from tensorflow.compat.v1 import data # pylint: disable=wrong-import-position
from tensorflow.compat.v1 import gfile # pylint: disable=wrong-import-position
import tensorflow_io.ignite as ignite_io # pylint: disable=wrong-import-position
class __TestFS(): # pylint: disable=invalid-name,old-style-class,no-init
  """Shared file-system test cases for Ignite-backed gfile schemes.

  Subclasses mix this class into ``test.TestCase`` and implement ``prefix``
  to select the URI scheme (e.g. "ggfs" or "igfs").

  The Apache Ignite servers have to setup before the test and tear down
  after the test manually. The docker engine has to be installed.

  To setup Apache Ignite servers:
  $ bash start_ignite.sh

  To tear down Apache Ignite servers:
  $ bash stop_ignite.sh
  """

  def prefix(self):
    # Overridden by subclasses to return the URI scheme used in test paths.
    pass

  def test_create_file(self):
    """Test create file."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_create_file")
    file_name = self.prefix() + ":///test_create_file/1"
    self.assertFalse(gfile.Exists(file_name))
    # Create file.
    with gfile.Open(file_name, mode="w") as w:
      w.write("")
    # Check that file was created.
    self.assertTrue(gfile.Exists(file_name))
    # Remove file.
    gfile.Remove(file_name)
    # Check that file was removed.
    self.assertFalse(gfile.Exists(file_name))

  def test_write_read_file(self):
    """Test write/read file."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_write_read_file")
    file_name = self.prefix() + ":///test_write_read_file/1"
    rows = 10
    self.assertFalse(gfile.Exists(file_name))
    # Write data: the same line `rows` times.
    with gfile.Open(file_name, mode="w") as w:
      for i in range(rows):
        w.write("This is row\n")
    # Read data.
    with gfile.Open(file_name, mode="r") as r:
      lines = r.readlines()
    # Check that data is equal.
    self.assertEqual(rows, len(lines))
    for i in range(rows):
      self.assertEqual("This is row\n", lines[i])
    # Remove file.
    gfile.Remove(file_name)
    # Check that file was removed.
    self.assertFalse(gfile.Exists(file_name))

  def test_delete_recursively(self):
    """Test delete recursively."""
    # Setup and check preconditions.
    dir_name = self.prefix() + ":///test_delete_recursively"
    file_name = self.prefix() + ":///test_delete_recursively/1"
    self.assertFalse(gfile.Exists(dir_name))
    self.assertFalse(gfile.Exists(file_name))
    gfile.MkDir(dir_name)
    with gfile.Open(file_name, mode="w") as w:
      w.write("")
    self.assertTrue(gfile.Exists(dir_name))
    self.assertTrue(gfile.Exists(file_name))
    # Delete directory recursively.
    gfile.DeleteRecursively(dir_name)
    # Check that directory was deleted (including its contents).
    self.assertFalse(gfile.Exists(dir_name))
    self.assertFalse(gfile.Exists(file_name))

  def test_copy(self):
    """Test copy."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_copy")
    src_file_name = self.prefix() + ":///test_copy/1"
    dst_file_name = self.prefix() + ":///test_copy/2"
    self.assertFalse(gfile.Exists(src_file_name))
    self.assertFalse(gfile.Exists(dst_file_name))
    with gfile.Open(src_file_name, mode="w") as w:
      w.write("42")
    self.assertTrue(gfile.Exists(src_file_name))
    self.assertFalse(gfile.Exists(dst_file_name))
    # Copy file.
    gfile.Copy(src_file_name, dst_file_name)
    # Check that files are identical: both exist and the copy has the content.
    self.assertTrue(gfile.Exists(src_file_name))
    self.assertTrue(gfile.Exists(dst_file_name))
    with gfile.Open(dst_file_name, mode="r") as r:
      data_v = r.read()
    self.assertEqual("42", data_v)
    # Remove file.
    gfile.Remove(src_file_name)
    gfile.Remove(dst_file_name)
    # Check that file was removed.
    self.assertFalse(gfile.Exists(src_file_name))
    self.assertFalse(gfile.Exists(dst_file_name))

  def test_is_directory(self):
    """Test is directory."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_is_directory")
    dir_name = self.prefix() + ":///test_is_directory/1"
    file_name = self.prefix() + ":///test_is_directory/2"
    with gfile.Open(file_name, mode="w") as w:
      w.write("")
    gfile.MkDir(dir_name)
    # Check that directory is a directory.
    self.assertTrue(gfile.IsDirectory(dir_name))
    # Check that file is not a directory.
    self.assertFalse(gfile.IsDirectory(file_name))

  def test_list_directory(self):
    """Test list directory."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_list_directory")
    gfile.MkDir(self.prefix() + ":///test_list_directory/2")
    gfile.MkDir(self.prefix() + ":///test_list_directory/4")
    dir_name = self.prefix() + ":///test_list_directory"
    file_names = [
        self.prefix() + ":///test_list_directory/1",
        self.prefix() + ":///test_list_directory/2/3"
    ]
    ch_dir_names = [
        self.prefix() + ":///test_list_directory/4",
    ]
    for file_name in file_names:
      with gfile.Open(file_name, mode="w") as w:
        w.write("")
    for ch_dir_name in ch_dir_names:
      gfile.MkDir(ch_dir_name)
    # Direct children of dir_name are "1", "2", "4"; "2/3" is nested, so the
    # expected count (2 files + 1 dir = 3) matches the three direct entries.
    ls_expected_result = file_names + ch_dir_names
    # Get list of files in directory.
    ls_result = gfile.ListDirectory(dir_name)
    # Check that list of files is correct.
    self.assertEqual(len(ls_expected_result), len(ls_result))
    # NOTE(review): assumes ListDirectory returns base names — confirm.
    for e in ["1", "2", "4"]:
      self.assertTrue(e in ls_result, msg="Result doesn't contain '%s'" % e)

  def test_make_dirs(self):
    """Test make dirs."""
    # Setup and check preconditions.
    dir_name = self.prefix() + ":///test_make_dirs/"
    self.assertFalse(gfile.Exists(dir_name))
    # Make directory.
    gfile.MkDir(dir_name)
    # Check that directory was created.
    self.assertTrue(gfile.Exists(dir_name))
    # Remove directory.
    gfile.Remove(dir_name)
    # Check that directory was removed.
    self.assertFalse(gfile.Exists(dir_name))

  def test_remove(self):
    """Test remove."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_remove")
    file_name = self.prefix() + ":///test_remove/1"
    self.assertFalse(gfile.Exists(file_name))
    with gfile.Open(file_name, mode="w") as w:
      w.write("")
    self.assertTrue(gfile.Exists(file_name))
    # Remove file.
    gfile.Remove(file_name)
    # Check that file was removed.
    self.assertFalse(gfile.Exists(file_name))

  def test_rename_file(self):
    """Test rename file."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_rename_file")
    src_file_name = self.prefix() + ":///test_rename_file/1"
    dst_file_name = self.prefix() + ":///test_rename_file/2"
    with gfile.Open(src_file_name, mode="w") as w:
      w.write("42")
    self.assertTrue(gfile.Exists(src_file_name))
    # Rename file.
    gfile.Rename(src_file_name, dst_file_name)
    # Check that only new name of file is available, content preserved.
    self.assertFalse(gfile.Exists(src_file_name))
    self.assertTrue(gfile.Exists(dst_file_name))
    with gfile.Open(dst_file_name, mode="r") as r:
      data_v = r.read()
    self.assertEqual("42", data_v)
    # Remove file.
    gfile.Remove(dst_file_name)
    # Check that file was removed.
    self.assertFalse(gfile.Exists(dst_file_name))

  def test_rename_dir(self):
    """Test rename dir."""
    # Setup and check preconditions.
    gfile.MkDir(self.prefix() + ":///test_rename_dir")
    src_dir_name = self.prefix() + ":///test_rename_dir/1"
    dst_dir_name = self.prefix() + ":///test_rename_dir/2"
    gfile.MkDir(src_dir_name)
    # Rename directory.
    gfile.Rename(src_dir_name, dst_dir_name)
    # Check that only new name of directory is available.
    self.assertFalse(gfile.Exists(src_dir_name))
    self.assertTrue(gfile.Exists(dst_dir_name))
    self.assertTrue(gfile.IsDirectory(dst_dir_name))
    # Remove directory.
    gfile.Remove(dst_dir_name)
    # Check that directory was removed.
    self.assertFalse(gfile.Exists(dst_dir_name))
# With a boolean condition, pytest requires a string `reason`; passing
# reason=None can break/garble the skip report — give a descriptive one.
@pytest.mark.skipif(
    platform.uname()[0] == 'Darwin',
    reason="GGFS tests are not supported on macOS (Darwin)")
class TestGGFS(test.TestCase, __TestFS):
  """Run the shared __TestFS file-system cases against the "ggfs" scheme."""

  def setUp(self): # pylint: disable=invalid-name
    # Point the client at the GGFS port and make sure the root exists.
    os.environ["IGNITE_PORT"] = '10801'
    gfile.MkDir("ggfs:///")

  def prefix(self):
    """URI scheme prepended to every path in the shared test cases."""
    return "ggfs"
class TestIGFS(test.TestCase, __TestFS):
  """Run the shared __TestFS file-system cases against the "igfs" scheme."""

  def prefix(self):
    # URI scheme prepended to every path in the shared test cases.
    return "igfs"
class IgniteDatasetTest(test.TestCase):
  """Tests for IgniteDataset against a live Apache Ignite cluster.

  The Apache Ignite servers have to setup before the test and tear down
  after the test manually. The docker engine has to be installed.

  To setup Apache Ignite servers:
  $ bash start_ignite.sh

  To tear down Apache Ignite servers:
  $ bash stop_ignite.sh
  """

  def test_ignite_dataset_with_plain_client(self):
    """Test Ignite Dataset with plain client."""
    self._clear_env()
    dataset = ignite_io.IgniteDataset(cache_name="SQL_PUBLIC_TEST_CACHE", port=10800)
    self._check_dataset(dataset)

  def test_ignite_dataset_with_plain_client_with_interleave(self):
    """Test Ignite Dataset with plain client with interleave."""
    self._clear_env()
    hosts = data.Dataset.from_tensor_slices(["localhost"])
    dataset = hosts.interleave(
        lambda host: ignite_io.IgniteDataset(
            cache_name="SQL_PUBLIC_TEST_CACHE",
            schema_host="localhost", host=host,
            port=10800),
        cycle_length=4, block_length=16)
    self._check_dataset(dataset)

  def _clear_env(self):
    """Clears environment variables used by Ignite Dataset."""
    for var in ("IGNITE_DATASET_USERNAME", "IGNITE_DATASET_PASSWORD",
                "IGNITE_DATASET_CERTFILE", "IGNITE_DATASET_CERT_PASSWORD"):
      # pop with a default is a no-op when the variable is absent.
      os.environ.pop(var, None)

  def _check_dataset(self, dataset):
    """Checks that dataset provides correct data."""
    self.assertEqual(dtypes.int64, dataset.output_types["key"])
    self.assertEqual(dtypes.string, dataset.output_types["val"]["NAME"])
    self.assertEqual(dtypes.int64, dataset.output_types["val"]["VAL"])
    next_element = dataset.make_one_shot_iterator().get_next()
    with tf.compat.v1.Session() as sess:
      rows = [sess.run(next_element) for _ in range(3)]
      # A fourth pull must signal that the dataset is exhausted.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
    expected = [
        {"key": 1, "val": {"NAME": b"TEST1", "VAL": 42}},
        {"key": 2, "val": {"NAME": b"TEST2", "VAL": 43}},
        {"key": 3, "val": {"NAME": b"TEST3", "VAL": 44}},
    ]
    for want, got in zip(expected, rows):
      self.assertEqual(want, got)
# Allow running this test module directly (outside the test runner).
if __name__ == "__main__":
  test.main()
| 33.931034
| 95
| 0.676152
|
acfc9ce74e960e2198d732db5e764b1a92093582
| 1,674
|
py
|
Python
|
app/main/views.py
|
Kennedy128/kennedy-blog
|
835b911f1eea7f24fc0db9e3c1250a81ddfa488e
|
[
"MIT",
"Unlicense"
] | null | null | null |
app/main/views.py
|
Kennedy128/kennedy-blog
|
835b911f1eea7f24fc0db9e3c1250a81ddfa488e
|
[
"MIT",
"Unlicense"
] | null | null | null |
app/main/views.py
|
Kennedy128/kennedy-blog
|
835b911f1eea7f24fc0db9e3c1250a81ddfa488e
|
[
"MIT",
"Unlicense"
] | null | null | null |
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import User,Comment,Quote,Blog,Subscriber
from .forms import CommentForm,NewBlog
from .. import db
from flask_login import login_required,current_user
from ..requests import get_quote
from ..email import subscribe_message
@main.route('/')
def index():
    """Root view: render the landing page with all blogs and a fetched quote."""
    quote = get_quote()
    all_blogs = Blog.query.all()
    return render_template('index.html', title='Blog Master', quote=quote, blogs=all_blogs)
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def add_blog():
    """Create a new blog post for the logged-in user.

    GET (or invalid POST) renders the form; a valid POST saves the blog and
    redirects to the index page.
    """
    form = NewBlog()
    if not form.validate_on_submit():
        # First visit or validation failure: show the form again.
        return render_template('newblog.html', title='New Blog', blog_form=form)
    # Persist the new blog owned by the current user.
    new_blog = Blog(title=form.title.data, content=form.content.data, user=current_user)
    new_blog.save_blog()
    return redirect(url_for('.index'))
@main.route('/blog/<int:id>', methods = ['GET','POST'])
def blog(id):
    """Show one blog with its comments and handle new comment submission.

    :param id: primary key of the blog to display (from the URL).
    Fix: the original fetched ``Comment.get_comment(id)`` into a local that was
    never used (always shadowed or dead) — the dead call is removed.
    """
    blog_post = Blog.get_blog(id)
    if blog_post is None:
        # Unknown blog id: return 404 instead of crashing on attribute access.
        abort(404)
    comment_form = CommentForm()
    if comment_form.validate_on_submit():
        # Persist the submitted comment against this blog.
        # NOTE(review): no POST-redirect-GET here (matches original behavior);
        # a refresh after posting may resubmit the comment.
        new_comment = Comment(content=comment_form.text.data, user=current_user,
                              blog_id=blog_post.id)
        new_comment.save_comment()
    comments = Blog.get_comments(blog_post)
    return render_template("blog.html", comment_form=comment_form,
                           comments=comments, blog=blog_post)
| 28.862069
| 99
| 0.721625
|
acfc9d1871fd6a9e7ab8d9326441c808bb7adc76
| 905
|
py
|
Python
|
calcPostsPerUserPerWeek.py
|
fgolemo/SocWeb-Reddit-Crawler
|
bb115ca28b5354bdfc36ff94ee9f51793fa8b106
|
[
"MIT"
] | 1
|
2016-07-26T14:50:49.000Z
|
2016-07-26T14:50:49.000Z
|
calcPostsPerUserPerWeek.py
|
fgolemo/SocWeb-Reddit-Crawler
|
bb115ca28b5354bdfc36ff94ee9f51793fa8b106
|
[
"MIT"
] | null | null | null |
calcPostsPerUserPerWeek.py
|
fgolemo/SocWeb-Reddit-Crawler
|
bb115ca28b5354bdfc36ff94ee9f51793fa8b106
|
[
"MIT"
] | null | null | null |
# Build per-user weekly post counts from a submissions CSV and pickle the result.
#
# Input : submissions-0-8000.csv — one user per line; assumes the layout is
#         <field0>,<join_ts>,<field2>,<post_ts>,<post_ts>,... in epoch seconds
#         (TODO confirm against the crawler's output format).
# Output: ppw.userdata.pickle — a list of [join_ts, {week_nr: post_count}].
import datetime
import os
import math
import cPickle as pickle  # Python 2 only; under Python 3 this would be "pickle"

user_data = []
with open("submissions-0-8000.csv") as f:
    for line in f:
        user_data_line = []
        segments = line.split(",")
        # Field 1 is the account-creation timestamp (epoch seconds).
        join_date = datetime.datetime.fromtimestamp(int(segments[1]))
        user_data_line.append(int(segments[1]))
        # Fields 3.. are post timestamps. NOTE(review): sorted() compares the
        # raw strings (lexicographic), not numbers — harmless here because only
        # the per-week counts below are used, not the ordering.
        posts = sorted(segments[3:])
        week_nrs = []
        for post in posts:
            post_date = datetime.datetime.fromtimestamp(int(post))
            date_diff = post_date - join_date
            # 1-based week index relative to the join date.
            week_nr = int(math.floor(date_diff.days / 7.0) + 1)
            week_nrs.append(week_nr)
        # Frequency of posts per week number for this user.
        week_counts = dict((i, week_nrs.count(i)) for i in week_nrs)
        user_data_line.append(week_counts)
        user_data.append(user_data_line)

# Write to a temp file first, then rename, so readers never observe a
# partially written pickle.
pickle.dump(user_data, open("ppw.userdata.pickle.tmp", "wb"))
os.rename("ppw.userdata.pickle.tmp", "ppw.userdata.pickle")
| 26.617647
| 69
| 0.639779
|
acfc9d27624f1b8c54c851228f818f72d4a0b833
| 1,885
|
py
|
Python
|
vistrails/gui/extras/core/db/__init__.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | 1
|
2015-05-11T16:46:49.000Z
|
2015-05-11T16:46:49.000Z
|
vistrails/gui/extras/core/db/__init__.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
vistrails/gui/extras/core/db/__init__.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
pass
| 52.361111
| 79
| 0.689125
|
acfc9d86e1537f809bae9523284189eda6991691
| 1,583
|
py
|
Python
|
test/test_web_link.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | 1
|
2021-04-20T15:28:57.000Z
|
2021-04-20T15:28:57.000Z
|
test/test_web_link.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
test/test_web_link.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import jamf
from jamf.models.web_link import WebLink # noqa: E501
from jamf.rest import ApiException
class TestWebLink(unittest.TestCase):
    """WebLink unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a WebLink fixture.

        include_optional is a boolean: when False only required params are
        included, when True both required and optional params are included.
        """
        # model = jamf.models.web_link.WebLink() # noqa: E501
        if not include_optional:
            return WebLink()
        return WebLink(
            name='A popular search engine',
            url='https://duckduckgo.com',
        )

    def testWebLink(self):
        """Test WebLink construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
| 29.314815
| 342
| 0.664561
|
acfc9d87e895afeb2eab9df1a8c37e893791c200
| 1,838
|
py
|
Python
|
hylfm/datasets/dualview.py
|
kreshuklab/hylfm-net
|
9f1013640e40e998674b65176023367b1e978782
|
[
"MIT"
] | 8
|
2020-11-13T05:46:59.000Z
|
2022-01-30T06:12:04.000Z
|
hylfm/datasets/dualview.py
|
kreshuklab/hylfm-net
|
9f1013640e40e998674b65176023367b1e978782
|
[
"MIT"
] | 1
|
2020-11-13T08:29:23.000Z
|
2022-02-10T16:45:19.000Z
|
hylfm/datasets/dualview.py
|
kreshuklab/hylfm-net
|
9f1013640e40e998674b65176023367b1e978782
|
[
"MIT"
] | 2
|
2020-10-30T11:02:42.000Z
|
2021-01-12T06:51:33.000Z
|
from hylfm.datasets import TensorInfo
# Pre-defined dualview tensor sources on the shared "GKRESHUK" storage root.
# Each TensorInfo globs rectified light-field TIFF stacks; insert_singleton_axes_at
# mirrors the convention used elsewhere in this module.

# Right-camera rectified bead recording (2018-09-06 session).
beads_right = TensorInfo(
    name="lf",
    root="GKRESHUK",
    location="LF_computed/LenseLeNet_Microscope/DualView_comparison_heart_movie/beads/2018-09-06_13.16.46/stack_2_channel_0/RC_rectified/*.tif",
    insert_singleton_axes_at=[0, 0],
    tag="dualbeads_right",
)

# Left-camera rectified bead recording (same session as beads_right).
beads_left = TensorInfo(
    name="lf",
    root="GKRESHUK",
    location="LF_computed/LenseLeNet_Microscope/DualView_comparison_heart_movie/beads/2018-09-06_13.16.46/stack_2_channel_0/LC_rectified/*.tif",
    insert_singleton_axes_at=[0, 0],
    tag="dualbeads_left",
)

# Right-camera rectified heart movie.
heart_right = TensorInfo(
    name="lf",
    root="GKRESHUK",
    location="LF_computed/LenseLeNet_Microscope/DualView_comparison_heart_movie/heart/Rectified_RC/*.tif",
    insert_singleton_axes_at=[0, 0],
    tag="dualheart_right",
)
def get_tensor_info(tag: str, name: str, meta: dict):
    """Return a TensorInfo for the dualview recording selected by *tag*/*name*.

    :param tag: dataset tag ("RC_LFD_n156to156_steps4" or "LC_LFD_n156to156_steps4").
    :param name: tensor name ("lfd" or "lf").
    :param meta: unused here; kept for interface parity with sibling modules.
    :raises NotImplementedError: for any unknown (tag, name) combination.
    """
    # NOTE(review): both LC and RC variants glob "Cam_Right_*.tif"; this mirrors
    # the original mapping and presumably matches the on-disk names — confirm.
    locations = {
        ("RC_LFD_n156to156_steps4", "lfd"):
            "LF_computed/LenseLeNet_Microscope/dualview_060918_added/RC_LFD_-156to156_steps4/Cam_Right_*.tif",
        ("RC_LFD_n156to156_steps4", "lf"):
            "LF_computed/LenseLeNet_Microscope/dualview_060918_added/RC_rectified/Cam_Right_*.tif",
        ("LC_LFD_n156to156_steps4", "lfd"):
            "LF_computed/LenseLeNet_Microscope/dualview_060918_added/LC_LFD_-156to156_steps4/Cam_Right_*.tif",
        ("LC_LFD_n156to156_steps4", "lf"):
            "LF_computed/LenseLeNet_Microscope/dualview_060918_added/LC_rectified/Cam_Right_*.tif",
    }
    if (tag, name) not in locations:
        raise NotImplementedError(tag, name)
    return TensorInfo(
        name=name,
        root="GKRESHUK",
        location=locations[(tag, name)],
        insert_singleton_axes_at=[0, 0],
        tag=tag,
    )
| 37.510204
| 144
| 0.714363
|
acfc9db1c5f9e36ecbff43d461338f9a8928b3ea
| 2,163
|
py
|
Python
|
agent/tests/test_input/test_1/test_solarwinds.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 16
|
2019-04-03T08:31:54.000Z
|
2021-01-24T17:12:04.000Z
|
agent/tests/test_input/test_1/test_solarwinds.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 10
|
2020-01-20T14:59:06.000Z
|
2022-01-21T10:19:16.000Z
|
agent/tests/test_input/test_1/test_solarwinds.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 5
|
2021-01-08T19:23:03.000Z
|
2021-11-09T14:15:49.000Z
|
from datetime import datetime
from ..test_zpipeline_base import TestInputBase
from ...conftest import generate_input
from agent import cli, source, pipeline
class TestSolarwinds(TestInputBase):
    """CLI input tests for the SolarWinds source and its pipeline.

    Uses the ``cli_runner`` fixture and ``generate_input`` to feed answers to
    the interactive prompts; the dict insertion order must match prompt order.
    """
    __test__ = True

    # File-based parametrization consumed by the inherited TestInputBase tests.
    params = {
        'test_create_source_with_file': [{'file_name': 'solarwinds_sources'}],
        'test_create_with_file': [{'file_name': 'solarwinds_pipelines'}],
    }

    def test_source_create(self, cli_runner):
        """Create a SolarWinds source via the interactive CLI and verify it persists."""
        source_name = 'solarwinds'
        # Prompt answers, in the order the CLI asks for them.
        input_ = {
            'type': 'solarwinds',
            'name': source_name,
            'api url': 'http://dummy_destination:80/',
            'api user': 'Admin',
            'api pass': 'admin',
        }
        result = cli_runner.invoke(cli.source.create, catch_exceptions=False, input=generate_input(input_))
        assert result.exit_code == 0
        assert source.repository.exists(source_name)

    def test_pipeline_create(self, cli_runner):
        """Create a pipeline on the SolarWinds source via the interactive CLI."""
        # NOTE(review): `offset` is a datetime (2021-03-30 UTC epoch 1617062400),
        # so "collect since" becomes the age of that date in days — confirm intent.
        offset = datetime.fromtimestamp(1617062400)
        pipeline_id = 'solarwinds'
        # Prompt answers, in the order the CLI asks for them.
        input_ = {
            'source': 'solarwinds',
            'id': pipeline_id,
            'query': 'SELECT TOP 1000 NodeID, DateTime, Archive, MinLoad, MaxLoad, AvgLoad, TotalMemory,'
                     ' MinMemoryUsed, MaxMemoryUsed, AvgMemoryUsed, AvgPercentMemoryUsed'
                     ' FROM Orion.CPULoad WHERE {TIMESTAMP_CONDITION}',
            'delay': 0,
            'collect since': (datetime.now() - offset).days,
            'interval in sec': 86400,
            'timestamp property name': 'DateTime',
            'timestamp type': 'string',
            'timestamp format': "yyyy-MM-dd'T'HH:mm:ss",
            'count': 'n',
            'metrics': 'MinMemoryUsed:gauge AvgPercentMemoryUsed:gauge',
            'metric names': 'MinMemoryUsed:MinMemoryUsed AvgPercentMemoryUsed:AvgPercentMemoryUsed',
            'req dimensions': 'NodeID',
            'optional dimensions': '',
            'preview': 'y',
        }
        result = cli_runner.invoke(cli.pipeline.create, catch_exceptions=False, input=generate_input(input_))
        assert result.exit_code == 0
        # Raises if the pipeline was not created.
        pipeline.repository.get_by_id(pipeline_id)
| 41.596154
| 109
| 0.616274
|
acfc9ef5527be5dcf97e9485484dddcfa9f48ac7
| 33,724
|
py
|
Python
|
doc/integrations/yolo/train.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 552
|
2020-09-24T18:16:09.000Z
|
2022-03-25T06:21:55.000Z
|
doc/integrations/yolo/train.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 722
|
2020-09-24T19:48:44.000Z
|
2022-03-31T17:42:41.000Z
|
doc/integrations/yolo/train.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 442
|
2020-09-24T14:24:21.000Z
|
2022-03-25T10:40:16.000Z
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
    """
    Run one full YOLO training session.

    Args:
        hyp (dict): hyperparameter name -> value (learning rates, loss gains,
            augmentation settings, ...) as loaded from a hyp*.yaml file.
        opt (argparse.Namespace): parsed command-line options (see parser below).
        device (torch.device): device chosen by select_device().
        tb_writer: TensorBoard SummaryWriter, or None (rank -1/0 only).

    Returns:
        tuple: the last `results` 7-tuple produced by test.test()
        (P, R, mAP@.5, mAP@.5:.95, val box/obj/cls losses).

    Side effects: creates opt.save_dir with weights/ and results.txt, saves
    hyp.yaml/opt.yaml, logs to TensorBoard/W&B, and in DDP mode coordinates
    across processes (rank -1 means no DDP; rank 0 is the primary process).
    """
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.safe_load(f)  # data dict
    is_coco = opt.data.endswith('coco.yaml')

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    # Split parameters into three groups so weight decay is applied only to
    # conv/linear weights (pg1), never to BN weights (pg0) or biases (pg2).
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                wandb_logger.current_epoch = epoch + 1
                results, maps, times = test.test(data_dict,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 wandb_logger=wandb_logger,
                                                 compute_loss=compute_loss,
                                                 is_coco=is_coco)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb_logger.wandb:
                    wandb_logger.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            wandb_logger.end_epoch(best_result=best_fitness == fi)

            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': results_file.read_text(),
                        'model': deepcopy(model.module if is_parallel(model) else model).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if wandb_logger.wandb:
                    if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                        wandb_logger.log_model(
                            last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for m in (last, best) if best.exists() else (last):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=batch_size * 2,
                                          imgsz=imgsz_test,
                                          conf_thres=0.001,
                                          iou_thres=0.7,
                                          model=attempt_load(m, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=True,
                                          plots=False,
                                          is_coco=is_coco)

        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
        if wandb_logger.wandb and not opt.evolve:  # Log the stripped model
            wandb_logger.wandb.log_artifact(str(final), type='model',
                                            name='run_' + wandb_logger.wandb_run.id + '_model',
                                            aliases=['last', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results
# Command-line entry point: parse options, set up (optionally resumed) DDP
# state, then either run a single training session or evolve hyperparameters.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--linear-lr', action='store_true', help='linear LR')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
    parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    opt = parser.parse_args()

    # Set DDP variables (WORLD_SIZE/RANK are set by the torch.distributed launcher)
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()
        check_requirements()

    # Resume
    wandb_run = check_wandb_resume(opt)
    if opt.resume and not wandb_run:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.safe_load(f))  # replace
        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
            '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))

    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.safe_load(f)  # load hyps

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            prefix = colorstr('tensorboard: ')
            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
| 53.786284
| 123
| 0.569565
|
acfc9ffbd78124f9bb143ee30d8e1138a0c20f50
| 16,536
|
py
|
Python
|
src/m4_wait_for_events.py
|
tabuyoah/14-WaitUntilEvent_WhileLoops
|
a4befd7d2bdc4af7ee4d348ac3b74483a1cf860b
|
[
"MIT"
] | null | null | null |
src/m4_wait_for_events.py
|
tabuyoah/14-WaitUntilEvent_WhileLoops
|
a4befd7d2bdc4af7ee4d348ac3b74483a1cf860b
|
[
"MIT"
] | null | null | null |
src/m4_wait_for_events.py
|
tabuyoah/14-WaitUntilEvent_WhileLoops
|
a4befd7d2bdc4af7ee4d348ac3b74483a1cf860b
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice the WAIT-FOR-EVENT pattern.
See your instructor for whether you should use:
-------------------------------------------------------------------------------
The WHILE TRUE pattern:
while True:
...
if <event has occurred>:
break
...
-------------------------------------------------------------------------------
or
-------------------------------------------------------------------------------
The ITCH pattern:
Initialize as needed so that the CONDITION can be TESTED.
while <some CONDITION>: # Test the CONDITION, continue WHILE it is true.
...
...
CHange something that (eventually) affects the CONDITION.
      (otherwise you will be in an infinite loop)
-------------------------------------------------------------------------------
Ultimately you should be comfortable with both approaches.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Alexander Tabuyo.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
def main():
    """ Calls each of the TEST functions in this module, in order. """
    for run_test in (run_test_sum_until_prime_input,
                     run_test_next_prime,
                     run_test_prime_gap,
                     run_test_wait_for_sum_of_cubes):
        run_test()
def is_prime(n):
    """
    What comes in: An integer n >= 2.
    What goes out: Returns True if the given integer is prime,
      else returns False.
    Side effects: None.
    Examples:
      -- is_prime(11) returns True
      -- is_prime(12) returns False
      -- is_prime(2) returns True
    Note: The algorithm used here is simple and clear but slow.
    """
    # n is prime exactly when no k in [2, sqrt(n)] divides it evenly.
    upper_bound = int(math.sqrt(n) + 0.1)
    return all(n % k != 0 for k in range(2, upper_bound + 1))
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
def run_test_sum_until_prime_input():
    """ Tests the sum_until_prime_input function by calling it. """
    banner = '--------------------------------------------------'
    print()
    print(banner)
    print('Testing the sum_until_prime_input function:')
    print(banner)
    sum_until_prime_input()
def sum_until_prime_input():
    """
    What comes in: Nothing.
    What goes out: Nothing (i.e., None).
    Side effects:
      -- Repeatedly prompts the user for and inputs an integer
           that is at least 2.
      -- Stops when the input integer is prime.
      -- Prints the sum of the input integers (including the prime one).
    Example:
      Here is a sample run, where the user input is to the right
      of the colons:
          Enter an integer greater than 1: 6
          Enter an integer greater than 1: 100
          Enter an integer greater than 1: 50
          Enter an integer greater than 1: 11
          The sum of the input integers is: 167
    """
    # Fix vs. previous version: the prompt and the final message now match
    # the documented contract, the running total is no longer printed after
    # every input, and the duplicated "add then print" tail is gone
    # (WHILE TRUE pattern: the prime input ends the loop AFTER being summed).
    total = 0
    while True:
        number = int(input('Enter an integer greater than 1: '))
        total = total + number
        if is_prime(number):
            break
    print('The sum of the input integers is:', total)
def run_test_next_prime():
    """ Tests the next_prime function. """
    # -------------------------------------------------------------------------
    # Fix vs. previous version: Test 6 printed 'TEST ENDED!' BEFORE the
    # Expected/Actual lines and then printed it a second time.  The six
    # copy-pasted test stanzas are now one table-driven loop; the printed
    # output for Tests 1-5 is unchanged and Test 6's ordering is repaired.
    # -------------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the next_prime function:')
    print('--------------------------------------------------')

    # (argument, expected) pairs; expected values computed BY HAND.
    tests = [(7, 7),
             (8, 11),
             (80, 83),
             (155922, 155921 + 86),
             (2, 2),
             (3, 3)]
    for argument, expected in tests:
        print()
        print('TEST STARTED! Has it ended?')
        actual = next_prime(argument)
        print('Expected:', expected)
        print('Actual: ', actual)
        print('TEST ENDED!')
def next_prime(m):
    """
    What comes in: An integer m that is at least 2.
    What goes out: Returns the smallest prime number greater than
      or equal to m.
    Side effects: None.
    Examples:
      -- next_prime(7) returns 7
      -- next_prime(8) returns 11
      -- next_prime(80) returns 83
      -- next_prime(155921) returns 156007 [trust me!]
    Type hints:
      :type m: int
    """
    # -------------------------------------------------------------------------
    # Fix vs. previous version: it incremented m BEFORE testing it, so a
    # prime argument was skipped (next_prime(7) returned 11, next_prime(2)
    # returned 3), contradicting the "greater than OR EQUAL" contract and
    # the expected values in run_test_next_prime.  Test m itself first
    # (ITCH pattern: advance m only while it is NOT prime).
    # -------------------------------------------------------------------------
    while not is_prime(m):
        m = m + 1
    return m
def run_test_prime_gap():
""" Tests the prime_gap function. """
print()
print('--------------------------------------------------')
print('Testing the prime_gap function:')
print('--------------------------------------------------')
# Test 1:
print()
print('TEST STARTED! Has it ended?')
expected = 2
actual = prime_gap(1)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 2:
print()
print('TEST STARTED! Has it ended?')
expected = 3
actual = prime_gap(2)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 3:
print()
print('TEST STARTED! Has it ended?')
expected = 7
actual = prime_gap(4)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 4:
print()
print('TEST STARTED! Has it ended?')
expected = 7
actual = prime_gap(3)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 5:
print()
print('TEST STARTED! Has it ended?')
expected = 23
actual = prime_gap(6)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 6:
print()
print('TEST STARTED! Has it ended?')
expected = 23
actual = prime_gap(5)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 7:
print()
print('TEST STARTED! Has it ended?')
expected = 89
actual = prime_gap(8)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 8:
print()
print('TEST STARTED! Has it ended?')
expected = 89
actual = prime_gap(7)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 9:
print()
print('TEST STARTED! Has it ended?')
expected = 19609
actual = prime_gap(52)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
# Test 10:
print()
print('TEST STARTED! Has it ended?')
expected = 19609
actual = prime_gap(45)
print('Expected:', expected)
print('Actual: ', actual)
print('TEST ENDED!')
def prime_gap(m):
"""
What comes in: An integer m that is at least 2.
What goes out:
Returns the smallest prime number whose "gap" is at least m,
where the "gap" of a prime number is the difference between
that prime number and the next-smallest prime number.
Side effects: None.
Examples:
-- prime_gap(1) returns 2, because the next prime after 2 is 3,
and so the gap for 2 is 3 - 2 = 1,
and 2 is the smallest prime with gap 1.
-- prime_gap(2) returns 3, because the next prime after 3 is 5,
and so the gap for 3 is 5 - 3 = 2,
and 3 is the smallest prime with gap 2.
-- prime_gap(3) returns 7, because the next prime after 7 is 11,
and so the gap for 7 is 11 - 7 = 4,
and 7 is the smallest prime with gap 3 or more.
(Note: There are no primes except 2 that have a gap that is odd.)
-- prime_gap(4) returns 7 for similar reasons.
-- prime_gap(6) returns 23, because the next prime after 23 is 29,
and so the gap for 23 is 29 - 23 = 6,
and 23 is the smallest prime with gap 6.
-- prime_gap(8) returns 89, because the next prime after 89 is 97,
and so the gap for 89 is 97 - 89 = 8,
and 89 is the smallest prime with gap 8.
-- prime_gap(52) returns 19609 [trust me!]
Type hints:
:type m: int
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# The testing code is already written for you (above).
#
# IMPLEMENTATION REQUIREMENT:
# -- Use (call) the *** next_prime *** function
# (that you implemented) appropriately.
# -------------------------------------------------------------------------
num1 = 2
num2 = 3
while num2 - num1 < m:
num1 = next_prime(num1)
num2 = next_prime(num2)
return num1
def run_test_wait_for_sum_of_cubes():
    """ Tests the wait_for_sum_of_cubes function. """
    print()
    print('--------------------------------------------------')
    print('Testing the wait_for_sum_of_cubes function:')
    print('--------------------------------------------------')
    # Each pair is (expected result, argument), hand-computed.
    # FIX: Test 7 (argument 30.33, required by the assignment's TODO)
    # was left empty.  By hand: 1 + 8 = 9 < 30.33 but
    # 1 + 8 + 27 = 36 >= 30.33, so the expected answer is 3.
    test_cases = [(2, 4.3), (4, 58), (8, 1000), (8, 1296),
                  (9, 1296.000001), (1, -5.2), (3, 30.33),
                  (3, 14), (4, 40)]
    for expected, argument in test_cases:
        print()
        print('TEST STARTED! Has it ended?')
        actual = wait_for_sum_of_cubes(argument)
        print('Expected:', expected)
        print('Actual: ', actual)
        print('TEST ENDED!')
def wait_for_sum_of_cubes(x):
    """
    What comes in: A number x.
    What goes out: Returns the smallest positive integer n such that
        1**3 + 2**3 + ... + n**3
      is greater than or equal to x.
    Side effects: None.
    Examples:
      -- wait_for_sum_of_cubes(4.3) returns 2   (1 < 4.3 <= 9)
      -- wait_for_sum_of_cubes(58) returns 4    (36 < 58 <= 100)
      -- wait_for_sum_of_cubes(1000) returns 8  (784 < 1000 <= 1296)
      -- wait_for_sum_of_cubes(-5.2) returns 1  (any x <= 1 gives 1,
         since the running total starts at 1**3 = 1)
    Type hints:
      :type x: float [or an int]
    """
    # Wait-until-event pattern: extend the sum one cube at a time
    # and stop at the first n whose running total reaches x.
    n = 0
    running_total = 0
    while True:
        n = n + 1
        running_total = running_total + n ** 3
        if running_total >= x:
            return n
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
# Guarded so that importing this module (e.g. from a grader or another
# exercise file) does not run the entire test suite as a side effect.
if __name__ == '__main__':
    main()
| 31.861272
| 79
| 0.523101
|
acfca09417552b2b8ba84851588f1ee67b85c024
| 1,338
|
py
|
Python
|
tests/test_main.py
|
michalisioak/translate-python
|
8117a9686075496cf3c019e8e76b73b5ccadd2a6
|
[
"MIT"
] | 1
|
2020-12-20T23:15:02.000Z
|
2020-12-20T23:15:02.000Z
|
tests/test_main.py
|
michalisioak/translate-python
|
8117a9686075496cf3c019e8e76b73b5ccadd2a6
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
michalisioak/translate-python
|
8117a9686075496cf3c019e8e76b73b5ccadd2a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
try:
from unittest import mock
except Exception:
import mock
from translate.main import main
from .vcr_conf import vcr
# Expected stdout emitted by the CLI for a successful translation;
# the `{}` placeholder is filled with the translated text.
response_template = (
    '\nTranslation: {}\n'
    '-------------------------\n'
    'Translated by: MyMemory\n'
)
@vcr.use_cassette
def test_main_language_to_translate_required(cli_runner):
    """When no target language is passed, the CLI prompts for one."""
    outcome = cli_runner.invoke(main, ['hello', 'world'], input='zh')
    translated = response_template.format('你好,世界')
    expected = 'Translate to []: zh\n' + translated
    assert outcome.output == expected
@vcr.use_cassette
def test_main_to_language(cli_runner):
    """The -t option selects the target language."""
    outcome = cli_runner.invoke(main, ['-t', 'zh-TW', 'love'])
    assert outcome.output == response_template.format('爱')
@vcr.use_cassette
def test_main_to_language_output_only(cli_runner):
    """With -o, only the bare translation is printed."""
    outcome = cli_runner.invoke(main, ['-t', 'zh-TW', '-o', 'love'])
    assert outcome.output == '爱\n'
@vcr.use_cassette
def test_main_from_language(cli_runner):
    """--from / --to select the source and target languages."""
    outcome = cli_runner.invoke(main, ['--from', 'ja', '--to', 'zh', '美'])
    assert outcome.output == response_template.format('美')
@mock.patch('translate.main.__version__', '0.0.0')
# NOTE(review): "vesion" typo in the function name kept intentionally —
# renaming would change the test's public identifier.
def test_main_get_vesion(cli_runner):
    """--version prints the (mocked) package version."""
    outcome = cli_runner.invoke(main, ['--version'])
    assert outcome.output == 'translate, version 0.0.0\n'
| 27.306122
| 73
| 0.686099
|
acfca0cb5e993c8c340edc31a1387136c91ac610
| 1,253
|
py
|
Python
|
2015/day14.py
|
tcbegley/advent-of-code
|
e293d06e9cd994b26c0d10619672a6d8d2d65377
|
[
"MIT"
] | 6
|
2021-12-05T11:21:17.000Z
|
2021-12-07T03:04:24.000Z
|
2015/day14.py
|
tcbegley/advent-of-code
|
e293d06e9cd994b26c0d10619672a6d8d2d65377
|
[
"MIT"
] | null | null | null |
2015/day14.py
|
tcbegley/advent-of-code
|
e293d06e9cd994b26c0d10619672a6d8d2d65377
|
[
"MIT"
] | null | null | null |
import re
import sys
NUMBERS = re.compile(r"\d+")
class Reindeer:
    """Tracks one racing reindeer's position and score, second by second."""

    def __init__(self, speed, time, rest):
        # speed: distance covered per second while flying.
        # time:  seconds of flight at the start of each cycle.
        # rest:  seconds of mandatory rest that follow each flight burst.
        self.speed = speed
        self.time = time
        self.T = time + rest  # length of one full fly+rest cycle
        self.loc = 0
        self.score = 0

    def advance(self, i):
        """Advance one second; move only if second i falls in a flight phase."""
        flying = (i % self.T) < self.time
        if flying:
            self.loc += self.speed

    def increment_score(self):
        """Award one point (used by the lead-based scoring variant)."""
        self.score += 1
def load_data(path):
    """Parse the input file into one [speed, fly_time, rest] int-list per line."""
    with open(path) as handle:
        raw = handle.read().strip()
    return [[int(token) for token in NUMBERS.findall(line)]
            for line in raw.split("\n")]
def part_1(reindeer):
    """Return the distance of the leading reindeer after 2503 seconds."""
    racers = [Reindeer(*stats) for stats in reindeer]
    for second in range(2503):
        for racer in racers:
            racer.advance(second)
    return max(racer.loc for racer in racers)
def part_2(reindeer):
    """Return the winning score when every current leader earns a point per second."""
    racers = [Reindeer(*stats) for stats in reindeer]
    for second in range(2503):
        for racer in racers:
            racer.advance(second)
        # Every reindeer tied for the lead scores this second.
        front = max(racer.loc for racer in racers)
        for racer in racers:
            if racer.loc == front:
                racer.increment_score()
    return max(racer.score for racer in racers)
if __name__ == "__main__":
    # Usage: python day14.py <input-file>; the path comes from argv[1].
    reindeer = load_data(sys.argv[1])
    print(f"Part 1: {part_1(reindeer)}")
    print(f"Part 2: {part_2(reindeer)}")
| 20.540984
| 52
| 0.553871
|
acfca31fc559ccaf31aca6208b848d3d7a4f495f
| 51,103
|
py
|
Python
|
util/config/schema.py
|
ldelossa/quay
|
bd7252c536bcf8c49cca167d7c4b433489d36044
|
[
"Apache-2.0"
] | null | null | null |
util/config/schema.py
|
ldelossa/quay
|
bd7252c536bcf8c49cca167d7c4b433489d36044
|
[
"Apache-2.0"
] | null | null | null |
util/config/schema.py
|
ldelossa/quay
|
bd7252c536bcf8c49cca167d7c4b433489d36044
|
[
"Apache-2.0"
] | null | null | null |
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
    # NOTE(review): the presence of "__module__"/"__doc__" suggests this set
    # is diffed against a config class's attribute dict — confirm against
    # the consumer before relying on that.
    "__module__",
    "__doc__",
    "create_transaction",
    "SESSION_COOKIE_NAME",
    "SESSION_COOKIE_HTTPONLY",
    "SESSION_COOKIE_SAMESITE",
    "DATABASE_SECRET_KEY",
    "V22_NAMESPACE_BLACKLIST",
    "MAXIMUM_CNR_LAYER_SIZE",
    "OCI_NAMESPACE_WHITELIST",
    "FEATURE_GENERAL_OCI_SUPPORT",
    "FEATURE_HELM_OCI_SUPPORT",
    "FEATURE_NAMESPACE_GARBAGE_COLLECTION",
    "FEATURE_REPOSITORY_GARBAGE_COLLECTION",
    "FEATURE_REPOSITORY_ACTION_COUNTER",
    "APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST",
    "APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST",
    "FEATURE_MANIFEST_SIZE_BACKFILL",
    "TESTING",
    "SEND_FILE_MAX_AGE_DEFAULT",
    "DISABLED_FOR_AUDIT_LOGS",
    "DISABLED_FOR_PULL_LOGS",
    "FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES",
    "FEATURE_CLEAR_EXPIRED_RAC_ENTRIES",
    "ACTION_LOG_MAX_PAGE",
    "NON_RATE_LIMITED_NAMESPACES",
    "REPLICATION_QUEUE_NAME",
    "DOCKERFILE_BUILD_QUEUE_NAME",
    "CHUNK_CLEANUP_QUEUE_NAME",
    "SECURITY_SCANNER_ISSUER_NAME",
    "NOTIFICATION_QUEUE_NAME",
    "REPOSITORY_GC_QUEUE_NAME",
    "NAMESPACE_GC_QUEUE_NAME",
    "EXPORT_ACTION_LOGS_QUEUE_NAME",
    "SECSCAN_V4_NOTIFICATION_QUEUE_NAME",
    "FEATURE_BILLING",
    "BILLING_TYPE",
    "INSTANCE_SERVICE_KEY_LOCATION",
    "INSTANCE_SERVICE_KEY_REFRESH",
    "INSTANCE_SERVICE_KEY_SERVICE",
    "INSTANCE_SERVICE_KEY_KID_LOCATION",
    "INSTANCE_SERVICE_KEY_EXPIRATION",
    "UNAPPROVED_SERVICE_KEY_TTL_SEC",
    "EXPIRED_SERVICE_KEY_TTL_SEC",
    "REGISTRY_JWT_AUTH_MAX_FRESH_S",
    "SERVICE_LOG_ACCOUNT_ID",
    "BUILDLOGS_OPTIONS",
    "LIBRARY_NAMESPACE",
    "STAGGER_WORKERS",
    "QUEUE_WORKER_METRICS_REFRESH_SECONDS",
    "PUSH_TEMP_TAG_EXPIRATION_SEC",
    "GARBAGE_COLLECTION_FREQUENCY",
    "PAGE_TOKEN_KEY",
    "BUILD_MANAGER",
    "JWTPROXY_AUDIENCE",
    "JWTPROXY_SIGNER",
    "SECURITY_SCANNER_INDEXING_MIN_ID",
    "SECURITY_SCANNER_V4_REINDEX_THRESHOLD",
    "STATIC_SITE_BUCKET",
    "LABEL_KEY_RESERVED_PREFIXES",
    "TEAM_SYNC_WORKER_FREQUENCY",
    "JSONIFY_PRETTYPRINT_REGULAR",
    "TUF_GUN_PREFIX",
    "LOGGING_LEVEL",
    "SIGNED_GRANT_EXPIRATION_SEC",
    "PROMETHEUS_PUSHGATEWAY_URL",
    "DB_TRANSACTION_FACTORY",
    "NOTIFICATION_SEND_TIMEOUT",
    "QUEUE_METRICS_TYPE",
    "MAIL_FAIL_SILENTLY",
    "LOCAL_OAUTH_HANDLER",
    "USE_CDN",
    "ANALYTICS_TYPE",
    "LAST_ACCESSED_UPDATE_THRESHOLD_S",
    "GREENLET_TRACING",
    "EXCEPTION_LOG_TYPE",
    "SENTRY_DSN",
    "SENTRY_PUBLIC_DSN",
    "BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT",
    "THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT",
    "IP_DATA_API_KEY",
    "SECURITY_SCANNER_ENDPOINT_BATCH",
    "SECURITY_SCANNER_API_TIMEOUT_SECONDS",
    "SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS",
    "SECURITY_SCANNER_ENGINE_VERSION_TARGET",
    "SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS",
    "SECURITY_SCANNER_API_VERSION",
    "REPO_MIRROR_INTERVAL",
    "DATA_MODEL_CACHE_CONFIG",
    # TODO: move this into the schema once we support signing in QE.
    "FEATURE_SIGNING",
    "TUF_SERVER",
    "V1_ONLY_DOMAIN",
    "LOGS_MODEL",
    "LOGS_MODEL_CONFIG",
    "APP_REGISTRY_RESULTS_LIMIT",
    "V3_UPGRADE_MODE",  # Deprecated old flag
}
CONFIG_SCHEMA = {
"type": "object",
"description": "Schema for Quay configuration",
"required": [
"PREFERRED_URL_SCHEME",
"SERVER_HOSTNAME",
"DB_URI",
"AUTHENTICATION_TYPE",
"DISTRIBUTED_STORAGE_CONFIG",
"BUILDLOGS_REDIS",
"USER_EVENTS_REDIS",
"DISTRIBUTED_STORAGE_PREFERENCE",
"DEFAULT_TAG_EXPIRATION",
"TAG_EXPIRATION_OPTIONS",
],
"properties": {
"REGISTRY_STATE": {
"type": "string",
"description": "The state of the registry.",
"enum": ["normal", "readonly"],
"x-example": "readonly",
},
# Hosting.
"PREFERRED_URL_SCHEME": {
"type": "string",
"description": "The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`",
"enum": ["http", "https"],
"x-example": "https",
},
"SERVER_HOSTNAME": {
"type": "string",
"description": "The URL at which Quay is accessible, without the scheme.",
"x-example": "quay.io",
},
"EXTERNAL_TLS_TERMINATION": {
"type": "boolean",
"description": "If TLS is supported, but terminated at a layer before Quay, must be true.",
"x-example": True,
},
# SSL/TLS.
"SSL_CIPHERS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL ciphers to enabled and disabled",
"x-example": ["CAMELLIA", "!3DES"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers",
},
"SSL_PROTOCOLS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL protocols to enabled and disabled",
"x-example": ["TLSv1.1", "TLSv1.2"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols",
},
# User-visible configuration.
"REGISTRY_TITLE": {
"type": "string",
"description": "If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "Corp Container Service",
},
"REGISTRY_TITLE_SHORT": {
"type": "string",
"description": "If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "CCS",
},
"CONTACT_INFO": {
"type": "array",
"uniqueItems": True,
"description": "If specified, contact information to display on the contact page. "
+ "If only a single piece of contact information is specified, the contact footer will link directly.",
"items": [
{
"type": "string",
"pattern": "^mailto:(.)+$",
"x-example": "mailto:support@quay.io",
"description": "Adds a link to send an e-mail",
},
{
"type": "string",
"pattern": "^irc://(.)+$",
"x-example": "irc://chat.freenode.net:6665/quay",
"description": "Adds a link to visit an IRC chat room",
},
{
"type": "string",
"pattern": "^tel:(.)+$",
"x-example": "tel:+1-888-930-3475",
"description": "Adds a link to call a phone number",
},
{
"type": "string",
"pattern": "^http(s)?://(.)+$",
"x-example": "https://twitter.com/quayio",
"description": "Adds a link to a defined URL",
},
],
},
"SEARCH_RESULTS_PER_PAGE": {
"type": "number",
"description": "Number of results returned per page by search page. Defaults to 10",
"x-example": 10,
},
"SEARCH_MAX_RESULT_PAGE_COUNT": {
"type": "number",
"description": "Maximum number of pages the user can paginate in search before they are limited. Defaults to 10",
"x-example": 10,
},
# E-mail.
"FEATURE_MAILING": {
"type": "boolean",
"description": "Whether emails are enabled. Defaults to True",
"x-example": True,
},
"MAIL_SERVER": {
"type": "string",
"description": "The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.",
"x-example": "smtp.somedomain.com",
},
"MAIL_USE_TLS": {
"type": "boolean",
"description": "If specified, whether to use TLS for sending e-mails.",
"x-example": True,
},
"MAIL_PORT": {
"type": "number",
"description": "The SMTP port to use. If not specified, defaults to 587.",
"x-example": 588,
},
"MAIL_USERNAME": {
"type": ["string", "null"],
"description": "The SMTP username to use when sending e-mails.",
"x-example": "myuser",
},
"MAIL_PASSWORD": {
"type": ["string", "null"],
"description": "The SMTP password to use when sending e-mails.",
"x-example": "mypassword",
},
"MAIL_DEFAULT_SENDER": {
"type": ["string", "null"],
"description": "If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.",
"x-example": "support@myco.com",
},
# Database.
"DB_URI": {
"type": "string",
"description": "The URI at which to access the database, including any credentials.",
"x-example": "mysql+pymysql://username:password@dns.of.database/quay",
"x-reference": "https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495",
},
"DB_CONNECTION_ARGS": {
"type": "object",
"description": "If specified, connection arguments for the database such as timeouts and SSL.",
"properties": {
"threadlocals": {
"type": "boolean",
"description": "Whether to use thread-local connections. Should *ALWAYS* be `true`",
},
"autorollback": {
"type": "boolean",
"description": "Whether to use auto-rollback connections. Should *ALWAYS* be `true`",
},
"ssl": {
"type": "object",
"description": "SSL connection configuration",
"properties": {
"ca": {
"type": "string",
"description": "*Absolute container path* to the CA certificate to use for SSL connections",
"x-example": "conf/stack/ssl-ca-cert.pem",
},
},
"required": ["ca"],
},
},
"required": ["threadlocals", "autorollback"],
},
"ALLOW_PULLS_WITHOUT_STRICT_LOGGING": {
"type": "boolean",
"description": "If true, pulls in which the pull audit log entry cannot be written will "
+ "still succeed. Useful if the database can fallback into a read-only state "
+ "and it is desired for pulls to continue during that time. Defaults to False.",
"x-example": True,
},
# Storage.
"FEATURE_STORAGE_REPLICATION": {
"type": "boolean",
"description": "Whether to automatically replicate between storage engines. Defaults to False",
"x-example": False,
},
"FEATURE_PROXY_STORAGE": {
"type": "boolean",
"description": "Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False",
"x-example": False,
},
"MAXIMUM_LAYER_SIZE": {
"type": "string",
"description": "Maximum allowed size of an image layer. Defaults to 20G",
"x-example": "100G",
"pattern": "^[0-9]+(G|M)$",
},
"DISTRIBUTED_STORAGE_CONFIG": {
"type": "object",
"description": "Configuration for storage engine(s) to use in Quay. Each key is a unique ID"
+ " for a storage engine, with the value being a tuple of the type and "
+ " configuration for that engine.",
"x-example": {
"local_storage": ["LocalStorage", {"storage_path": "some/path/"}],
},
"items": {
"type": "array",
},
},
"DISTRIBUTED_STORAGE_PREFERENCE": {
"type": "array",
"description": "The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to "
+ "use. A preferred engine means it is first checked for pullig and images are "
+ "pushed to it.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS": {
"type": "array",
"description": "The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose "
+ "images should be fully replicated, by default, to all other storage engines.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"USERFILES_LOCATION": {
"type": "string",
"description": "ID of the storage engine in which to place user-uploaded files",
"x-example": "s3_us_east",
},
"USERFILES_PATH": {
"type": "string",
"description": "Path under storage in which to place user-uploaded files",
"x-example": "userfiles",
},
"ACTION_LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If action log archiving is enabled, the storage engine in which to place the "
+ "archived data.",
"x-example": "s3_us_east",
},
"ACTION_LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If action log archiving is enabled, the path in storage in which to place the "
+ "archived data.",
"x-example": "archives/actionlogs",
},
"ACTION_LOG_ROTATION_THRESHOLD": {
"type": "string",
"description": "If action log archiving is enabled, the time interval after which to "
+ "archive data.",
"x-example": "30d",
},
"LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If builds are enabled, the storage engine in which to place the "
+ "archived build logs.",
"x-example": "s3_us_east",
},
"LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If builds are enabled, the path in storage in which to place the "
+ "archived build logs.",
"x-example": "archives/buildlogs",
},
# Authentication.
"AUTHENTICATION_TYPE": {
"type": "string",
"description": "The authentication engine to use for credential authentication.",
"x-example": "Database",
"enum": ["Database", "LDAP", "JWT", "Keystone", "OIDC", "AppToken"],
},
"SUPER_USERS": {
"type": "array",
"description": "Quay usernames of those users to be granted superuser privileges",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"DIRECT_OAUTH_CLIENTID_WHITELIST": {
"type": "array",
"description": "A list of client IDs of *Quay-managed* applications that are allowed "
+ "to perform direct OAuth approval without user approval.",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html",
"uniqueItems": True,
"items": {
"type": "string",
},
},
# Redis.
"BUILDLOGS_REDIS": {
"type": "object",
"description": "Connection information for Redis for build logs caching",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
"USER_EVENTS_REDIS": {
"type": "object",
"description": "Connection information for Redis for user event handling",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
# OAuth configuration.
"GITHUB_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) as an external login provider",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-auth.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"ORG_RESTRICT": {
"type": "boolean",
"description": "If true, only users within the organization whitelist can login using this provider",
"x-example": True,
},
"ALLOWED_ORGANIZATIONS": {
"type": "array",
"description": "The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option",
"uniqueItems": True,
"items": {
"type": "string",
},
},
},
},
"BITBUCKET_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using BitBucket for build triggers",
"required": ["CONSUMER_KEY", "CONSUMER_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html",
"properties": {
"CONSUMER_KEY": {
"type": "string",
"description": "The registered consumer key (client ID) for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CONSUMER_SECRET": {
"type": "string",
"description": "The registered consumer secret (client secret) for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITHUB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) for build triggers",
"required": ["GITHUB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-build.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
},
},
"GOOGLE_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Google for external authentication",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITLAB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Gitlab (Enterprise) for external authentication",
"required": ["GITLAB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"GITLAB_ENDPOINT": {
"type": "string",
"description": "The endpoint at which Gitlab(Enterprise) is running",
"x-example": "https://gitlab.com",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"BRANDING": {
"type": ["object", "null"],
"description": "Custom branding for logos and URLs in the Quay UI",
"required": ["logo"],
"properties": {
"logo": {
"type": "string",
"description": "Main logo image URL",
"x-example": "/static/img/quay-horizontal-color.svg",
},
"footer_img": {
"type": "string",
"description": "Logo for UI footer",
"x-example": "/static/img/RedHat.svg",
},
"footer_url": {
"type": "string",
"description": "Link for footer image",
"x-example": "https://redhat.com",
},
},
},
"DOCUMENTATION_ROOT": {"type": "string", "description": "Root URL for documentation links"},
# Health.
"HEALTH_CHECKER": {
"description": "The configured health check.",
"x-example": ("RDSAwareHealthCheck", {"access_key": "foo", "secret_key": "bar"}),
},
# Metrics.
"PROMETHEUS_NAMESPACE": {
"type": "string",
"description": "The prefix applied to all exposed Prometheus metrics. Defaults to `quay`",
"x-example": "myregistry",
},
# Misc configuration.
"BLACKLIST_V2_SPEC": {
"type": "string",
"description": "The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`",
"x-reference": "http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec",
"x-example": "<1.8.0",
},
"USER_RECOVERY_TOKEN_LIFETIME": {
"type": "string",
"description": "The length of time a token for recovering a user accounts is valid. Defaults to 30m.",
"x-example": "10m",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"SESSION_COOKIE_SECURE": {
"type": "boolean",
"description": "Whether the `secure` property should be set on session cookies. "
+ "Defaults to False. Recommended to be True for all installations using SSL.",
"x-example": True,
"x-reference": "https://en.wikipedia.org/wiki/Secure_cookies",
},
"PUBLIC_NAMESPACES": {
"type": "array",
"description": "If a namespace is defined in the public namespace list, then it will appear on *all*"
+ " user's repository list pages, regardless of whether that user is a member of the namespace."
+ ' Typically, this is used by an enterprise customer in configuring a set of "well-known"'
+ " namespaces.",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"AVATAR_KIND": {
"type": "string",
"description": "The types of avatars to display, either generated inline (local) or Gravatar (gravatar)",
"enum": ["local", "gravatar"],
},
"V2_PAGINATION_SIZE": {
"type": "number",
"description": "The number of results returned per page in V2 registry APIs",
"x-example": 100,
},
"ENABLE_HEALTH_DEBUG_SECRET": {
"type": ["string", "null"],
"description": "If specified, a secret that can be given to health endpoints to see full debug info when"
+ "not authenticated as a superuser",
"x-example": "somesecrethere",
},
"BROWSER_API_CALLS_XHR_ONLY": {
"type": "boolean",
"description": "If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.",
"x-example": False,
},
# Time machine and tag expiration settings.
"FEATURE_CHANGE_TAG_EXPIRATION": {
"type": "boolean",
"description": "Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.",
"x-example": False,
},
"DEFAULT_TAG_EXPIRATION": {
"type": "string",
"description": "The default, configurable tag expiration time for time machine. Defaults to `2w`.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"TAG_EXPIRATION_OPTIONS": {
"type": "array",
"description": "The options that users can select for expiration of tags in their namespace (if enabled)",
"items": {
"type": "string",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
},
# Team syncing.
"FEATURE_TEAM_SYNCING": {
"type": "boolean",
"description": "Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)",
"x-example": True,
},
"TEAM_RESYNC_STALE_TIME": {
"type": "string",
"description": "If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)",
"x-example": "2h",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP": {
"type": "boolean",
"description": "If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.",
"x-example": True,
},
# Security scanning.
"FEATURE_SECURITY_SCANNER": {
"type": "boolean",
"description": "Whether to turn of/off the security scanner. Defaults to False",
"x-example": False,
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/security-scanning.html",
},
"FEATURE_SECURITY_NOTIFICATIONS": {
"type": "boolean",
"description": "If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False",
"x-example": False,
},
"SECURITY_SCANNER_ENDPOINT": {
"type": "string",
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V2 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_V4_ENDPOINT": {
"type": ["string", "null"],
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V4 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_INDEXING_INTERVAL": {
"type": "number",
"description": "The number of seconds between indexing intervals in the security scanner. Defaults to 30.",
"x-example": 30,
},
"SECURITY_SCANNER_V4_PSK": {
"type": "string",
"description": "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.",
"x-example": "PSK",
},
# Repository mirroring
"REPO_MIRROR_INTERVAL": {
"type": "number",
"description": "The number of seconds between checking for repository mirror candidates. Defaults to 30.",
"x-example": 30,
},
# Build
"FEATURE_GITHUB_BUILD": {
"type": "boolean",
"description": "Whether to support GitHub build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BITBUCKET_BUILD": {
"type": "boolean",
"description": "Whether to support Bitbucket build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_GITLAB_BUILD": {
"type": "boolean",
"description": "Whether to support GitLab build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BUILD_SUPPORT": {
"type": "boolean",
"description": "Whether to support Dockerfile build. Defaults to True",
"x-example": True,
},
"DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT": {
"type": ["number", "null"],
"description": "If not None, the default maximum number of builds that can be queued in a namespace.",
"x-example": 20,
},
"SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.",
"x-example": 10,
},
"SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.",
"x-example": 50,
},
# Login
"FEATURE_GITHUB_LOGIN": {
"type": "boolean",
"description": "Whether GitHub login is supported. Defaults to False",
"x-example": False,
},
"FEATURE_GOOGLE_LOGIN": {
"type": "boolean",
"description": "Whether Google login is supported. Defaults to False",
"x-example": False,
},
# Recaptcha
"FEATURE_RECAPTCHA": {
"type": "boolean",
"description": "Whether Recaptcha is necessary for user login and recovery. Defaults to False",
"x-example": False,
"x-reference": "https://www.google.com/recaptcha/intro/",
},
"RECAPTCHA_SITE_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the site key for the Recaptcha service",
},
"RECAPTCHA_SECRET_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the secret key for the Recaptcha service",
},
# External application tokens.
"FEATURE_APP_SPECIFIC_TOKENS": {
"type": "boolean",
"description": "If enabled, users can create tokens for use by the Docker CLI. Defaults to True",
"x-example": False,
},
"APP_SPECIFIC_TOKEN_EXPIRATION": {
"type": ["string", "null"],
"description": "The expiration for external app tokens. Defaults to None.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"EXPIRED_APP_SPECIFIC_TOKEN_GC": {
"type": ["string", "null"],
"description": "Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
# Feature Flag: Garbage collection.
"FEATURE_GARBAGE_COLLECTION": {
"type": "boolean",
"description": "Whether garbage collection of repositories is enabled. Defaults to True",
"x-example": False,
},
# Feature Flag: Rate limits.
"FEATURE_RATE_LIMITS": {
"type": "boolean",
"description": "Whether to enable rate limits on API and registry endpoints. Defaults to False",
"x-example": True,
},
# Feature Flag: Aggregated log retrieval.
"FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL": {
"type": "boolean",
"description": "Whether to allow retrieval of aggregated log counts. Defaults to True",
"x-example": True,
},
# Feature Flag: Log export.
"FEATURE_LOG_EXPORT": {
"type": "boolean",
"description": "Whether to allow exporting of action logs. Defaults to True",
"x-example": True,
},
# Feature Flag: User last accessed.
"FEATURE_USER_LAST_ACCESSED": {
"type": "boolean",
"description": "Whether to record the last time a user was accessed. Defaults to True",
"x-example": True,
},
# Feature Flag: Permanent Sessions.
"FEATURE_PERMANENT_SESSIONS": {
"type": "boolean",
"description": "Whether sessions are permanent. Defaults to True",
"x-example": True,
},
# Feature Flag: Super User Support.
"FEATURE_SUPER_USERS": {
"type": "boolean",
"description": "Whether super users are supported. Defaults to True",
"x-example": True,
},
# Feature Flag: Anonymous Users.
"FEATURE_ANONYMOUS_ACCESS": {
"type": "boolean",
"description": " Whether to allow anonymous users to browse and pull public repositories. Defaults to True",
"x-example": True,
},
# Feature Flag: User Creation.
"FEATURE_USER_CREATION": {
"type": "boolean",
"description": "Whether users can be created (by non-super users). Defaults to True",
"x-example": True,
},
# Feature Flag: Invite Only User Creation.
"FEATURE_INVITE_ONLY_USER_CREATION": {
"type": "boolean",
"description": "Whether users being created must be invited by another user. Defaults to False",
"x-example": False,
},
# Feature Flag: Encrypted Basic Auth.
"FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH": {
"type": "boolean",
"description": "Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False",
"x-example": False,
},
# Feature Flag: Direct Login.
"FEATURE_DIRECT_LOGIN": {
"type": "boolean",
"description": "Whether users can directly login to the UI. Defaults to True",
"x-example": True,
},
# Feature Flag: Advertising V2.
"FEATURE_ADVERTISE_V2": {
"type": "boolean",
"description": "Whether the v2/ endpoint is visible. Defaults to True",
"x-example": True,
},
# Feature Flag: Log Rotation.
"FEATURE_ACTION_LOG_ROTATION": {
"type": "boolean",
"description": "Whether or not to rotate old action logs to storage. Defaults to False",
"x-example": False,
},
# Feature Flag: ACI Conversion.
"FEATURE_ACI_CONVERSION": {
"type": "boolean",
"description": "Whether to enable conversion to ACIs. Defaults to False",
"x-example": False,
},
# Feature Flag: Library Support.
"FEATURE_LIBRARY_SUPPORT": {
"type": "boolean",
"description": 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
"x-example": True,
},
# Feature Flag: Require Team Invite.
"FEATURE_REQUIRE_TEAM_INVITE": {
"type": "boolean",
"description": "Whether to require invitations when adding a user to a team. Defaults to True",
"x-example": True,
},
# Feature Flag: Collecting and Supporting Metadata.
"FEATURE_USER_METADATA": {
"type": "boolean",
"description": "Whether to collect and support user metadata. Defaults to False",
"x-example": False,
},
# Feature Flag: Support App Registry.
"FEATURE_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to enable support for App repositories. Defaults to False",
"x-example": False,
},
# Feature Flag: Read only app registry.
"FEATURE_READONLY_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to App repositories are read-only. Defaults to False",
"x-example": True,
},
# Feature Flag: Public Reposiotires in _catalog Endpoint.
"FEATURE_PUBLIC_CATALOG": {
"type": "boolean",
"description": "If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False",
"x-example": False,
},
# Feature Flag: Reader Build Logs.
"FEATURE_READER_BUILD_LOGS": {
"type": "boolean",
"description": "If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False",
"x-example": False,
},
# Feature Flag: Usernames Autocomplete.
"FEATURE_PARTIAL_USER_AUTOCOMPLETE": {
"type": "boolean",
"description": "If set to true, autocompletion will apply to partial usernames. Defaults to True",
"x-example": True,
},
# Feature Flag: User log access.
"FEATURE_USER_LOG_ACCESS": {
"type": "boolean",
"description": "If set to true, users will have access to audit logs for their namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: User renaming.
"FEATURE_USER_RENAME": {
"type": "boolean",
"description": "If set to true, users can rename their own namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: Username confirmation.
"FEATURE_USERNAME_CONFIRMATION": {
"type": "boolean",
"description": "If set to true, users can confirm their generated usernames. Defaults to True",
"x-example": False,
},
# Feature Flag: V1 push restriction.
"FEATURE_RESTRICTED_V1_PUSH": {
"type": "boolean",
"description": "If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. Defaults to True",
"x-example": False,
},
# Feature Flag: Support Repository Mirroring.
"FEATURE_REPO_MIRROR": {
"type": "boolean",
"description": "Whether to enable support for repository mirroring. Defaults to False",
"x-example": False,
},
"REPO_MIRROR_TLS_VERIFY": {
"type": "boolean",
"description": "Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True",
"x-example": True,
},
"REPO_MIRROR_SERVER_HOSTNAME": {
"type": "string",
"description": "Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset",
"x-example": "openshift-quay-service",
},
# Feature Flag: V1 push restriction.
"V1_PUSH_WHITELIST": {
"type": "array",
"description": "The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.",
"x-example": ["some", "namespaces"],
},
# Logs model
"LOGS_MODEL": {
"type": "string",
"description": "Logs model for action logs",
"enum": ["database", "transition_reads_both_writes_es", "elasticsearch"],
"x-example": "database",
},
"LOGS_MODEL_CONFIG": {
"type": "object",
"description": "Logs model config for action logs",
"x-reference": "https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html",
"properties": {
"producer": {
"type": "string",
"description": "Logs producer if logging to Elasticsearch",
"enum": ["kafka", "elasticsearch", "kinesis_stream"],
"x-example": "kafka",
},
"elasticsearch_config": {
"type": "object",
"description": "Elasticsearch cluster configuration",
"properties": {
"host": {
"type": "string",
"description": "Elasticsearch cluster endpoint",
"x-example": "host.elasticsearch.example",
},
"port": {
"type": "number",
"description": "Elasticsearch cluster endpoint port",
"x-example": 1234,
},
"access_key": {
"type": "string",
"description": "Elasticsearch user (or IAM key for AWS ES)",
"x-example": "some_string",
},
"secret_key": {
"type": "string",
"description": "Elasticsearch password (or IAM secret for AWS ES)",
"x-example": "some_secret_string",
},
"aws_region": {
"type": "string",
"description": "Amazon web service region",
"x-example": "us-east-1",
},
"use_ssl": {
"type": "boolean",
"description": "Use ssl for Elasticsearch. Defaults to True",
"x-example": True,
},
"index_prefix": {
"type": "string",
"description": "Elasticsearch's index prefix",
"x-example": "logentry_",
},
"index_settings": {
"type": "object",
"description": "Elasticsearch's index settings",
},
},
},
"kafka_config": {
"type": "object",
"description": "Kafka cluster configuration",
"properties": {
"bootstrap_servers": {
"type": "array",
"description": "List of Kafka brokers to bootstrap the client from",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"topic": {
"type": "string",
"description": "Kafka topic to publish log entries to",
"x-example": "logentry",
},
"max_block_seconds": {
"type": "number",
"description": "Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable",
"x-example": 10,
},
},
},
"kinesis_stream_config": {
"type": "object",
"description": "AWS Kinesis Stream configuration",
"properties": {
"stream_name": {
"type": "string",
"description": "Kinesis stream to send action logs to",
"x-example": "logentry-kinesis-stream",
},
"aws_region": {
"type": "string",
"description": "AWS region",
"x-example": "us-east-1",
},
"aws_access_key": {
"type": "string",
"description": "AWS access key",
"x-example": "some_access_key",
},
"aws_secret_key": {
"type": "string",
"description": "AWS secret key",
"x-example": "some_secret_key",
},
"connect_timeout": {
"type": "number",
"description": "Number of seconds before timeout when attempting to make a connection",
"x-example": 5,
},
"read_timeout": {
"type": "number",
"description": "Number of seconds before timeout when reading from a connection",
"x-example": 5,
},
"retries": {
"type": "number",
"description": "Max number of attempts made on a single request",
"x-example": 5,
},
"max_pool_connections": {
"type": "number",
"description": "The maximum number of connections to keep in a connection pool",
"x-example": 10,
},
},
},
},
},
# Feature Flag: Blacklist Email Domains
"FEATURE_BLACKLISTED_EMAILS": {
"type": "boolean",
"description": "If set to true, no new User accounts may be created if their email domain is blacklisted.",
"x-example": False,
},
# Blacklisted Email Domains
"BLACKLISTED_EMAIL_DOMAINS": {
"type": "array",
"description": "The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.",
"x-example": ["example.com", "example.org"],
},
"FRESH_LOGIN_TIMEOUT": {
"type": "string",
"description": "The time after which a fresh login requires users to reenter their password",
"x-example": "5m",
},
# Webhook blacklist.
"WEBHOOK_HOSTNAME_BLACKLIST": {
"type": "array",
"description": "The set of hostnames to disallow from webhooks when validating, beyond localhost",
"x-example": ["somexternaldomain.com"],
},
},
}
| 44.016365
| 172
| 0.513179
|
acfca358d63cb04f0e997bb0987e4620b73f860d
| 2,193
|
py
|
Python
|
tests/test_resize_scipy.py
|
shuohan/resize
|
8b1f9191d398504c082723623ef9131412cd0710
|
[
"MIT"
] | null | null | null |
tests/test_resize_scipy.py
|
shuohan/resize
|
8b1f9191d398504c082723623ef9131412cd0710
|
[
"MIT"
] | null | null | null |
tests/test_resize_scipy.py
|
shuohan/resize
|
8b1f9191d398504c082723623ef9131412cd0710
|
[
"MIT"
] | 1
|
2022-03-23T13:39:48.000Z
|
2022-03-23T13:39:48.000Z
|
#!/usr/bin/env python
import numpy as np
from resize.scipy import resize
def test_scipy():
    """Exercise resize.scipy.resize on 3-D and 1-D inputs.

    Covers both alignment modes (same FOV vs. align-first), with and
    without an explicit target_shape, and checks interpolated values
    against hand-computed expectations (order=1, linear interpolation).
    """
    # Same FOV
    # Build a 6x7x5 volume whose value at (i, j, k) is i + j + k, so linear
    # interpolation results are easy to verify by hand.
    x1 = np.arange(6)[:, None, None]
    x2 = np.arange(7)[None, :, None]
    x3 = np.arange(5)[None, None, :]
    x = (x1 + x2 + x3).astype(float)
    # Per-axis sampling steps: downsample first two axes, upsample the third.
    dxyz = (1.5, 2, 0.3)
    y, coords = resize(x, dxyz, order=1, return_coords=True)
    assert y.shape == (4, 4, 17)
    assert np.array_equal(y[:, 0, 0], [0.25, 1.75, 3.25, 4.75])
    assert np.array_equal(y[0, :, 0], [0.25, 2.25, 4.25, 6.25])
    assert np.allclose(y[0, 0, :], [0.25, 0.25, 0.45, 0.75,
                                    1.05, 1.35, 1.65, 1.95,
                                    2.25, 2.55, 2.85, 3.15,
                                    3.45, 3.75, 4.05, 4.25, 4.25])
    # Explicit target_shape overrides the shape implied by dxyz alone.
    y2 = resize(x, dxyz, order=1, target_shape=(3, 8, 12))
    assert y2.shape == (3, 8, 12)
    assert np.allclose(y2[:, 0, 0], [1.35, 2.85, 4.35])
    assert np.allclose(y2[0, :, 0], [1.35, 1.35, 1.35, 3.35, 5.35, 7.35, 7.35, 7.35])
    assert np.allclose(y2[0, 0, :], [1.35, 1.65, 1.95, 2.25,
                                     2.55, 2.85, 3.15, 3.45,
                                     3.75, 4.05, 4.35, 4.65])
    # 1-D input with a fractional step < 1 (upsampling).
    x1 = np.arange(5).astype(float)
    y1 = resize(x1, (0.7, ), order=1)
    assert y1.shape == (7, )
    assert np.allclose(y1, np.array((0, 0.6, 1.3, 2.0, 2.7, 3.4, 4)))
    # Align first
    # same_fov=False aligns the first sample instead of preserving the FOV.
    y = resize(x, dxyz, order=1, same_fov=False)
    assert y.shape == (4, 4, 14)
    assert np.allclose(y[:, 0, 0], [0, 1.5, 3, 4.5])
    assert np.allclose(y[0, :, 0], [0, 2, 4, 6])
    assert np.allclose(y[0, 0, :], [0, 0.3, 0.6, 0.9, 1.2, 1.5, 1.8,
                                    2.1, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9])
    y1 = resize(x1, (0.7, ), order=1, same_fov=False)
    assert np.allclose(y1, [0, 0.7, 1.4, 2.1, 2.8, 3.5])
    # Align-first combined with an explicit target_shape; out-of-range
    # samples are clamped to the edge value (e.g. the trailing 6s).
    y = resize(x, dxyz, order=1, same_fov=False, target_shape=(3, 8, 12))
    assert np.allclose(y[:, 0, 0], [0, 1.5, 3])
    assert np.allclose(y[0, :, 0], [0, 2, 4, 6, 6, 6, 6, 6])
    assert np.allclose(y[0, 0, :], [0, 0.3, 0.6, 0.9, 1.2, 1.5, 1.8,
                                    2.1, 2.4, 2.7, 3.0, 3.3])
    print('successful')
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    test_scipy()
| 36.55
| 85
| 0.465572
|
acfca379ed9cad84a42531894abecd7926bb6555
| 7,613
|
py
|
Python
|
modules/processing/procmemory.py
|
xuna123/Bold-Falcon
|
bef7dfc3103143bd51ca82838565877097fecc49
|
[
"BSD-3-Clause"
] | 1
|
2021-06-22T05:33:08.000Z
|
2021-06-22T05:33:08.000Z
|
modules/processing/procmemory.py
|
xuna123/Bold-Falcon
|
bef7dfc3103143bd51ca82838565877097fecc49
|
[
"BSD-3-Clause"
] | null | null | null |
modules/processing/procmemory.py
|
xuna123/Bold-Falcon
|
bef7dfc3103143bd51ca82838565877097fecc49
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import hashlib
import logging
import os
import re
import struct
from lib.cuckoo.common.abstracts import Processing
from lib.cuckoo.common.objects import File
# pefile is optional; PE extraction from memory dumps is skipped when it is
# not installed (see ProcessMemory.run).
try:
    import pefile
    HAVE_PEFILE = True
except ImportError:
    HAVE_PEFILE = False
log = logging.getLogger(__name__)

# Win32 memory protection constants (PAGE_* flags from winnt.h), as recorded
# in the region headers of the process memory dump files.
PAGE_READONLY = 0x00000002
PAGE_READWRITE = 0x00000004
PAGE_WRITECOPY = 0x00000008
PAGE_EXECUTE = 0x00000010
PAGE_EXECUTE_READ = 0x00000020
PAGE_EXECUTE_READWRITE = 0x00000040
PAGE_EXECUTE_WRITECOPY = 0x00000080

# Maps a PAGE_* constant to a short access string: r=read, w=write,
# x=execute, c=copy-on-write. Note PAGE_EXECUTE maps to "rx" as well.
page_access = {
    PAGE_READONLY: "r",
    PAGE_READWRITE: "rw",
    PAGE_WRITECOPY: "rwc",
    PAGE_EXECUTE: "rx",
    PAGE_EXECUTE_READ: "rx",
    PAGE_EXECUTE_READWRITE: "rwx",
    PAGE_EXECUTE_WRITECOPY: "rwxc",
}
class ProcessMemory(Processing):
    """Analyze process memory dumps.

    Parses the per-region dump format produced by the monitor, optionally
    emits an IDA Pro loader script per dump, and optionally carves injected
    PE images out of the dumped memory.

    NOTE(review): this module uses Python 2 syntax (``print>>o``) and
    str-based binary handling; it is not Python 3 compatible as-is.
    """
    def read_dump(self, filepath):
        """Yield one metadata dict per memory region in a .dmp file.

        Each region is preceded by a 24-byte header: a u64 base address
        followed by four u32 fields (size, state, type, protection), then
        `size` bytes of raw memory. The raw bytes are skipped here; only
        their file offset is recorded.
        """
        f = open(filepath, "rb")
        while True:
            buf = f.read(24)
            if not buf:
                break
            row = struct.unpack("QIIII", buf)
            # `state` is parsed but currently unused.
            addr, size, state, typ, protect = row
            yield {
                "addr": "0x%08x" % addr,
                "end": "0x%08x" % (addr + size),
                "size": size,
                "type": typ,
                # Unknown protection constants map to None.
                "protect": page_access.get(protect),
                # File offset where this region's raw bytes start.
                "offset": f.tell(),
            }
            # Skip the raw memory to land on the next region header.
            f.seek(size, 1)
    def create_idapy(self, process):
        """Write an IDA Pro loader script (.py) next to the process dump.

        The generated script recreates each memory region as an IDA segment
        (CODE for executable regions, DATA otherwise) and loads the raw
        bytes base64-embedded in the script itself.
        """
        i = open(process["file"], "rb")
        o = open(process["file"].replace(".dmp", ".py"), "wb")
        print>>o, "from idaapi import add_segm, mem2base, autoMark, AU_CODE"
        print>>o, "from idaapi import set_processor_type, SETPROC_ALL"
        print>>o, "set_processor_type('80386r', SETPROC_ALL)"
        for idx, region in enumerate(process["regions"]):
            i.seek(region["offset"])
            # Segment name/type based on the region's protection string.
            if not region["protect"]:
                section = "unk_%d" % idx
                type_ = "DATA"
            elif "x" in region["protect"]:
                section = "text_%d" % idx
                type_ = "CODE"
            elif "w" in region["protect"]:
                section = "data_%d" % idx
                type_ = "DATA"
            else:
                section = "rdata_%d" % idx
                type_ = "DATA"
            print>>o, "add_segm(0, %s, %s, '%s', '%s')" % (
                region["addr"], region["end"], section, type_
            )
            # Embed the raw region bytes as a single-line base64 literal.
            print>>o, "mem2base('%s'.decode('base64'), %s)" % (
                i.read(region["size"]).encode("base64").replace("\n", ""),
                region["addr"]
            )
            if type_ == "CODE":
                print>>o, "autoMark(%s, AU_CODE)" % region["addr"]
    def _fixup_pe_header(self, pe):
        """Fixes the PE header from an in-memory representation to an
        on-disk representation."""
        # In memory, sections live at their virtual addresses; map the raw
        # pointers back so the written file has a valid on-disk layout.
        for section in pe.sections:
            section.PointerToRawData = section.VirtualAddress
            section.SizeOfRawData = max(
                section.Misc_VirtualSize, section.SizeOfRawData
            )
        reloc = pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_BASERELOC"]
        if len(pe.OPTIONAL_HEADER.DATA_DIRECTORY) < reloc:
            return
        reloc = pe.OPTIONAL_HEADER.DATA_DIRECTORY[reloc]
        if not reloc.VirtualAddress or not reloc.Size:
            return
        # Disable relocations as those have already been applied.
        reloc.VirtualAddress = reloc.Size = 0
        pe.FILE_HEADER.Characteristics |= \
            pefile.IMAGE_CHARACTERISTICS["IMAGE_FILE_RELOCS_STRIPPED"]
    def dump_images(self, process, drop_dlls=False):
        """Dump executable images from this process memory dump.

        Scans the regions for an "MZ" signature, then captures subsequent
        regions until the PE's SizeOfImage is covered, and yields the file
        metadata (File.get_all()) of each written image.
        """
        buf = open(process["file"], "rb").read()
        images, capture, regions, end, pe = [], False, [], None, None
        for r in process["regions"]:
            off, size = r["offset"], r["size"]
            if capture:
                if int(r["end"], 16) > end:
                    images.append((pe, regions))
                    capture = False
                else:
                    regions.append(r)
                # NOTE(review): the region that terminates a capture is
                # itself never tested for an "MZ" header below.
                continue
            # We're going to take a couple of assumptions for granted here.
            # Namely, the PE header is fully intact, has not been tampered
            # with, and the DOS header, the NT header, and the Optional header
            # all remain in the first page/chunk of this PE file.
            if buf[off:off+2] != "MZ":
                continue
            pe = pefile.PE(data=buf[off:off+size], fast_load=True)
            # Enable the capture of memory regions.
            capture, regions = True, [r]
            end = int(r["addr"], 16) + pe.OPTIONAL_HEADER.SizeOfImage
        # If present, also process the last loaded executable.
        if capture and regions:
            images.append((pe, regions))
        for pe, regions in images:
            img = []
            # Skip DLLs if requested to do so (the default).
            if pe.is_dll() and not drop_dlls:
                continue
            self._fixup_pe_header(pe)
            img.append(pe.write())
            for r in regions:
                img.append(buf[r["offset"]:r["offset"]+r["size"]])
            # Content hash used to deduplicate/name the carved image.
            sha1 = hashlib.sha1("".join(img)).hexdigest()
            if pe.is_dll():
                filename = "%s-%s.dll_" % (process["pid"], sha1[:16])
            elif pe.is_exe():
                filename = "%s-%s.exe_" % (process["pid"], sha1[:16])
            else:
                log.warning(
                    "Unknown injected executable for pid=%s", process["pid"]
                )
                continue
            filepath = os.path.join(self.pmemory_path, filename)
            open(filepath, "wb").write("".join(img))
            yield File(filepath).get_all()
    def run(self):
        """Run analysis.
        @return: structured results.

        Walks every ``*.dmp`` file in the analysis' memory directory, builds
        a per-process result dict (yara matches, URLs, regions), and applies
        the optional idapro / extract_img / dump_delete processing options.
        """
        self.key = "procmemory"
        results = []
        if self.options.get("extract_img") and not HAVE_PEFILE:
            log.warning(
                "In order to extract PE files from memory dumps it is "
                "required to have pefile installed (`pip install pefile`)."
            )
        if os.path.exists(self.pmemory_path):
            for dmp in os.listdir(self.pmemory_path):
                if not dmp.endswith(".dmp"):
                    continue
                dump_path = os.path.join(self.pmemory_path, dmp)
                dump_file = File(dump_path)
                # Dump filenames encode "<pid>-<num>.dmp"; extract both.
                pid, num = map(int, re.findall("(\\d+)", dmp))
                proc = dict(
                    file=dump_path, pid=pid, num=num,
                    yara=dump_file.get_yara("memory"),
                    urls=list(dump_file.get_urls()),
                    regions=list(self.read_dump(dump_path)),
                )
                if self.options.get("idapro"):
                    self.create_idapy(proc)
                if self.options.get("extract_img") and HAVE_PEFILE:
                    proc["extracted"] = list(self.dump_images(proc))
                if self.options.get("dump_delete"):
                    try:
                        os.remove(dump_path)
                    except OSError:
                        log.error("Unable to delete memory dump file at path \"%s\"", dump_path)
                results.append(proc)
        results.sort(key=lambda x: (x["pid"], x["num"]))
        return results
| 33.1
| 96
| 0.535663
|
acfca4ba8bfed505a973f1d64e2f8d42b825e03b
| 1,385
|
py
|
Python
|
src/task_manager_turtlesim_smach/missions/mission_cond.py
|
IronClimber/ros_task_manager
|
6edd69777eb2007072475f4f42ec76d89cc13d28
|
[
"BSD-2-Clause"
] | 6
|
2019-12-06T14:47:53.000Z
|
2020-06-08T06:51:44.000Z
|
src/task_manager_turtlesim_smach/missions/mission_cond.py
|
IronClimber/ros_task_manager
|
6edd69777eb2007072475f4f42ec76d89cc13d28
|
[
"BSD-2-Clause"
] | 1
|
2019-12-02T15:09:39.000Z
|
2019-12-02T15:09:39.000Z
|
src/task_manager_turtlesim_smach/missions/mission_cond.py
|
IronClimber/ros_task_manager
|
6edd69777eb2007072475f4f42ec76d89cc13d28
|
[
"BSD-2-Clause"
] | 2
|
2020-02-02T08:06:24.000Z
|
2020-02-10T09:25:16.000Z
|
#!/usr/bin/python
# ROS specific imports
import roslib; roslib.load_manifest('task_manager_turtlesim')
import rospy
from math import *
from task_manager_lib.TaskSmach import *
# Drive the turtlesim around a square of waypoints using a SMACH state
# machine, with a concurrent region-of-interest watchdog that can interrupt
# the waypoint sequence, then loop back to the initial wait state.
rospy.init_node('task_client')
mi = MissionStateMachine()
# Waypoints: [x, y, heading, pen_r, pen_g, pen_b] — one pen colour per leg.
wp = [ [1., 9., pi/2, 0, 0, 255],
       [9., 9., 0., 0, 255, 255],
       [9., 1., -pi/2, 0, 255, 0],
       [1., 1., -pi, 255, 255, 0]]
# Create a SMACH state machine
sm = mi.createSequence()
# Add states to the container
with sm:
    init = mi.seq_task("Wait",duration=1.0)
    mi.seq_task("SetPen",on=False)
    mi.seq_task("GoTo",goal_x=1.0,goal_y=1.0)
    mi.seq_task("Clear")
    # Concurrence: the waypoint tour runs while WaitForROI watches for the
    # turtle entering the region of interest in the background.
    sm_con = mi.createConcurrence('normal_seq')
    with sm_con:
        mi.concurrent_task("WaitForROI",foreground=False,roi_x=9.,roi_y=6.,roi_radius=1.0)
        sm_sub = mi.createSequence()
        with sm_sub:
            for i,p in enumerate(wp):
                mi.seq_task("Wait",duration=0.2)
                mi.seq_task("SetPen",on=True,r=p[3],g=p[4],b=p[5])
                mi.seq_task("GoTo",goal_x=p[0],goal_y=p[1])
        smach.Concurrence.add('normal_seq',sm_sub)
    smach.Sequence.add('Concurrence',sm_con)
    mi.seq_task("Wait",duration=2.0)
    mi.seq_task("SetPen",on=False)
    mi.seq_task("GoTo",goal_x=5.0,goal_y=5.0)
    # On completion, transition back to the initial Wait state (loop).
    mi.seq_task("ReachAngle",target=-pi/2,transitions={'TASK_COMPLETED':init})
mi.run(sm)
rospy.loginfo("Mission completed")
| 28.854167
| 90
| 0.637545
|
acfca4e1ed7b1885f4035e2ae7574ae6ecb089bc
| 16,824
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/backup/custom_help.py
|
wenjie1070116/azure-cli
|
edfaf8154d53475e35c497cd403962e2e2b74a28
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/backup/custom_help.py
|
wenjie1070116/azure-cli
|
edfaf8154d53475e35c497cd403962e2e2b74a28
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/backup/custom_help.py
|
wenjie1070116/azure-cli
|
edfaf8154d53475e35c497cd403962e2e2b74a28
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import json
import re
import os
from datetime import datetime, timedelta
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from knack.log import get_logger
from msrest.paging import Paged
from msrestazure.tools import parse_resource_id, is_valid_resource_id
from azure.mgmt.recoveryservicesbackup.models import OperationStatusValues, JobStatus
from azure.cli.core.util import CLIError
from azure.cli.command_modules.backup._client_factory import (
job_details_cf, protection_container_refresh_operation_results_cf,
backup_operation_statuses_cf, protection_container_operation_results_cf,
backup_crr_job_details_cf, crr_operation_status_cf)
from azure.cli.core.azclierror import ResourceNotFoundError, ValidationError
logger = get_logger(__name__)

# Constants shared by the backup command helpers.
fabric_name = "Azure"                # Only fabric supported by these commands.
os_windows = 'Windows'
os_linux = 'Linux'
password_offset = 33                 # Offset/length used when carving a password
password_length = 15                 # out of generated recovery scripts.
# Maps the CLI-facing BackupManagementType values to the service-side names.
backup_management_type_map = {"AzureVM": "AzureIaasVM", "AzureWorkload": "AzureWorkLoad",
                              "AzureStorage": "AzureStorage", "MAB": "MAB"}
# Client Utilities
def is_native_name(name):
    """Return True when *name* is a service-native name (contains ';')."""
    return name.find(";") != -1
def is_id(identity):
    """Return True when *identity* looks like a resource ID (contains '/')."""
    return identity.count("/") > 0
def is_sql(resource_type):
    """Return True when *resource_type* is a SQL database (case-insensitive)."""
    normalized = resource_type.lower()
    return normalized == 'sqldatabase'
def is_hana(resource_type):
    """Return True when *resource_type* is an SAP HANA database (case-insensitive)."""
    normalized = resource_type.lower()
    return normalized == 'saphanadatabase'
def is_wl_container(name):
    """Return True when *name* denotes a workload (VMAppContainer) container."""
    return name.lower().find('vmappcontainer') >= 0
def is_range_valid(start_date, end_date):
    """Validate a date range; raise CLIError when start falls after end."""
    if not start_date <= end_date:
        raise CLIError("""Start date must be earlier than end date.""")
def get_resource_id(resource_id):
    """Strip the '/subscriptions/<id>' prefix (first three '/'-separated
    segments) from an ARM resource ID and return the remainder."""
    segments = resource_id.split('/')
    return "/".join(segments[3:])
def get_containers(client, container_type, status, resource_group_name, vault_name, container_name=None):
    """List protection containers of a vault, optionally filtered by name.

    A friendly container name is pushed into the server-side filter; a
    native name (contains ';') is matched client-side against the results.
    """
    criteria = {'backupManagementType': container_type, 'status': status}
    native_lookup = bool(container_name) and is_native_name(container_name)
    if container_name and not native_lookup:
        criteria['friendlyName'] = container_name
    paged = client.list(vault_name, resource_group_name, get_filter_string(criteria))
    containers = get_list_from_paged_response(paged)
    if native_lookup:
        return [c for c in containers if c.name == container_name]
    return containers
def get_resource_name_and_rg(resource_group_name, name_or_id):
    """Resolve *name_or_id* to a (name, resource_group) pair.

    A full ARM resource ID supplies both values itself; a bare name falls
    back to the given *resource_group_name*.
    """
    if not is_valid_resource_id(name_or_id):
        return name_or_id, resource_group_name
    parsed = parse_resource_id(name_or_id)
    return parsed['name'], parsed['resource_group']
def validate_container(container):
    """Raise ResourceNotFoundError when a container lookup returned None."""
    validate_object(container, "Container not found. Please provide a valid container_name.")
def validate_item(item):
    """Raise ResourceNotFoundError when an item lookup returned None."""
    validate_object(item, "Item not found. Please provide a valid item_name.")
def validate_policy(policy):
    """Raise ResourceNotFoundError when a policy lookup returned None."""
    validate_object(policy, "Policy not found. Please provide a valid policy_name.")
def validate_protectable_item(protectable_item):
    """Raise ResourceNotFoundError when a protectable-item lookup returned None."""
    validate_object(protectable_item, "Protectable item not found. Please provide a valid protectable_item_name.")
def validate_azurefileshare_item(azurefileshare_item):
    """Raise ResourceNotFoundError when an Azure File Share item lookup returned None."""
    validate_object(azurefileshare_item, "Azure File Share item not found. Please provide a valid azure_file_share.")
def validate_object(obj, error_message):
    """Raise ResourceNotFoundError with *error_message* when *obj* is None."""
    if obj is not None:
        return
    raise ResourceNotFoundError(error_message)
# def get_pipeline_response(pipeline_response, _0, _1):
# return pipeline_response
def get_target_path(resource_type, path, logical_name, data_directory_paths):
    """Build a restore target path for a workload data file.

    :param resource_type: directory type to match against each entry's ``.type``.
    :param path: original (Windows-style, backslash-separated) file path; only
        used to preserve the file extension, if any.
    :param logical_name: logical file name; a Unix timestamp is appended to
        make the generated name unique.
    :param data_directory_paths: iterable of objects with ``.type`` and
        ``.path`` attributes.
    :raises ValidationError: when no entry matches *resource_type* (the
        original code left ``data_directory_path`` unbound and crashed with
        ``NameError`` in that case).
    """
    data_directory_path = None
    for filepath in data_directory_paths:
        # As before, a later entry of the same type overrides an earlier one.
        if filepath.type == resource_type:
            data_directory_path = filepath
    if data_directory_path is None:
        raise ValidationError(
            "No data directory path found for type '{}'.".format(resource_type))
    # Extracts the file extension type if it exists otherwise returns empty string.
    # NOTE(review): keeps only the first extension segment ([1]) of a
    # multi-dot file name, matching the original behavior.
    file_type = '.' + path.split('\\')[-1].split('.')[1] if len(path.split('\\')[-1].split('.')) > 1 else ""
    file_name = logical_name + '_' + str(int(time.time())) + file_type
    return data_directory_path.path + file_name
# Tracking Utilities
# pylint: disable=inconsistent-return-statements
def track_backup_ilr(cli_ctx, result, vault_name, resource_group):
    """Poll an item-level-recovery operation and return its client scripts.

    Returns None when the finished operation carries no properties.
    """
    status = track_backup_operation(cli_ctx, resource_group, result, vault_name)
    if not status.properties:
        return None
    return status.properties.recovery_target.client_scripts
# pylint: disable=inconsistent-return-statements
def track_backup_job(cli_ctx, result, vault_name, resource_group):
    """Wait for a backup operation, then fetch and return its job details.

    Returns None when the finished operation carries no properties.
    """
    client = job_details_cf(cli_ctx)
    status = track_backup_operation(cli_ctx, resource_group, result, vault_name)
    if not status.properties:
        return None
    return client.get(vault_name, resource_group, status.properties.job_id)
def track_backup_operation(cli_ctx, resource_group, result, vault_name):
    """Poll a backup operation (5s interval) until it leaves InProgress."""
    client = backup_operation_statuses_cf(cli_ctx)
    op_id = get_operation_id_from_header(result.response.headers['Azure-AsyncOperation'])
    while True:
        status = client.get(vault_name, resource_group, op_id)
        if status.status != OperationStatusValues.in_progress.value:
            return status
        time.sleep(5)
def track_backup_crr_job(cli_ctx, result, azure_region, resource_id):
    """Wait for a cross-region-restore operation, then return its job details.

    Returns None when the finished operation carries no properties.
    """
    client = backup_crr_job_details_cf(cli_ctx)
    status = track_backup_crr_operation(cli_ctx, result, azure_region)
    if not status.properties:
        return None
    return client.get(azure_region, resource_id, status.properties.job_id)
def track_backup_crr_operation(cli_ctx, result, azure_region):
    """Poll a cross-region-restore operation (5s interval) until it leaves InProgress."""
    client = crr_operation_status_cf(cli_ctx)
    op_id = get_operation_id_from_header(result.response.headers['Azure-AsyncOperation'])
    while True:
        status = client.get(azure_region, op_id)
        if status.status != OperationStatusValues.in_progress.value:
            return status
        time.sleep(5)
def track_refresh_operation(cli_ctx, result, vault_name, resource_group):
    """Poll the container-refresh result endpoint until it stops answering 202.

    Purely a wait loop; nothing is returned.
    """
    client = protection_container_refresh_operation_results_cf(cli_ctx)
    op_id = get_operation_id_from_header(result.response.headers['Location'])
    while True:
        result = client.get(vault_name, resource_group,
                            fabric_name, op_id,
                            raw=True)
        if result.response.status_code != 202:
            break
        time.sleep(5)
def track_register_operation(cli_ctx, result, vault_name, resource_group, container_name):
    """Poll the container-register result endpoint until it stops answering 202.

    Purely a wait loop; nothing is returned.
    """
    client = protection_container_operation_results_cf(cli_ctx)
    op_id = get_operation_id_from_header(result.response.headers['Location'])
    while True:
        result = client.get(vault_name, resource_group,
                            fabric_name, container_name,
                            op_id, raw=True)
        if result.response.status_code != 202:
            break
        time.sleep(5)
# def track_mab_unregister_operation(cli_ctx, result, vault_name, resource_group, container_name):
# protection_container_operation_results_client = protection_container_operation_results_cf(cli_ctx)
# operation_id = get_operation_id_from_header(result.http_response.headers['Location'])
# result = protection_container_operation_results_client.get(vault_name, resource_group,
# fabric_name, container_name,
# operation_id, raw=True)
# while result.response.status_code == 202:
# time.sleep(5)
# result = protection_container_operation_results_client.get(vault_name, resource_group,
# fabric_name, container_name,
# operation_id, raw=True)
def track_inquiry_operation(cli_ctx, result, vault_name, resource_group, container_name):
    """Poll the container-inquiry result endpoint until it stops answering 202.

    Purely a wait loop; nothing is returned.
    """
    client = protection_container_operation_results_cf(cli_ctx)
    op_id = get_operation_id_from_header(result.response.headers['Location'])
    while True:
        result = client.get(vault_name, resource_group,
                            fabric_name, container_name,
                            op_id, raw=True)
        if result.response.status_code != 202:
            break
        time.sleep(5)
def job_in_progress(job_status):
    """Return True while the job is still running or being cancelled."""
    active_states = (JobStatus.in_progress.value, JobStatus.cancelling.value)
    return job_status in active_states
# List Utilities
def get_list_from_paged_response(obj_list):
    """Materialize a Paged response into a plain list; pass other values through."""
    if isinstance(obj_list, Paged):
        return list(obj_list)
    return obj_list
def get_none_one_or_many(obj_list):
    """Collapse a list: None when empty, the element when singleton, else the list."""
    if not obj_list:
        return None
    return obj_list[0] if len(obj_list) == 1 else obj_list
def get_filter_string(filter_dict):
    """Render *filter_dict* as an OData-style filter string.

    Keys are processed in sorted order; each supported value type becomes
    a ``key eq 'value'`` segment (datetimes formatted as
    'yyyy-MM-dd hh:mm:ss tt'); unsupported types are skipped.  Segments
    are joined with ' and '; returns None when nothing remains.
    """
    segments = []
    for key, value in sorted(filter_dict.items()):
        if isinstance(value, str):
            segments.append("{} eq '{}'".format(key, value))
        elif isinstance(value, datetime):
            # yyyy-MM-dd hh:mm:ss tt
            segments.append("{} eq '{}'".format(key, value.strftime('%Y-%m-%d %I:%M:%S %p')))
        elif isinstance(value, bool):
            segments.append("{} eq '{}'".format(key, str(value)))
    joined = " and ".join(segments)
    return joined if joined else None
def get_query_dates(end_date, start_date):
    """Normalize an optional date window to a concrete (end, start) pair.

    When only one bound is supplied, the other is derived 30 days away;
    when neither is supplied, both results are None.
    """
    if start_date and end_date:
        return end_date, start_date
    if end_date:
        return end_date, end_date - timedelta(days=30)
    if start_date:
        return start_date + timedelta(days=30), start_date
    return None, None
# JSON Utilities
def get_container_from_json(client, container):
    """Deserialize JSON text/file into a ProtectionContainerResource."""
    model = 'ProtectionContainerResource'
    return get_object_from_json(client, container, model)
def get_vault_from_json(client, vault):
    """Deserialize JSON text/file into a Vault."""
    model = 'Vault'
    return get_object_from_json(client, vault, model)
def get_vm_from_json(client, vm):
    """Deserialize JSON text/file into a VirtualMachine."""
    model = 'VirtualMachine'
    return get_object_from_json(client, vm, model)
def get_policy_from_json(client, policy):
    """Deserialize JSON text/file into a ProtectionPolicyResource."""
    model = 'ProtectionPolicyResource'
    return get_object_from_json(client, policy, model)
def get_item_from_json(client, item):
    """Deserialize JSON text/file into a ProtectedItemResource."""
    model = 'ProtectedItemResource'
    return get_object_from_json(client, item, model)
def get_protectable_item_from_json(client, item):
    """Deserialize JSON text/file into a WorkloadProtectableItemResource."""
    model = 'WorkloadProtectableItemResource'
    return get_object_from_json(client, item, model)
def get_job_from_json(client, job):
    """Deserialize JSON text/file into a JobResource."""
    model = 'JobResource'
    return get_object_from_json(client, job, model)
def get_recovery_point_from_json(client, recovery_point):
    """Deserialize JSON text/file into a RecoveryPointResource."""
    model = 'RecoveryPointResource'
    return get_object_from_json(client, recovery_point, model)
def get_or_read_json(json_or_file):
    """Return the parsed JSON object for *json_or_file*.

    The argument may be JSON text or a path to a JSON file.  Raises
    ValidationError when neither interpretation yields an object.
    NOTE: a JSON document that parses to null also raises, since the
    result is indistinguishable from "could not parse".
    """
    json_obj = None
    if is_json(json_or_file):
        json_obj = json.loads(json_or_file)
    elif os.path.exists(json_or_file):
        # Not inline JSON, but an existing file: load it from disk.
        with open(json_or_file) as f:
            json_obj = json.load(f)
    if json_obj is None:
        raise ValidationError(
            """
            The variable passed should be in valid JSON format and be supplied by az backup CLI commands.
            Make sure that you use output of relevant 'az backup show' commands and the --out is 'json'
            (use -o json for explicit JSON output) while assigning value to this variable.
            Take care to edit only the values and not the keys within the JSON file or string.
            """)
    return json_obj
def get_object_from_json(client, json_or_file, class_name):
    """Deserialize *json_or_file* (JSON text or file path) into the SDK
    model named *class_name*, using *client*'s msrest deserializer."""
    # Determine if input is json or file
    json_obj = get_or_read_json(json_or_file)
    # Deserialize json to object
    param = client._deserialize(class_name, json_obj)  # pylint: disable=protected-access
    if param is None:
        raise ValidationError(
            """
            The variable passed should be in valid JSON format and be supplied by az backup CLI commands.
            Make sure that you use output of relevant 'az backup show' commands and the --out is 'json'
            (use -o json for explicit JSON output) while assigning value to this variable.
            Take care to edit only the values and not the keys within the JSON file or string.
            """)
    return param
def is_json(content):
    """Return True when *content* is a string holding valid JSON.

    Fix: json.loads raises TypeError (not ValueError) for non-string
    input such as None; catch it too so callers can probe any value.
    """
    try:
        json.loads(content)
    except (TypeError, ValueError):
        return False
    return True
# ID Utilities
def get_protection_container_uri_from_id(arm_id):
    """Return the segment following '/protectionContainers/' in *arm_id*."""
    match = re.search('(?<=/protectionContainers/)[^/]+', arm_id)
    return match.group(0)
def get_protectable_item_uri_from_id(arm_id):
    """Return the segment following 'protectableItems/' in *arm_id*."""
    match = re.search('(?<=protectableItems/)[^/]+', arm_id)
    return match.group(0)
def get_protected_item_uri_from_id(arm_id):
    """Return the segment following 'protectedItems/' in *arm_id*."""
    match = re.search('(?<=protectedItems/)[^/]+', arm_id)
    return match.group(0)
def get_vm_name_from_vm_id(arm_id):
    """Return the VM name following 'virtualMachines/' in *arm_id*."""
    match = re.search('(?<=virtualMachines/)[^/]+', arm_id)
    return match.group(0)
def get_resource_group_from_id(arm_id):
    """Return the group name following '/resourceGroups/' in *arm_id*."""
    match = re.search('(?<=/resourceGroups/)[^/]+', arm_id)
    return match.group(0)
def get_operation_id_from_header(header):
    """Return the trailing path segment of an async-operation URL header."""
    path = urlparse(header).path
    return path.split("/")[-1]
def get_vault_from_arm_id(arm_id):
    """Return the vault name following '/vaults/' in *arm_id*."""
    match = re.search('(?<=/vaults/)[^/]+', arm_id)
    return match.group(0)
def validate_and_extract_container_type(container_name, backup_management_type):
    """Resolve the backup management type for a container.

    Friendly (non-native) names require an explicit backup management
    type, which is normalized through backup_management_type_map.  Native
    names encode the container kind in their first ';'-separated segment;
    unknown kinds map to None.
    """
    if not is_native_name(container_name):
        if backup_management_type is None:
            raise CLIError("""--backup-management-type is required when providing container's friendly name.""")
        if backup_management_type in backup_management_type_map.values():
            return backup_management_type
        return backup_management_type_map[backup_management_type]
    prefix = container_name.split(";")[0]
    native_type_map = {"IaasVMContainer": "AzureIaasVM", "StorageContainer": "AzureStorage",
                       "VMAppContainer": "AzureWorkload", "Windows": "MAB"}
    return native_type_map.get(prefix)
def validate_update_policy_request(existing_policy, new_policy):
    """Reject policy updates that attempt to switch the backup management type."""
    existing_type = existing_policy.properties.backup_management_type
    requested_type = new_policy.properties.backup_management_type
    if existing_type != requested_type:
        raise CLIError("BackupManagementType cannot be different than the existing type.")
| 38.410959
| 118
| 0.696505
|
acfca565b67f4221586ad2a819342aee78a8e9b5
| 779
|
py
|
Python
|
libexploit/payloads/validation.py
|
kkirsche/libexploit
|
d6e8dec9c09fde23d0ca4e1b3d6676573f89eef4
|
[
"BSD-2-Clause"
] | null | null | null |
libexploit/payloads/validation.py
|
kkirsche/libexploit
|
d6e8dec9c09fde23d0ca4e1b3d6676573f89eef4
|
[
"BSD-2-Clause"
] | null | null | null |
libexploit/payloads/validation.py
|
kkirsche/libexploit
|
d6e8dec9c09fde23d0ca4e1b3d6676573f89eef4
|
[
"BSD-2-Clause"
] | null | null | null |
from sys import exit
from libexploit.io.stdout import init_logger
def __validate(payload, expected_len, err_msg):
    """Log *err_msg* and exit(1) when len(payload) != expected_len.

    *err_msg* is a format template with {p} (actual) and {e} (expected)
    placeholders.

    Fix: the original formatted an undefined name ``msg`` and so raised
    NameError instead of reporting the length mismatch; the parameter is
    ``err_msg``.
    """
    logger = init_logger()
    payload_len = len(payload)
    if payload_len != expected_len:
        logger.error(err_msg.format(
            p=payload_len, e=expected_len))
        exit(1)
def validate_payload_length(payload, expected_len):
    """Abort unless the payload is exactly *expected_len* long."""
    message = 'payload length was: {p}, expected payload length: {e}'
    __validate(payload, expected_len, message)
def validate_nseh_offset(payload, expected_len):
    """Abort unless the next-SEH offset is exactly *expected_len*."""
    message = 'Next SEH offset was: {p}, expected nSEH offset: {e}'
    __validate(payload, expected_len, message)
def validate_seh_offset(payload, expected_len):
    """Abort unless the SEH offset is exactly *expected_len*."""
    message = 'SEH offset was: {p}, expected SEH offset: {e}'
    __validate(payload, expected_len, message)
| 27.821429
| 71
| 0.679076
|
acfca5cc06dfaf8c24fe607d643cc44a3addd267
| 15,603
|
py
|
Python
|
locora/grid_solvent/spatial.py
|
wutobias/locora
|
07f907ea9d86a3607973f5861ca332a01added54
|
[
"MIT"
] | null | null | null |
locora/grid_solvent/spatial.py
|
wutobias/locora
|
07f907ea9d86a3607973f5861ca332a01added54
|
[
"MIT"
] | null | null | null |
locora/grid_solvent/spatial.py
|
wutobias/locora
|
07f907ea9d86a3607973f5861ca332a01added54
|
[
"MIT"
] | null | null | null |
import numpy as np
from locora.utils.constants import DEG2RAD, RAD2DEG
### Written by T. Wulsdorf, AG Klebe Marburg University
### 08/2016
class field(object):
    """
    This is a class for operation on generic
    scalar fields.
    They are described in cartesian as well as
    in fractional space. That means that we need
    origin, frac2real/real2frac matrix(vector)
    and/or grid spacing vectors vector.
    Tobias Wulsdorf, AG Klebe, 08/2016
    """
    def __init__(self, Bins, Frac2Real=None, Delta=None, Origin=None, Center=None):
        """
        Set up the grid geometry.

        Bins      : grid points per axis (length-3 array)
        Frac2Real : 3x3 matrix mapping fractional to cartesian coordinates
        Delta     : per-axis grid spacing (alternative to Frac2Real)
        Origin    : cartesian position of grid point (0,0,0)
        Center    : cartesian position of the grid center (alternative to Origin)

        Exactly one of Frac2Real/Delta and exactly one of Origin/Center
        must be supplied.
        """
        if type(Frac2Real) == type(None) and type(Delta) == type(None):
            raise ValueError("Must provide Frac2Real or Delta.")
        if type(Frac2Real) != type(None) and type(Delta) != type(None):
            raise ValueError("Must provide either Frac2Real or Delta.")
        if type(Frac2Real) == type(None):
            # Orthogonal cell: the cell matrix is diagonal in the spacings.
            self.delta = Delta
            self.frac2real = np.eye(3,3) * self.delta
        else:
            self.frac2real = Frac2Real
            # Spacings are the column norms of the cell matrix.
            self.delta = np.linalg.norm(self.frac2real, axis=0)
        self.real2frac = np.linalg.inv(self.frac2real)
        self.bins = Bins
        # Rigid-body state applied on top of the cell (see translate/rotate).
        self.rotation_matrix = np.eye(3,3)
        self.translation_vector = np.zeros(3)
        if type(Origin) == type(None) and type(Center) == type(None):
            raise ValueError("Must provide origin or Center.")
        if type(Origin) != type(None) and type(Center) != type(None):
            raise ValueError("Must provide either origin or center.")
        if type(Center) == type(None):
            self.origin = Origin
            # NOTE(review): bins/2 truncates for integer Bins on Python 2
            # (this module still uses Python-2 print statements elsewhere).
            self.center = self.get_real(self.bins/2)
        else:
            self.center = Center
            #First we need an auxiliary origin at (0,0,0)
            self.origin = np.zeros(3)
            #Second translate origin according center displacement
            self.origin = self.center - self.get_real(self.bins/2)
        # Cartesian extent of the grid along each cell axis.
        self.dim = np.array([ np.linalg.norm(self.get_real([self.bins[0], 0., 0.])-self.origin),
                              np.linalg.norm(self.get_real([0., self.bins[1], 0.])-self.origin),
                              np.linalg.norm(self.get_real([0., 0., self.bins[2]])-self.origin)
                              ])
    def translate(self, vector=np.zeros(3)):
        """
        Translatation vector of unit cell origin
        (accumulates onto the current translation).
        """
        self.translation_vector += vector
    def rotate(self, matrix=np.eye(3,3)):
        """
        Rotate the unit cell vectors.
        """
        rotate_check(matrix)
        self.rotation_matrix = matrix.dot(self.rotation_matrix)
    def translate_global(self, vector=np.zeros(3)):
        """
        Translate global coordinate system
        along vector.
        """
        self.origin += vector
    def rotate_global(self, reference_point=np.zeros(3), matrix=np.eye(3,3)):
        """
        Rotate global coordinate system around
        reference point.
        """
        rotate_check(matrix)
        # Move the origin on the rotation circle, then compose the cell
        # rotation and re-orient the accumulated translation.
        self.origin = do_rotation(self.origin, reference_point, matrix)
        self.rotate(matrix)
        self.translation_vector = do_rotation(self.translation_vector, np.zeros(3), matrix)
    def get_nice_frac2real(self):
        """Cell matrix with the accumulated rotation applied."""
        return self.rotation_matrix.dot(self.frac2real)
    def get_nice_real2frac(self):
        """Inverse of the rotated cell matrix."""
        return np.linalg.inv(self.get_nice_frac2real())
    def get_voxel_volume(self):
        """
        Returns the volume per grid voxel.
        """
        # Scalar triple product of the three cell vectors.
        return np.absolute(np.cross(self.frac2real[:,0], self.frac2real[:,1]).dot(self.frac2real[:,2]))
    def get_frac(self, real_array):
        """Map cartesian coordinates to fractional grid coordinates,
        undoing the accumulated rotation/translation first."""
        #Convert to initial real space by inverse translation and rotation
        initial_reals = do_rotation(real_array, self.origin + self.translation_vector, np.linalg.inv(self.rotation_matrix))
        #Remove origin
        initial_reals -= (self.origin + self.translation_vector)
        #Convert to initial fractional space
        return initial_reals.dot(self.real2frac)
    def get_real(self, frac_array):
        """Map fractional grid coordinates to cartesian coordinates,
        applying the accumulated rotation/translation."""
        #Convert to real space
        reals = np.array(frac_array).dot(self.frac2real)
        #Perform rotation translation
        return do_rotation(reals, np.zeros(3), self.rotation_matrix) + self.origin + self.translation_vector
    def get_centers(self):
        """Cartesian coordinates of all voxel centers (bins[0]*bins[1]*bins[2], 3)."""
        return self.get_real(make_grid((np.arange(self.bins[0]),\
                                        np.arange(self.bins[1]),\
                                        np.arange(self.bins[2]))))
    def get_centers_real(self):
        """Alias for get_centers()."""
        return self.get_centers()
    def get_centers_frac(self):
        """Integer fractional coordinates of all voxel centers."""
        return make_grid((np.arange(self.bins[0]),\
                          np.arange(self.bins[1]),\
                          np.arange(self.bins[2])))
def guess_field(crds, delta=np.array([0.5,0.5,0.5])):
    """Construct a field that generously covers the coordinates *crds*.

    The grid is centered on the mean coordinate; the bin count covers the
    coordinate extent plus a 5/delta safety margin per axis.

    NOTE: the np.array default is shared across calls but only read here.
    """
    center = np.mean(crds, axis=0)
    spread = center - crds
    extent = np.abs(np.max(spread, axis=0) - np.min(spread, axis=0))
    bins = np.rint(extent / delta + (5.0 / delta))
    return field(Bins=bins, Delta=delta, Center=center)
def rotate_check(matrix):
    """Raise a Warning when *matrix* is not close to a proper rotation
    (determinant must lie in (0.99, 1.01))."""
    det = np.linalg.det(matrix)
    if 0.99 < det < 1.01:
        return
    raise Warning("Warning: Determinant of rotation matrix is %s. Should be close to +1.0." %det)
def do_rotation(crds, origin, rot_mat):
    """Rotate *crds* about *origin* by *rot_mat* (row-vector convention)."""
    shifted = crds - origin
    return shifted.dot(rot_mat) + origin
def set_quaternion(q_dist, theta, phi, psi):
    """Fill *q_dist* (N x 4) in place with unit quaternions built from the
    Euler angles theta, phi, psi (arrays, radians).

    The analytic construction already has unit norm; the final division
    only guards against round-off.

    Fix: the original normalized into a new array rebound to the local
    name ``q_dist``, so the caller's array was left untouched by the
    normalization; normalize in place instead.
    """
    cos_theta = np.cos(.5*theta)
    sin_theta = np.sin(.5*theta)
    cos_phipsi = np.cos(.5*(phi+psi))
    sin_phipsi = np.sin(.5*(phi+psi))
    q_dist[:, 0] = cos_theta*cos_phipsi
    q_dist[:, 1] = sin_theta*cos_phipsi
    q_dist[:, 2] = sin_theta*sin_phipsi
    q_dist[:, 3] = cos_theta*sin_phipsi
    q_norm = np.linalg.norm(q_dist, axis=1)
    q_dist /= q_norm[:, np.newaxis]
def set_euler(O_crds, H1_crds, H2_crds, xx, yy, zz, theta, phi, psi):
    """
    Retrieve Euler angles for water molecules with oxygen coordinates
    O_crds and hydrogen coordinates H1_crds,H2_crds.
    The frame of reference coordinate axes are given by xx, yy and zz.
    Results are written in place into theta, phi, psi (degrees).

    Fix: removed a leftover debug copy of psi (dead local) that served
    no purpose; all other logic is unchanged.
    """
    ### For definitions of euler angle see Supporting Material in
    ### E. P. Raman, A. D. MacKerell, J. Am. Chem. Soc. 2015, 150127114301002.
    ### Further reading on Euler angle calculation:
    ### J. Diebel, 2006, "Representing attitude: Euler angles, unit quaternions, and rotation vectors".
    ### water coordinate system (all normalized):
    ### O-H1 --> X
    ### (O-H1)x(O-H2) --> Z
    ### XxZ --> Y
    ###
    ### Line of nodes 'N' is the vector on the intersection between the xy plane of
    ### the water coordinate system and the lab coordinate system.
    xx1_wat = H1_crds - O_crds
    xx1_norm = np.linalg.norm(xx1_wat, axis=1)
    xx1_wat = np.einsum('ij,i->ij', xx1_wat, 1./xx1_norm)
    xx2_wat = H2_crds - O_crds
    xx2_norm = np.linalg.norm(xx2_wat, axis=1)
    xx2_wat = np.einsum('ij,i->ij', xx2_wat, 1./xx2_norm)
    zz_wat = np.cross(xx1_wat, xx2_wat)
    zz_norm = np.linalg.norm(zz_wat, axis=1)
    zz_wat = np.einsum('ij,i->ij', zz_wat, 1./zz_norm)
    yy_wat = np.cross(xx1_wat, zz_wat)
    yy_norm = np.linalg.norm(yy_wat, axis=1)
    yy_wat = np.einsum('ij,i->ij', yy_wat, 1./yy_norm)
    N_xy = np.cross(zz, zz_wat)
    N_xy_norm = np.linalg.norm(N_xy, axis=1)
    N_xy = np.einsum('ij,i->ij', N_xy, 1./N_xy_norm)
    ### Angle theta
    ### Angle between zz-axis vector and z-axis of the lab coordinate system.
    theta_dot = np.einsum('ij,j->i', zz_wat, zz)
    ### Angle phi
    ### Angle between line of nodes (xy lab frame / xy water frame) and xx-axis of lab frame
    phi_dot = np.einsum('ij,j->i', N_xy, xx)
    ### Angle psi
    ### Angle between line of nodes (xy lab frame / xy water frame) and xx-axis in wat frame
    psi_dot = np.einsum('ij,ij->i', xx1_wat, N_xy)
    ### dot products should be within [-1,+1]. However they might be slightly out of bounds (due to
    ### round-offs I guess). So bring them into bounds.
    theta_dot_check_upper = np.where(theta_dot > 1. )
    theta_dot_check_lower = np.where(theta_dot < -1. )
    phi_dot_check_upper = np.where(phi_dot > 1. )
    phi_dot_check_lower = np.where(phi_dot < -1. )
    psi_dot_check_upper = np.where(psi_dot > 1. )
    psi_dot_check_lower = np.where(psi_dot < -1. )
    theta_dot[theta_dot_check_upper] = 1.
    theta_dot[theta_dot_check_lower] = -1.
    phi_dot[phi_dot_check_upper] = 1.
    phi_dot[phi_dot_check_lower] = -1.
    psi_dot[psi_dot_check_upper] = 1.
    psi_dot[psi_dot_check_lower] = -1.
    ### Calculate angle from arccos function
    theta[:] = np.arccos(theta_dot)
    phi[:] = np.arccos(phi_dot)
    psi[:] = np.arccos(psi_dot)
    ### We must flip some of the psi and phi angles. The problem is, that we cannot
    ### make a difference between -psi and +psi, which however are two physically
    ### different observations.
    ###
    ### Phi Correction
    zz_tmp_labframe = np.cross(N_xy, xx)
    zz_tmp_labframe_norm = np.linalg.norm(zz_tmp_labframe, axis=1)
    zz_tmp_labframe = np.einsum('ij,i->ij', zz_tmp_labframe, 1./zz_tmp_labframe_norm)
    zz_tmp_labframe_dot = np.einsum('ij,j->i', zz_tmp_labframe, zz)
    phi[np.where(zz_tmp_labframe_dot < 0.)[0]] *= -1.
    ### Psi Correction
    zz_tmp_watframe = np.cross(N_xy, xx1_wat)
    zz_tmp_watframe_norm = np.linalg.norm(zz_tmp_watframe, axis=1)
    zz_tmp_watframe = np.einsum('ij,i->ij', zz_tmp_watframe, 1./zz_tmp_watframe_norm)
    zz_tmp_watframe_dot = np.einsum('ij,ij->i', zz_tmp_watframe, zz_wat)
    psi[np.where(zz_tmp_watframe_dot < 0.)[0]] *= -1.
    # Convert all three angles from radians to degrees in place.
    theta *= RAD2DEG
    phi *= RAD2DEG
    psi *= RAD2DEG
def make_grid(arrays, out=None):
    """
    !!! Adapted from:
    !!! http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
    Generate a cartesian product of input arrays.

    Fixes for Python 3 compatibility (behavior on Python 2 is unchanged):
    floor division ``//`` instead of ``/`` (the quotient is used as a
    slice bound, so true division raised TypeError), and ``range``
    instead of the removed ``xrange``.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> make_grid(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Rows per leading value; integer division so slice bounds stay ints.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the remaining columns recursively, then tile the first stripe.
        make_grid(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j*m:(j+1)*m, 1:] = out[0:m, 1:]
    return out
def bounding_box_frac(frac_structure, delta=np.ones(3), _buffer=0., verbose=False):
    """
    Input is structure in cart. or frac. coordinates as
    nx3 array (n= number of coordinates).
    Output is coordinate meshgrid array with coordinates of
    bounding box lattice as integers.

    *_buffer* (rounded to int) pads the box on every side; *delta* is the
    per-axis lattice step.

    Fix: the Python-2 print statements were a SyntaxError on Python 3;
    they are now print() calls.
    """
    # Truncating int conversion of the per-axis extrema.
    bounding_min = np.array( [ np.min(frac_structure[:,0]),
                               np.min(frac_structure[:,1]),
                               np.min(frac_structure[:,2]) ], dtype=int )
    bounding_max = np.array( [ np.max(frac_structure[:,0]),
                               np.max(frac_structure[:,1]),
                               np.max(frac_structure[:,2]) ], dtype=int )
    bounding_min -= int(np.round(_buffer))
    bounding_max += int(np.round(_buffer))
    if verbose:
        print("Bounding min. ", bounding_min)
        print("Bounding max. ", bounding_max)
        print(np.arange(bounding_min[2], bounding_max[2]+1, delta[2], dtype=int))
    return make_grid ( [ np.arange(bounding_min[0], bounding_max[0]+1, delta[0], dtype=int ),
                         np.arange(bounding_min[1], bounding_max[1]+1, delta[1], dtype=int ),
                         np.arange(bounding_min[2], bounding_max[2]+1, delta[2], dtype=int ) ] )
def py_axis_paral(query, target, verbose=0):
    """Average direction through the points in *query* (n x 3).

    Each ordered pair (i < j) contributes its unit direction vector;
    vectors anti-parallel to the first pair's direction are flipped
    before averaging.  The normalized mean is written into *target*.
    """
    accum = np.zeros(3)
    count = 0
    ref = None
    n_pts = query.shape[0]
    for i in range(n_pts):
        for j in range(i + 1, n_pts):
            if verbose:
                print("Iteration (%d %d)" % (i, j))
            direction = query[j] - query[i]
            direction /= np.linalg.norm(direction)
            if ref is None:
                # First vector found gives reference orientation
                ref = direction
            elif np.dot(ref, direction) < 0:
                # Reverse vector if direction opposite to reference vector
                direction *= -1
            accum += direction
            count += 1
    accum /= float(count)
    accum /= np.linalg.norm(accum)
    target[0] = accum[0]
    target[1] = accum[1]
    target[2] = accum[2]
def py_axis_ortho(query, target, verbose=0):
    """Average plane normal over all point triples in *query* (n x 3).

    Each triple (i < j < k) contributes the unit normal of the plane it
    spans; normals opposing the first triple's orientation are flipped
    before averaging.  The normalized mean is written into *target*.
    """
    accum = np.zeros(3, dtype=float)
    count = 0
    ref = None
    n_pts = query.shape[0]
    for i in range(n_pts):
        for j in range(i + 1, n_pts):
            for k in range(j + 1, n_pts):
                normal = np.cross(query[k] - query[i], query[j] - query[i])
                normal /= np.linalg.norm(normal)
                if ref is None:
                    # First vector found gives reference orientation
                    ref = normal
                elif np.dot(ref, normal) < 0:
                    # Reverse vector if direction opposite to reference vector
                    normal *= -1
                accum += normal
                count += 1
    # count is the binomial coefficient C(n_pts, 3): unordered triples.
    accum /= count
    accum /= np.linalg.norm(accum)
    target[0] = accum[0]
    target[1] = accum[1]
    target[2] = accum[2]
def quaternion_symmetric_normed_difference(q1, q2):
    """Sign-invariant quaternion distance.

    q and -q describe the same rotation, so take the smaller of
    ||q1 - q2|| and ||q1 + q2||, scaled by 2.
    """
    minus = np.linalg.norm(q1 - q2, axis=-1)
    plus = np.linalg.norm(q1 + q2, axis=-1)
    return 2. * np.minimum(minus, plus)
def euler_difference2(x, y):
    """Distance between Euler-angle triples (radian input).

    x/y are indexed [theta, phi, psi].  Theta is compared through its
    cosine; phi and psi are compared on the circle (wrap-around at 2*pi).
    Returns the Euclidean norm over the three component differences.
    """
    delta = np.zeros_like(x)
    delta[0] = np.absolute(np.cos(x[0]) - np.cos(y[0]))
    for idx in (1, 2):
        raw = np.absolute(x[idx] - y[idx])
        # Wrapped circular distance: never more than half a turn apart.
        delta[idx] = np.minimum(raw, 2.*np.pi - raw)
    return np.linalg.norm(delta, axis=0)
| 31.206
| 123
| 0.579248
|
acfca671feed19c576f8ae324be3f60babc858f5
| 839
|
py
|
Python
|
views.py
|
gitter-badger/boyd_bot_messenger
|
1636e4373db5fac981903273ba196c3e45d8a0a5
|
[
"MIT"
] | null | null | null |
views.py
|
gitter-badger/boyd_bot_messenger
|
1636e4373db5fac981903273ba196c3e45d8a0a5
|
[
"MIT"
] | null | null | null |
views.py
|
gitter-badger/boyd_bot_messenger
|
1636e4373db5fac981903273ba196c3e45d8a0a5
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired
from wtforms import StringField, PasswordField, SubmitField, HiddenField
# Blueprint that collects this module's public static-content routes.
pages = Blueprint("pages", __name__, template_folder="templates")
class RegisterForm(FlaskForm):
    """Login form for registering a user's university credentials."""
    # Hidden carrier for the registration id through the form round-trip.
    reg_id = HiddenField("reg_id")
    uni_id = StringField("University ID", validators=[DataRequired()])
    uni_pw = PasswordField("Password", validators=[DataRequired()])
    submit = SubmitField("Login")
@pages.route("/")
def index():
    """Render the landing page."""
    template = "index.html"
    return render_template(template)
@pages.route("/privacy")
def privacy():
    """Render the privacy-policy page."""
    template = "privacy.html"
    return render_template(template)
@pages.route("/terms")
def terms():
    """Render the terms-of-service page."""
    template = "terms.html"
    return render_template(template)
@pages.app_errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page together with the proper status code."""
    body = render_template("404.html")
    return body, 404
| 23.971429
| 72
| 0.743743
|
acfca7a0f9958283bd873b2b8b9e3e6b3cbc8d87
| 8,478
|
py
|
Python
|
setup.py
|
LEGEND-LX/utra-x-spahinx
|
bfd6255c9c950eb037169abc30020aa2f5c027b8
|
[
"BSD-2-Clause"
] | 2
|
2021-09-17T15:08:20.000Z
|
2021-10-01T16:59:44.000Z
|
setup.py
|
LEGEND-LX/java
|
dface7d2950ceef744256eb74b168a8f25e551ab
|
[
"BSD-2-Clause"
] | 1
|
2021-09-19T06:03:11.000Z
|
2021-09-19T06:03:29.000Z
|
setup.py
|
LEGEND-LX/java
|
dface7d2950ceef744256eb74b168a8f25e551ab
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import sys
from distutils import log
from io import StringIO
from setuptools import find_packages, setup
import sphinx
# Long description shown on PyPI comes straight from the README.
with open('README.rst') as f:
    long_desc = f.read()
# Fail fast on interpreters older than the minimum supported version.
if sys.version_info < (3, 5):
    print('ERROR: Sphinx requires at least Python 3.5 to run.')
    sys.exit(1)
# Hard runtime dependencies; optional sets live in extras_require below.
install_requires = [
    'sphinxcontrib-applehelp',
    'sphinxcontrib-devhelp',
    'sphinxcontrib-jsmath',
    'sphinxcontrib-htmlhelp',
    'sphinxcontrib-serializinghtml',
    'sphinxcontrib-qthelp',
    'Jinja2>=2.3',
    'Pygments>=2.0',
    'docutils>=0.12',
    'snowballstemmer>=1.1',
    'babel>=1.3',
    'alabaster>=0.7,<0.8',
    'imagesize',
    'requests>=2.5.0',
    'setuptools',
    'packaging',
]
# Optional dependency sets, installable as e.g. "Sphinx[test]".
extras_require = {
    # Environment Marker works for wheel 0.24 or later
    ':sys_platform=="win32"': [
        'colorama>=0.3.5',
    ],
    'docs': [
        'sphinxcontrib-websupport',
    ],
    'lint': [
        'flake8>=3.5.0',
        'isort',
        'mypy>=0.800',
        'docutils-stubs',
    ],
    'test': [
        'pytest',
        'pytest-cov',
        'html5lib',
        "typed_ast; python_version < '3.8'",
        'cython',
    ],
}
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
cmdclass = {}
class Tee:
    """File-like wrapper that mirrors every write into an internal buffer.

    Used to capture a stream (e.g. stderr during catalog compilation)
    while still forwarding output to the original destination.
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = StringIO()

    def write(self, s):
        # Forward to the wrapped stream and keep a private copy.
        self.stream.write(s)
        self.buffer.write(s)

    def flush(self):
        self.stream.flush()
# Register the extended compile_catalog command only when Babel is present;
# without Babel the stock command set is used unchanged.
try:
    from json import dump
    from babel.messages.frontend import compile_catalog
    from babel.messages.pofile import read_po
except ImportError:
    pass
else:
    class compile_catalog_plusjs(compile_catalog):
        """
        An extended command that writes all message strings that occur in
        JavaScript files to a JavaScript file along with the .mo file.
        Unfortunately, babel's setup command isn't built very extensible, so
        most of the run() code is duplicated here.
        """
        def run(self):
            # Capture stderr so babel compilation errors can be detected.
            try:
                sys.stderr = Tee(sys.stderr)
                compile_catalog.run(self)
            finally:
                if sys.stderr.buffer.getvalue():
                    print("Compiling failed.")
                    sys.exit(1)
            if isinstance(self.domain, list):
                for domain in self.domain:
                    self._run_domain_js(domain)
            else:
                self._run_domain_js(self.domain)
        def _run_domain_js(self, domain):
            """Emit a <domain>.js translation catalog next to each .mo file."""
            po_files = []
            js_files = []
            # Mirror babel's own file discovery: explicit input file, a single
            # locale, or every locale directory found under self.directory.
            if not self.input_file:
                if self.locale:
                    po_files.append((self.locale,
                                     os.path.join(self.directory, self.locale,
                                                  'LC_MESSAGES',
                                                  domain + '.po')))
                    js_files.append(os.path.join(self.directory, self.locale,
                                                 'LC_MESSAGES',
                                                 domain + '.js'))
                else:
                    for locale in os.listdir(self.directory):
                        po_file = os.path.join(self.directory, locale,
                                               'LC_MESSAGES',
                                               domain + '.po')
                        if os.path.exists(po_file):
                            po_files.append((locale, po_file))
                            js_files.append(os.path.join(self.directory, locale,
                                                         'LC_MESSAGES',
                                                         domain + '.js'))
            else:
                po_files.append((self.locale, self.input_file))
                if self.output_file:
                    js_files.append(self.output_file)
                else:
                    js_files.append(os.path.join(self.directory, self.locale,
                                                 'LC_MESSAGES',
                                                 domain + '.js'))
            for js_file, (locale, po_file) in zip(js_files, po_files):
                with open(po_file, encoding='utf8') as infile:
                    catalog = read_po(infile, locale)
                if catalog.fuzzy and not self.use_fuzzy:
                    continue
                log.info('writing JavaScript strings in catalog %r to %r',
                         po_file, js_file)
                jscatalog = {}
                for message in catalog:
                    # Only messages referenced from JS/HTML sources belong
                    # in the JavaScript catalog.
                    if any(x[0].endswith(('.js', '.js_t', '.html'))
                           for x in message.locations):
                        msgid = message.id
                        if isinstance(msgid, (list, tuple)):
                            msgid = msgid[0]
                        jscatalog[msgid] = message.string
                with open(js_file, 'wt', encoding='utf8') as outfile:
                    outfile.write('Documentation.addTranslations(')
                    dump({
                        'messages': jscatalog,
                        'plural_expr': catalog.plural_expr,
                        'locale': str(catalog.locale)
                    }, outfile, sort_keys=True, indent=4)
                    outfile.write(');')
    cmdclass['compile_catalog'] = compile_catalog_plusjs
# Package metadata and entry points; see install_requires/extras_require above.
setup(
    name='Sphinx',
    version=sphinx.__version__,
    url='http://sphinx-doc.org/',
    download_url='https://pypi.org/project/Sphinx/',
    license='BSD',
    author='Georg Brandl',
    author_email='georg@python.org',
    description='Python documentation generator',
    long_description=long_desc,
    long_description_content_type='text/x-rst',
    project_urls={
        "Code": "https://github.com/sphinx-doc/sphinx",
        "Issue tracker": "https://github.com/sphinx-doc/sphinx/issues",
    },
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Framework :: Setuptools Plugin',
        'Framework :: Sphinx',
        'Framework :: Sphinx :: Extension',
        'Framework :: Sphinx :: Theme',
        'Topic :: Documentation',
        'Topic :: Documentation :: Sphinx',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Printing',
        'Topic :: Software Development',
        'Topic :: Software Development :: Documentation',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: General',
        'Topic :: Text Processing :: Indexing',
        'Topic :: Text Processing :: Markup',
        'Topic :: Text Processing :: Markup :: HTML',
        'Topic :: Text Processing :: Markup :: LaTeX',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(exclude=['tests', 'utils']),
    package_data = {
        # PEP 561 marker so type checkers pick up the inline annotations.
        'sphinx': ['py.typed'],
    },
    include_package_data=True,
    entry_points={
        # Console scripts exposed by the package.
        'console_scripts': [
            'sphinx-build = sphinx.cmd.build:main',
            'sphinx-quickstart = sphinx.cmd.quickstart:main',
            'sphinx-apidoc = sphinx.ext.apidoc:main',
            'sphinx-autogen = sphinx.ext.autosummary.generate:main',
        ],
        'distutils.commands': [
            'build_sphinx = sphinx.setup_command:BuildDoc',
        ],
    },
    python_requires=">=3.5",
    install_requires=install_requires,
    extras_require=extras_require,
    cmdclass=cmdclass,
)
| 33.509881
| 80
| 0.522293
|
acfca83ac6cd5ef23c4e8fa2f2b656018ee59441
| 1,448
|
py
|
Python
|
api/anubis/lms/forum.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | null | null | null |
api/anubis/lms/forum.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | null | null | null |
api/anubis/lms/forum.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | null | null | null |
from typing import List
from anubis.models import (
db,
User,
Course,
InCourse,
ForumPost,
ForumCategory,
ForumPostInCategory,
ForumPostUpvote,
ForumPostComment,
ForumPostViewed,
)
from anubis.utils.auth.user import current_user
from anubis.utils.http import req_assert
def verify_post(post_id: str) -> ForumPost:
    """Fetch the forum post with the given id, asserting that it exists.

    :param post_id: primary key of the post to look up
    :return: the matching ForumPost row
    """
    # .first() yields None when no row matches the id.
    query = ForumPost.query.filter(ForumPost.id == post_id)
    post: ForumPost = query.first()
    req_assert(post is not None, message='Post does not exist')
    return post
def verify_post_owner(post_id: str) -> ForumPost:
    """Fetch a post and assert that the current user owns it.

    The failure message matches the "does not exist" wording,
    # presumably so other users' post ids are not revealed as existing.
    """
    post: ForumPost = verify_post(post_id)
    is_owner = post.owner_id == current_user.id
    req_assert(is_owner, message='Post does not exist')
    return post
def verify_post_comment(comment_id: str) -> ForumPostComment:
    """Fetch the comment with the given id, asserting that it exists.

    :param comment_id: primary key of the comment to look up
    :return: the matching ForumPostComment row
    """
    match = ForumPostComment.query.filter(ForumPostComment.id == comment_id)
    comment: ForumPostComment = match.first()
    req_assert(comment is not None, message='Comment does not exist')
    return comment
def verify_post_comment_owner(comment_id: str) -> ForumPostComment:
    """Fetch a comment and assert that the current user owns it."""
    comment: ForumPostComment = verify_post_comment(comment_id)
    owned = comment.owner_id == current_user.id
    req_assert(owned, message='Comment does not exist')
    return comment
def get_post_comments(post: ForumPost) -> List[ForumPostComment]:
    """Return every comment attached to the given post."""
    query = ForumPostComment.query.filter(ForumPostComment.post_id == post.id)
    return query.all()
| 27.846154
| 85
| 0.724448
|
acfca979dacd8bdda6ecd5367d32052116111451
| 544
|
py
|
Python
|
backend/blog_backend/manage.py
|
bluemonkey9241/django-django-react1
|
a2219b08f9d33452a82954b8a40c02621778f917
|
[
"MIT"
] | 62
|
2019-03-19T02:03:16.000Z
|
2022-03-17T06:51:08.000Z
|
backend/blog_backend/manage.py
|
bluemonkey9241/django-django-react1
|
a2219b08f9d33452a82954b8a40c02621778f917
|
[
"MIT"
] | 15
|
2020-07-20T08:55:52.000Z
|
2022-03-15T20:14:05.000Z
|
backend/blog_backend/manage.py
|
bluemonkey9241/django-django-react1
|
a2219b08f9d33452a82954b8a40c02621778f917
|
[
"MIT"
] | 37
|
2018-08-17T15:55:50.000Z
|
2022-02-24T16:58:01.000Z
|
#!/usr/bin/env python
# Django management entry point for the blog_backend project.
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller has
    # already configured DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog_backend.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| 34
| 76
| 0.689338
|
acfcab52f8088182a6067ce7f8ce5e21df55115d
| 1,178
|
py
|
Python
|
PYTHON/FCFS.py
|
kunalmpandey/open-source-contribution
|
7ad65c40720cab0b0b80d466af9dc276cf235686
|
[
"MIT"
] | 2
|
2022-03-10T17:37:24.000Z
|
2022-03-10T17:40:05.000Z
|
PYTHON/FCFS.py
|
kunalmpandey/open-source-contribution
|
7ad65c40720cab0b0b80d466af9dc276cf235686
|
[
"MIT"
] | null | null | null |
PYTHON/FCFS.py
|
kunalmpandey/open-source-contribution
|
7ad65c40720cab0b0b80d466af9dc276cf235686
|
[
"MIT"
] | 1
|
2021-12-29T16:13:52.000Z
|
2021-12-29T16:13:52.000Z
|
# First-Come-First-Served (FCFS) CPU scheduling demo.
# Reads process ids, arrival times and burst times from stdin, then
# prints a per-process table plus average waiting/turnaround times.
from prettytable import PrettyTable

x = PrettyTable()  # result table
pid = list(map(int, input("Enter process ids: ").split()))
arrival = list(map(int, input("Enter arrival time: ").split()))
burst = list(map(int, input("Enter the burst time: ").split()))
# Sort the three parallel lists by arrival time, keeping pid/arrival/
# burst entries aligned (simple exchange sort).
for i in range(len(pid)-1):
    for j in range(i, len(pid)):
        if arrival[j] < arrival[i]:
            pid[j], pid[i] = pid[i], pid[j]
            arrival[j], arrival[i] = arrival[i], arrival[j]
            burst[j], burst[i] = burst[i], burst[j]
ct = 0  # running completion time of the schedule
wt = [0]*len(pid)  # per-process waiting time
for i in range(len(pid)):
    if arrival[i] <= ct:
        # CPU is busy when the process arrives: it waits until ct.
        wt[i] = ct - arrival[i]
        ct = ct + burst[i]
    else:
        # CPU is idle until the process arrives: no waiting.
        wt[i] = 0
        ct = arrival[i] + burst[i]
# wt = [0]
# for i in range(len(pid)-1):
#     wt.append(wt[i] + burst[i])
tt = []  # turnaround time = waiting time + burst time
for i in range(len(pid)):
    tt.append(wt[i] + burst[i])
x.field_names = ["Process id", "Burst time", "Waiting time", "Turnaround time"]
for i in range(len(pid)):
    x.add_row([pid[i], burst[i], wt[i], tt[i]])
print(x)
avg_wt = sum(wt)/len(pid)
print("Average waiting time:", avg_wt)
avg_tt = sum(tt)/len(pid)
print("Average turn around time:", avg_tt)
| 28.047619
| 80
| 0.55348
|
acfcacb1300baff6da3c5f91b3e800e9f145db79
| 295
|
py
|
Python
|
collectors/admin.py
|
zxyctn/PhenObs
|
c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf
|
[
"BSD-3-Clause"
] | null | null | null |
collectors/admin.py
|
zxyctn/PhenObs
|
c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf
|
[
"BSD-3-Clause"
] | 44
|
2021-10-19T15:59:57.000Z
|
2022-03-23T14:39:30.000Z
|
collectors/admin.py
|
zxyctn/PhenObs
|
c5ed2e2fdd6a1bee5085c1336dfba31bf9e6abdf
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Collector
class CollectorAdmin(admin.ModelAdmin):
    # Django admin configuration for Collector rows.
    # Columns shown in the changelist view.
    list_display = (
        "id",
        "user_id",
        "gardens",
    )
    # Fields searched by the admin search box.
    search_fields = ("id", "user_id")
    # Changelist pagination size.
    list_per_page = 25


admin.site.register(Collector, CollectorAdmin)
| 17.352941
| 46
| 0.650847
|
acfcace9fcf0f65600410a008570a11362682443
| 3,838
|
py
|
Python
|
src/pallas/csv.py
|
akamai/pallas
|
63aba588be3ab97b5e6183e4c560a45c82870cdc
|
[
"Apache-2.0"
] | 7
|
2020-04-03T19:40:35.000Z
|
2022-01-03T17:28:58.000Z
|
src/pallas/csv.py
|
akamai/pallas
|
63aba588be3ab97b5e6183e4c560a45c82870cdc
|
[
"Apache-2.0"
] | 1
|
2022-01-05T13:29:18.000Z
|
2022-01-05T13:29:18.000Z
|
src/pallas/csv.py
|
akamai/pallas
|
63aba588be3ab97b5e6183e4c560a45c82870cdc
|
[
"Apache-2.0"
] | 1
|
2020-10-23T11:31:54.000Z
|
2020-10-23T11:31:54.000Z
|
# Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CSV reading and writing.
Athena uses a custom CSV format:
- All values are quoted. Quotes in values are doubled.
- Empty unquoted strings denotes a missing values.
Unfortunately, the format cannot be parsed using Python builtin CSV module.
"""
from __future__ import annotations
import io
import re
from typing import Iterable, Iterator, Optional, Sequence, TextIO
CSVValue = Optional[str]
CSVRow = Sequence[CSVValue]
control_re = re.compile(r'\n|,|"')
quote_re = re.compile(r'"')
def _encode_value(value: CSVValue) -> str:
if value is None:
return ""
quoted = value.replace('"', '""')
return f'"{quoted}"'
def _decode_value(raw: str) -> CSVValue:
if raw == "":
return None
if not (raw.startswith('"') and raw.endswith('"')):
raise ValueError(f"Value not quoted: {raw}")
parts = raw[1:-1].split('""')
if any('"' in part for part in parts):
raise ValueError(f"Invalid quoting: {raw}")
return '"'.join(parts)
def _tokenize(stream: TextIO) -> Iterator[tuple[str, str]]:
    """
    Tokenize CSV input.

    Yields (value, control) pairs, where:
    - Value is the raw field value, including all quoting.
    - Control is one of:
      "," (field separator), "\n" (line separator), "" (end of file).
    """
    buffer = ""  # Chunk of text read from the stream.
    pos = 0  # Position in the buffer.
    value = ""  # Last field, possibly read in multiple chunks.
    quoted = False  # True when inside quoted value.
    while True:
        if not pos < len(buffer):
            # Current chunk fully consumed -- fetch the next one.
            buffer = stream.read(io.DEFAULT_BUFFER_SIZE)
            pos = 0
            if not buffer:
                break  # End of file
        # Inside quotes only a closing quote is significant; outside,
        # separators, newlines and opening quotes are.
        pattern = quote_re if quoted else control_re
        match = pattern.search(buffer, pos)
        if match:
            start, end = match.span()
            value += buffer[pos:start]
            control = buffer[start:end]
            pos = end
        else:
            # No control character left in this chunk: keep accumulating
            # the current field across chunk boundaries.
            value += buffer[pos:]
            control = ""
            pos = len(buffer)
        if control == '"':
            # Quote characters stay part of the raw value; they only
            # toggle the quoting state.
            value += control
            quoted = not quoted
        elif control:
            yield value, control
            value = ""
    if quoted:
        raise ValueError("Unterminated quoted value.")
    # Emit whatever remains buffered together with the EOF marker.
    yield value, ""
def read_csv(stream: TextIO) -> Iterator[CSVRow]:
    """
    Read CSV using the format that Athena uses.

    All values are quoted (quotes are doubled); an empty unquoted string
    denotes ``None``.

    :param stream: readable file-like
    :return: iterator over records as tuples
    :raises ValueError: on bad quoting or a missing trailing newline
    """
    record = []
    for value, control in _tokenize(stream):
        if not control:
            # EOF marker -- handled after the loop.
            continue
        record.append(_decode_value(value))
        if control == "\n":
            yield tuple(record)
            record = []
    if record:
        raise ValueError("Missing trailing newline.")
def write_csv(data: Iterable[CSVRow], stream: TextIO) -> None:
    """
    Write CSV using the format that Athena uses.

    All values are quoted (quotes are doubled); ``None`` is written as
    an empty unquoted string.

    :param data: sequence of records
    :param stream: writable file-like
    """
    for record in data:
        encoded = [_encode_value(field) for field in record]
        stream.write(",".join(encoded) + "\n")
| 28.857143
| 75
| 0.620375
|
acfcacf5ce9eba1e421ff59ec538f54d9d013c4d
| 2,388
|
py
|
Python
|
new/move.py
|
roboapex/nrc2021-fe-hexacore
|
a17e46c58fd9367084555eac86a6ba373003fc71
|
[
"MIT"
] | 1
|
2021-07-16T07:30:43.000Z
|
2021-07-16T07:30:43.000Z
|
new/move.py
|
roboapex/nrc2021-fe-hexacore
|
a17e46c58fd9367084555eac86a6ba373003fc71
|
[
"MIT"
] | null | null | null |
new/move.py
|
roboapex/nrc2021-fe-hexacore
|
a17e46c58fd9367084555eac86a6ba373003fc71
|
[
"MIT"
] | null | null | null |
# Python Script
# https://www.electronicshub.org/raspberry-pi-l298n-interface-tutorial-control-dc-motor-l298n-raspberry-pi/
# https://www.youtube.com/watch?v=2bganVdLg5Q
import RPi.GPIO as GPIO
from time import sleep

# BCM pin numbers for the L298N motor driver.
# NOTE(review): names suggest in1/in2 drive motor 1 and in3/in4 motor 2,
# with en1/en2 the PWM enable pins -- confirm against the wiring.
in1 = 24
in2 = 23
in3 = 17
in4 = 27
en2 = 22
en1 = 25
temp1=1  # remembered direction: 1 = forward, 0 = backward

GPIO.setmode(GPIO.BCM)
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(en1,GPIO.OUT)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
GPIO.setup(in3,GPIO.OUT)
GPIO.setup(in4,GPIO.OUT)
GPIO.setup(en2,GPIO.OUT)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
# 1 kHz PWM on both enable pins; initial duty cycles 25% / 22%.
p=GPIO.PWM(en1,1000)
p2=GPIO.PWM(en2,1000)
p.start(25)
p2.start(22)
print("\n")
print("The default speed & direction of motor is LOW & Forward.....")
print("r-run s-stop f-forward b-backward l-low m-medium h-high e-exit")
print("\n")

# Interactive command loop: one letter per command, until 'e' exits.
while(1):
    x=input()
    if x=='r':
        # 'run' resumes motion in the last remembered direction.
        print("run")
        if(temp1==1):
            GPIO.output(in1,GPIO.HIGH)
            GPIO.output(in2,GPIO.LOW)
            GPIO.output(in3,GPIO.HIGH)
            GPIO.output(in4,GPIO.LOW)
            print("forward")
            x='z'
        else:
            GPIO.output(in1,GPIO.LOW)
            GPIO.output(in2,GPIO.HIGH)
            GPIO.output(in3,GPIO.LOW)
            GPIO.output(in4,GPIO.HIGH)
            print("backward")
            x='z'
    elif x=='s':
        # Stop: both direction pins low on both motors.
        print("stop")
        GPIO.output(in1,GPIO.LOW)
        GPIO.output(in2,GPIO.LOW)
        GPIO.output(in3,GPIO.LOW)
        GPIO.output(in4,GPIO.LOW)
        x='z'
    elif x=='f':
        print("forward")
        GPIO.output(in1,GPIO.HIGH)
        GPIO.output(in2,GPIO.LOW)
        GPIO.output(in3,GPIO.HIGH)
        GPIO.output(in4,GPIO.LOW)
        temp1=1
        x='z'
    elif x=='b':
        print("backward")
        GPIO.output(in1,GPIO.LOW)
        GPIO.output(in2,GPIO.HIGH)
        GPIO.output(in3,GPIO.LOW)
        GPIO.output(in4,GPIO.HIGH)
        temp1=0
        x='z'
    elif x=='l':
        # Speed presets adjust the PWM duty cycle on en1.
        print("low")
        p.ChangeDutyCycle(25)
        x='z'
    elif x=='m':
        print("medium")
        p.ChangeDutyCycle(50)
        x='z'
    elif x=='h':
        # NOTE(review): only 'h' also changes p2 -- 'l'/'m' leave the
        # second motor's duty cycle untouched; confirm this is intended.
        print("high")
        p.ChangeDutyCycle(75)
        p2.ChangeDutyCycle(75)
        x='z'
    elif x=='e':
        # Release all GPIO pins before exiting.
        GPIO.cleanup()
        print("GPIO Clean up")
        break
    else:
        print("<<< wrong data >>>")
        print("please enter the defined data to continue.....")
| 21.908257
| 107
| 0.563233
|
acfcad1afbc4195575f3b1c885becc6f4c84380d
| 29,194
|
py
|
Python
|
code/network/network_creation_functions.py
|
JanaLasser/uni_SEIRX
|
9c4f16924ba78c85c7621b192f5c5f00ddd473b9
|
[
"MIT"
] | 1
|
2021-12-05T23:58:12.000Z
|
2021-12-05T23:58:12.000Z
|
code/network/network_creation_functions.py
|
JanaLasser/uni_SEIRX
|
9c4f16924ba78c85c7621b192f5c5f00ddd473b9
|
[
"MIT"
] | null | null | null |
code/network/network_creation_functions.py
|
JanaLasser/uni_SEIRX
|
9c4f16924ba78c85c7621b192f5c5f00ddd473b9
|
[
"MIT"
] | null | null | null |
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
from os.path import join
semester_start = '2019-10-01'
semester_end = '2020-02-28'
study_map = {
'Bachelorstudium' : 'bachelor',
'Masterstudium' : 'master',
'Dr.-Studium d.technischen Wissenschaften' : 'PhD',
'Bachelorstudium Lehramt Sek (AB)' : 'bachelor',
'Erweiterungsstudium Bachelor (Sek. AB)' : 'bachelor',
'Besuch einzelner Lehrveranstaltungen' : 'non-degree programme',
'Universitätslehrgang' : 'non-degree programme',
'Dr.-Studium der Naturwissenschaften' : 'PhD',
'Lehramtsstudium' : 'bachelor & master',
'Humanmedizin' : 'bachelor & master',
'Doktoratsstudium' : 'PhD',
'Masterstudium Lehramt Sek (AB)' : 'master',
'Individuelles Masterstudium' : 'master',
'Dr.-Studium d.montanist. Wissenschaften' : 'PhD',
'Erweiterungsstudium' : 'bachelor & master',
'Rechtswissenschaften' : 'bachelor & master',
'PhD-Studium (Doctor of Philosophy)' : 'PhD',
'Masterstudium Übersetzen' : 'master',
'Individuelles Bachelorstudium' : 'bachelor',
'Dr.-Studium der Philosophie' : 'PhD',
'Bachelorst.Transkulturelle Kommunikation' : 'bachelor',
'Masterst. Übersetzen u.Dialogdolmetschen' : 'master',
'Dr.-Studium d.Sozial- u.Wirtschaftswiss.' : 'PhD',
'Erweiterungsstudium Master (Sek. AB)' : 'master',
'Erweiterungsstudium gemäß § 54c UG' : 'bachelor',
'Dr.-Studium der angew. med. Wissenschaft' : 'PhD',
'Dr.-Studium der Bodenkultur' : 'PhD',
'Bühnengestaltung' : 'bachelor & master',
'Pharmazie' : 'bachelor & master',
'Doctor of Philosophy-Doktoratsstudium' : 'PhD',
'Dr.-Studium der medizin. Wissenschaft' : 'PhD',
'Individuelles Diplomstudium' : 'bachelor & master',
'Maschinenbau' : 'bachelor & master'
}
def get_study_data(study_id, studies, students, lecturers, groups, rooms,
dates):
study_data = studies[['study_id', 'study_name']]\
.drop_duplicates()\
.set_index('study_id')
print('data for study {} ({})'\
.format(study_id, study_data.loc[study_id, 'study_name']))
# take all students from the sample study
sample_student_ids = studies[studies['study_id'] == study_id]\
['student_id'].unique()
# remove all the students that are not in the student data
sample_student_ids = list(set(sample_student_ids)\
.intersection(students['student_id']))
sample_students = students[students['student_id'].isin(sample_student_ids)]
print('\tthe study has {}/{} students'\
.format(len(sample_student_ids), len(students['student_id'].unique())))
# take all lectures in which the sample students participate
sample_lecture_ids = sample_students['lecture_id'].unique()
print('\tthe students participate in {}/{} available lectures'\
.format(len(sample_lecture_ids), len(students['lecture_id'].unique())))
# take all groups that are part of the sample lectures
sample_group_ids = sample_students['group_id'].unique()
print('\tthe lectures have {} groups and the sample students participate in {} of them'\
.format(len(groups[groups['lecture_id'].isin(sample_lecture_ids)]),
len(sample_group_ids)))
# take all lecturers which are teaching the sample groups
sample_lecturers = lecturers[lecturers['group_id'].isin(sample_group_ids)]
sample_lecturer_ids = sample_lecturers['lecturer_id'].unique()
print('\tthe groups are taught by {}/{} of the available lecturers'\
.format(len(sample_lecturer_ids), len(lecturers['lecturer_id'].unique())))
# take all rooms that sample groups are taught in
sample_room_ids = dates[dates['group_id']\
.isin(sample_group_ids)]['room_id'].unique()
print('\tthe groups are taught in {}/{} of the available rooms'\
.format(len(sample_room_ids), len(rooms['room_id'].unique())))
return (sample_student_ids, sample_students, sample_lecture_ids,
sample_group_ids, sample_lecturers, sample_lecturer_ids,
sample_room_ids)
def add_students(G, student_df, studies_df):
existing_students = set([n for n, data in G.nodes(data=True) if \
data['type'] == 'unistudent'])
new_students = set(student_df['student_id']).difference(existing_students)
student_ids = list(new_students)
student_df = student_df[student_df['student_id'].isin(new_students)]
studies_df = studies_df.set_index(['student_id', 'study_id'])
print('\tadding {} students'.format(len(student_ids)))
# Students can have more than one study. Find a student's main study
# by looking at the study id of the individual lectures they visit.
# A student's main study in the given semester is the study from which
# the majority of their lectures stems.
lecture_counts = student_df[['student_id', 'study_id', 'lecture_id']]\
.groupby(by=['student_id', 'study_id'])\
.agg('count')\
.rename(columns={'lecture_id':'lecture_count'})\
.sort_values(by='lecture_count', ascending=False)\
.reset_index()
main_studies = lecture_counts[['student_id', 'study_id']]\
.drop_duplicates(subset=['student_id'])\
.set_index('student_id')
# add information whether the student is a TU Graz or NaWi student
study_labels = pd.read_csv(join('../data/cleaned', 'study_labels.csv'))
label_map = {row['study_id']:row['study_label'] for i, row in \
study_labels.iterrows()}
main_studies['study_label'] = main_studies['study_id']\
.replace(label_map)
no_study_found = 0
for student_id in student_ids:
# get the main study and the term number for the main study
main_study = main_studies.loc[student_id, 'study_id']
study_type = main_studies.loc[student_id, 'study_label']
try:
term = studies_df.loc[student_id, main_study]['term_number']
except KeyError:
no_study_found += 1
term = np.nan
# add the student as a node to the network and all information
# we have for the student as node attributes.
# Note: the attribute "unit" is a meaningless artifact that we
# need to include to satisfy the design conditions of the contact
# network for the agent based simulation
G.add_node(student_id)
nx.set_node_attributes(G, {student_id:{
'type':'unistudent',
'main_study':main_study,
'study_type':study_type,
'term':term,
'unit':1}
})
print('\tno study found for {} students'.format(no_study_found))
def add_students_dummy(G, student_df):
    """Add bare student nodes (type attribute only) to the graph.

    Students already present in ``G`` as 'unistudent' nodes are skipped.

    :param G: networkx graph, modified in place
    :param student_df: frame with a 'student_id' column
    """
    present = {n for n, data in G.nodes(data=True)
               if data['type'] == 'unistudent'}
    to_add = list(set(student_df['student_id']) - present)
    print('\tadding {} students'.format(len(to_add)))
    for sid in to_add:
        G.add_node(sid)
        nx.set_node_attributes(G, {sid: {'type': 'unistudent'}})
def add_lecturers(G, lecturer_df, organisation_df):
existing_lecturers = set([n for n, data in G.nodes(data=True) if \
data['type'] == 'lecturer'])
new_lecturers = set(lecturer_df['lecturer_id']).difference(existing_lecturers)
lecturer_ids = list(new_lecturers)
print('\tadding {} lecturers'.format(len(lecturer_ids)))
# TODO: map units to camspuses
for lecturer_id in lecturer_ids:
G.add_node(lecturer_id)
orgs = organisation_df[organisation_df['lecturer_id'] == lecturer_id]\
.set_index('lecturer_id')
nx.set_node_attributes(G, {lecturer_id:{
'type':'lecturer',
'organisations':list(orgs.index),
'unit':1}
})
def add_lecturers_dummy(G, lecturer_df):
    """Add bare lecturer nodes (type attribute only) to the graph.

    Lecturers already present in ``G`` as 'lecturer' nodes are skipped.

    :param G: networkx graph, modified in place
    :param lecturer_df: frame with a 'lecturer_id' column
    """
    present = {n for n, data in G.nodes(data=True)
               if data['type'] == 'lecturer'}
    to_add = list(set(lecturer_df['lecturer_id']) - present)
    print('\tadding {} lecturers'.format(len(to_add)))
    for lid in to_add:
        G.add_node(lid)
        nx.set_node_attributes(G, {lid: {'type': 'lecturer'}})
def link_event_members(G, group1, group2, wd, day, duration, event_type,
                       lecture_type, link_type):
    """Create (or extend) contact edges between all pairs across two groups.

    For every unordered pair (n1, n2) with n1 != n2, an edge keyed by
    '{n1}{n2}d{weekday}' is created. If the pair already shares an edge
    with that key (contact on the same day through another event), the
    event's duration is added to the existing edge instead.

    :param G: contact MultiGraph, modified in place
    :param group1: first collection of node ids
    :param group2: second collection of node ids
    :param wd: weekday number used in the edge key
    :param day: date stored on newly created edges
    :param duration: contact duration added per event
    :param event_type: event label stored on new edges
    :param lecture_type: lecture type label stored on new edges
    :param link_type: e.g. 'student_student' or 'student_lecturer'
    """
    edge_keys = []
    for n1 in group1:
        for n2 in group2:
            # canonical (sorted) order so each unordered pair maps to
            # exactly one edge key
            tmp = [n1, n2]
            tmp.sort()
            n1, n2 = tmp
            key = '{}{}d{}'.format(n1, n2, wd)
            # no self-loops
            if n1 != n2:
                # if edge hasn't already been counted for this lecture/group
                if key not in edge_keys:
                    # if the nodes already have a connection on the same
                    # day through a different lecture or group
                    if G.has_edge(n1, n2, key=key):
                        G[n1][n2][key]['duration'] += duration
                    # if the edge is completely new
                    else:
                        G.add_edge(n1, n2, \
                            link_type = link_type,
                            event_type = event_type,
                            lecture_type = lecture_type,
                            day = day,
                            weekday = wd,
                            duration = duration,
                            key = key)
                    edge_keys.append(key)
def get_event_information(event_dates, events, students, lecturers, rooms, frac,
event_type):
assert len(event_dates) == 1
id_name = '{}_id'.format(event_type)
event_id = event_dates[id_name].values[0]
# figure out for how long [minutes] the event went on
duration = event_dates['duration'].values[0]
# figure out which lecture type the event belongs to
lecture_type = events[events["lecture_id"] == \
event_dates["lecture_id"].values[0]]["lecture_type"].values[0]
students_in_event = students[\
students[id_name] == event_id]['student_id'].unique()
lecturers_in_event = lecturers[\
lecturers[id_name] == event_id]['lecturer_id'].unique()
# if we do not allow for overbooking, we remove excess students that
# surpass the room's capacity, even if 100% occupancy is allowed
if frac != 'overbooked':
# figure out which room the group was taught in and how many seats
# that room has
room = event_dates['room_id'].values[0]
seats = rooms[rooms['room_id'] == room]['seats']
if len(seats) == 0 or seats.values[0] != seats.values[0]:
seats = np.nan
print('no seat information for room {} found'.format(room))
else:
seats = seats.values[0]
# remove a fraction of students from the lecture rooms
if seats == seats: # nan-check
# the number of students to remove is the difference between the
# students that signed up for the lecture and the capacity of the
# room, calculated as it's total capacity multiplied by an occupancy
# fraction
available_seats = int(np.floor((frac * seats)))
else:
available_seats = seats
students_to_remove = max(0, len(students_in_event) - available_seats)
if students_to_remove > 0:
print('removing {}/{} students from room with {:1.0f} seats (occupancy {:1.0f}%)'\
.format(students_to_remove, len(students_in_event),
seats, frac * 100))
students_in_event = np.random.choice(students_in_event,
len(students_in_event) - students_to_remove, replace=False)
return students_in_event, lecturers_in_event, duration, lecture_type
def link_event(G, students_in_event, lecturers_in_event, wd, day, duration,
               event_type, lecture_type):
    """Create intra-event contact edges for all participant pairs.

    Delegates to link_event_members once per contact class:
    student-student, student-lecturer and lecturer-lecturer.
    """
    contact_classes = [
        (students_in_event, students_in_event, 'student_student'),
        (students_in_event, lecturers_in_event, 'student_lecturer'),
        (lecturers_in_event, lecturers_in_event, 'lecturer_lecturer'),
    ]
    for group_a, group_b, link_type in contact_classes:
        link_event_members(G, group_a, group_b, wd, day, duration,
                           event_type, lecture_type, link_type)
def add_event_contacts(G, students, lecturers, events, dates, rooms, day, frac,
                       event_type):
    """Add contact edges for all events of one type taking place on a day.

    Handles the fact that one event id can map to several dates on the
    same day (repetitions at different times, parallel rooms, or a mix
    of both) without duplicating students or lecturers across the
    sub-events.

    :param G: contact MultiGraph, modified in place
    :param students: student <-> event membership frame
    :param lecturers: lecturer <-> event membership frame
    :param events: event definition frame
    :param dates: per-date schedule frame (date, times, room)
    :param rooms: room capacity frame
    :param day: calendar day to process
    :param frac: allowed room occupancy fraction, or 'overbooked'
    :param event_type: 'group' (lectures) or 'exam'
    """
    wd = get_weekday(day)
    day = str(day)
    id_name = '{}_id'.format(event_type)
    print(id_name)
    new_id_name = 'new_{}_id'.format(event_type)
    # BUG FIX: the assert message was print(...), which evaluates to
    # None; use a plain string so a failure reports something readable.
    assert event_type in ['group', 'exam'], 'unexpected event encountered!'
    day_dates = dates[dates['date'] == pd.to_datetime(day)]
    event_ids = set(events[id_name])\
        .intersection(set(dates[id_name]))
    # most of the following complicated logic is due to the fact that [event]_id
    # is not unique for different dates on the same day. There can be multiple
    # instances of the same event (i.e. same ID) on the same day, either at the
    # same time or at different times (hopefully not a combination). These
    # cases need to be dealt with differently, to ensure no students or
    # lecturers are cloned.
    for event_id in event_ids:
        # identify all dates on the given day associated with the event id
        event_dates = day_dates[day_dates[id_name] == event_id]
        # event did not take place on the given day
        if len(event_dates) == 0:
            pass
        # simple case: one event ID is associated with one date
        elif len(event_dates) == 1:
            students_in_event, lecturers_in_event, duration, lecture_type = \
                get_event_information(event_dates, events, students, lecturers,
                                      rooms, frac, event_type)
            link_event(G, students_in_event, lecturers_in_event, wd, day,
                       duration, event_type, lecture_type)
        # multiple dates for a single event but all start at different times:
        # assume that the same students went to all dates and add contact
        # durations for all dates accordingly
        elif len(event_dates.drop_duplicates(subset=['start_time'])) == \
                len(event_dates):
            for start_time in event_dates['start_time']:
                sub_event_dates = event_dates[event_dates['start_time'] ==
                                              start_time]
                students_in_event, lecturers_in_event, duration, lecture_type = \
                    get_event_information(sub_event_dates, events, students,
                                          lecturers, rooms, frac, event_type)
                link_event(G, students_in_event, lecturers_in_event, wd, day,
                           duration, event_type, lecture_type)
        # multiple dates for a single event that all start at the same time
        # in different rooms: distribute students and lecturers to the
        # pre-computed sub-events (stored as "new_[event]_id") and only
        # create contacts within each room
        elif (len(event_dates.drop_duplicates(subset=['start_time'])) == 1) and \
             (len(event_dates.drop_duplicates(subset=['room_id'])) ==
              len(event_dates)):
            # make sure every one of the sub-events has a new ID
            # BUG FIX: the message referenced the undefined name `event`
            # (NameError whenever the assertion fired); use `event_type`.
            assert len(event_dates) == len(event_dates[new_id_name].unique()), \
                'not enough sub-events for {} {} on day {}'\
                .format(event_type, event_id, day)
            for room in event_dates['room_id']:
                sub_event_dates = event_dates[event_dates['room_id'] ==
                                              room].copy()
                sub_event_dates = sub_event_dates\
                    .drop(columns=[id_name])\
                    .rename(columns={new_id_name: id_name})
                students_in_event, lecturers_in_event, duration, lecture_type = \
                    get_event_information(sub_event_dates, events, students,
                                          lecturers, rooms, frac, event_type)
                link_event(G, students_in_event, lecturers_in_event, wd, day,
                           duration, event_type, lecture_type)
        # some dates are at the same time, some at different times
        elif (len(event_dates.drop_duplicates(subset=['start_time'])) > 1) and \
             (len(event_dates.drop_duplicates(subset=['start_time'])) <
              len(event_dates)):
            print('Dealing with edge case: {} {} on {}'
                  .format(event_type, event_id, day))
            # drop dates that are completely contained within other dates
            to_drop = []
            for n, row1 in event_dates.iterrows():
                start_time1 = row1['start_time']
                duration1 = row1['duration']
                end_time1 = row1['end_time']
                for m, row2 in event_dates.iterrows():
                    start_time2 = row2['start_time']
                    duration2 = row2['duration']
                    end_time2 = row2['end_time']
                    if (n != m) and (duration1 < duration2) and \
                       (start_time1 >= start_time2) and \
                       (end_time1 <= end_time2):
                        to_drop.append(n)
            to_drop = list(set(to_drop))
            for index in to_drop:
                event_dates = event_dates.drop(index)
            # split into events at the same time but in different rooms and
            # events that start at different times
            for start_time in event_dates['start_time'].unique():
                tmp_event_dates = event_dates[event_dates['start_time'] ==
                                              start_time].copy()
                # events that occur at the same time
                if len(tmp_event_dates) > 1:
                    # events starting at the same time must occur in
                    # different rooms
                    assert len(tmp_event_dates['room_id'].unique()) > 1
                    tmp_event_dates = tmp_event_dates\
                        .drop(columns=[id_name])\
                        .rename(columns={new_id_name: id_name})
                    # iterate over all locations at which the event occurs
                    for room in tmp_event_dates['room_id']:
                        sub_event_dates = tmp_event_dates[
                            tmp_event_dates['room_id'] == room].copy()
                        students_in_event, lecturers_in_event, duration, \
                            lecture_type = \
                            get_event_information(sub_event_dates, events,
                                                  students, lecturers, rooms,
                                                  frac, event_type)
                        link_event(G, students_in_event, lecturers_in_event,
                                   wd, day, duration, event_type, lecture_type)
                # events that occur at a different time
                else:
                    students_in_event, lecturers_in_event, duration, lecture_type = \
                        get_event_information(tmp_event_dates, events, students,
                                              lecturers, rooms, frac, event_type)
                    link_event(G, students_in_event, lecturers_in_event, wd,
                               day, duration, event_type, lecture_type)
        else:
            print('something happened that I didnt think could happen!')
def add_unistudent_contacts(G, level):
    """Add a fraction ``level`` of extra random student-student edges.

    For every weekday, counts the existing student-student edges and
    adds ``round(level * count)`` new edges between uniformly chosen
    student pairs, avoiding self-loops and duplicate (pair, weekday)
    edge keys.

    :param G: contact MultiGraph, modified in place
    :param level: fraction of existing student-student edges to add
    """
    print('{:1.1f}% additional contacts between students'.format(level * 100))
    students = [n[0] for n in G.nodes(data=True) if
                n[1]['type'] == 'unistudent']
    weekdays = {1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday',
                5: 'Friday', 6: 'Saturday', 7: 'Sunday'}
    for wd in range(1, len(weekdays) + 1):
        print()
        # all edge keys between students on the given weekday
        keys = [e[2] for e in G.edges(keys=True, data=True) if
                (e[3]['weekday'] == wd) and
                (e[3]['link_type'] in ['student_student',
                                       'student_student_additional'])]
        # the network holds one week of data, so each weekday maps to
        # exactly one calendar date
        days = set([e[2]['day'] for e in G.edges(data=True) if
                    e[2]['weekday'] == wd])
        assert len(days) == 1
        day = list(days)[0]
        # number of additional links as a fraction of existing links
        N_unistudent_edges = len(keys)
        N_additional_links = round(level * N_unistudent_edges)
        print('\t{}: adding {} additional links between students'
              .format(weekdays[wd], N_additional_links))
        # add links between randomly chosen students (no dependence on
        # campus, study or semester); redraw on self-pairs or existing keys
        for _ in range(N_additional_links):
            # BUG FIX: the original while condition read `key` before it
            # was ever assigned, raising UnboundLocalError on the first
            # draw. Restructured as draw-then-test.
            while True:
                s1 = np.random.choice(students)
                s2 = np.random.choice(students)
                if s1 == s2:
                    continue
                tmp = [s1, s2]
                tmp.sort()
                s1, s2 = tmp
                key = '{}{}d{}'.format(s1, s2, wd)
                if key not in keys:
                    break
            # add the new link to the network
            G.add_edge(s1, s2,
                       link_type='student_student_additional',
                       day=day,
                       weekday=wd,
                       group=np.nan,
                       key=key)
            keys.append(key)
def remove_unistudent_contacts(G, level):
    """Remove a fraction ``level`` of 'student_student_group' edges.

    For every weekday, a uniformly random subset of the existing
    student-student group edges is deleted from the graph.

    :param G: contact MultiGraph, modified in place
    :param level: fraction in [0, 1] of edges to remove per weekday
    """
    print('remove {:1.1f}% contacts between students'.format(level * 100))
    # NOTE(review): `students` is computed but never used below.
    students = [n[0] for n in G.nodes(data=True) if \
        n[1]['type'] == 'unistudent']
    weekdays = {1:'Monday', 2:'Tuesday', 3:'Wednesday', 4:'Thursday',
        5:'Friday', 6:'Saturday', 7:'Sunday'}
    for wd in range(1, len(weekdays) + 1):
        print()
        # all (u, v, key) triples between students on the given weekday
        edges = np.asarray([(e[0], e[1], e[2]) for e in \
            G.edges(keys=True, data=True) if \
            (e[3]['weekday'] == wd) and \
            (e[3]['link_type'] == 'student_student_group')])
        N_unistudent_edges = len(edges)
        N_edges_to_remove = round(level * N_unistudent_edges)
        print('\t{}: removing {} links between students'\
            .format(weekdays[wd], N_edges_to_remove))
        # sample edges to delete without replacement
        edges_to_remove_idx = np.random.choice(range(len(edges)),
            size=N_edges_to_remove, replace=False)
        edges_to_remove = edges[edges_to_remove_idx]
        # since numpy arrays convert all contents to objects if one of the
        # contained data types is an object (i.e. the key, which is a str), we
        # need to convert the edge IDs back to integers before we can remove
        # them from the graph
        edges_to_remove = [[int(e[0]), int(e[1]), e[2]] for e in edges_to_remove]
        G.remove_edges_from(edges_to_remove)
def create_single_day_network(students, lecturers, studies, organisations,
                              groups, dates, rooms, day, frac=1):
    """Build the contact network for a single day.

    Adds student and lecturer nodes, then the contact edges produced by
    that day's group meetings.

    :param day: calendar day to process
    :param frac: allowed room occupancy fraction (default 1)
    :return: the populated networkx MultiGraph
    """
    G = nx.MultiGraph()
    add_students(G, students, studies)
    add_lecturers(G, lecturers, organisations)
    # BUG FIX: this previously called the undefined name
    # `add_group_contacts`; `add_event_contacts` with event_type='group'
    # is the function in this module that creates group contacts.
    add_event_contacts(G, students, lecturers, groups, dates, rooms, day,
                       frac, 'group')
    return G
def create_network(students, lecturers, studies, organisations, groups, dates,
                   rooms, days, estudents, electurers, exams, edates, frac=1):
    """Build the full contact network over a range of days.

    Adds student and lecturer nodes from both the lecture and the exam
    data, then for each day adds exam contacts followed by lecture
    (group) contacts.

    :param days: iterable of calendar days to process
    :param frac: allowed room occupancy fraction (default 1)
    :return: the populated networkx MultiGraph
    """
    G = nx.MultiGraph()
    # add students from lecture data
    print('lectures')
    add_students(G, students, studies)
    add_lecturers(G, lecturers, organisations)
    # add additional students from exam data
    print('exams')
    add_students(G, estudents, studies)
    add_lecturers(G, electurers, organisations)
    for day in days:
        # add connections between students and lecturers that occur in exams
        add_event_contacts(G, estudents, electurers, exams, edates, rooms, day,
                           frac, 'exam')
        # add connections between students and lecturers that occur in lectures
        add_event_contacts(G, students, lecturers, groups, dates, rooms, day,
                           frac, 'group')
    return G
def map_contacts(G, contact_map, N_weekdays=7):
    """Annotate every edge with a 'contact_type' derived from its link_type.

    Edge keys follow the '{n1}{n2}d{weekday}' convention with node ids
    in sorted order, matching how the edges were created.

    :param G: contact MultiGraph, modified in place
    :param contact_map: mapping link_type -> contact_type
    :param N_weekdays: number of weekdays to process (default 7)
    """
    for wd in range(1, N_weekdays + 1):
        todays_edges = [(a, b) for a, b, data in G.edges(data=True)
                        if data['weekday'] == wd]
        for a, b in todays_edges:
            lo, hi = sorted([a, b])
            key = '{}{}d{}'.format(lo, hi, wd)
            attrs = G[lo][hi][key]
            attrs['contact_type'] = contact_map[attrs['link_type']]
def get_weekday(date):
    """Return the weekday of *date* as an int in [1, 7].

    Monday is 1 and Sunday is 7. *date* may be anything accepted by
    ``pandas.to_datetime`` (string, datetime, Timestamp, ...).
    """
    # A pandas Timestamp already exposes weekday() (Monday == 0); there is
    # no need to rebuild a datetime.datetime from its components first.
    return pd.to_datetime(date).weekday() + 1
def calculate_duration(row):
    """Return the duration of an event in minutes, or NaN if unknown.

    :param row: mapping (e.g. a pandas Series) providing 'start_time' and
        'end_time' as ``datetime.time`` objects; either may be missing
        (NaN/None).
    :return: duration in minutes as float, or ``np.nan`` when either
        endpoint is missing.
    """
    end = row['end_time']
    start = row['start_time']
    # pd.isna replaces the original "x != x" NaN trick and also treats
    # None as missing.
    if pd.isna(end) or pd.isna(start):
        return np.nan
    # datetime.time objects cannot be subtracted directly, so anchor both
    # on an arbitrary date first. NOTE: if end < start, timedelta.seconds
    # wraps around midnight (it is always non-negative) — preserved from
    # the original behaviour.
    dummydate = datetime.date.today()
    minutes = (datetime.datetime.combine(dummydate, end)
               - datetime.datetime.combine(dummydate, start)).seconds / 60
    return minutes
def draw_uni_network(G, students, lecturers, day, study_id, dst):
    """Plot the contact network for one day and save it as an SVG in *dst*.

    Student nodes are drawn in blue, lecturer nodes in red;
    student-student group edges in blue, student-lecturer group edges in
    purple. The output file is named '<study_id>_<date>.svg'.
    """
    weekdays = {1:'Monday', 2:'Tuesday', 3:'Wednesday', 4:'Thursday',
                5:'Friday', 6:'Saturday', 7:'Sunday'}
    # Compute node positions on dummy graphs so the layout is identical
    # across days (seeded spring layout).
    H = nx.MultiGraph()
    add_students_dummy(H, students)
    pos_students = nx.spring_layout(H, k=0.1, seed=42)
    H = nx.MultiGraph()
    add_lecturers_dummy(H, lecturers)
    pos_lecturers = nx.spring_layout(H, k=0.1, seed=42)
    # Spread the student cloud out, then merge both position dicts.
    pos_students = nx.drawing.layout.rescale_layout_dict(pos_students, 2)
    pos = pos_students
    pos.update(pos_lecturers)
    fig, ax = plt.subplots(figsize=(10, 10))
    nx.draw_networkx_nodes(G, pos_students, ax=ax, node_size=20, alpha=0.5,
                           nodelist=students['student_id'].unique(),
                           node_color='b')
    nx.draw_networkx_nodes(G, pos_lecturers, ax=ax, node_size=20, alpha=0.5,
                           nodelist=lecturers['lecturer_id'].unique(),
                           node_color='r')
    # Separate edge lists by link type; e[3] is the edge data dict when
    # iterating with data=True and keys=True.
    student_edges = [(e[0], e[1]) for e in G.edges(data=True, keys=True) \
        if e[3]['link_type'] == 'student_student_group']
    student_lecturer_edges = [(e[0], e[1]) for e in G.edges(data=True, keys=True) \
        if e[3]['link_type'] == 'student_lecturer_group']
    nx.draw_networkx_edges(G, pos, ax=ax, alpha=0.5, edgelist=student_edges,
                           edge_color='b')
    nx.draw_networkx_edges(G, pos, ax=ax, alpha=0.5, edgelist=student_lecturer_edges,
                           edge_color='purple')
    wd = get_weekday(day)
    print(weekdays[wd])
    ax.set_title(weekdays[wd], fontsize=16)
    plt.savefig(join(dst, '{}_{}.svg'.format(study_id.replace(' ', '_'),
                str(pd.to_datetime(day).date()))))
    plt.clf();
| 45.902516
| 94
| 0.57457
|
acfcadefa5ed73e64ba8e84392dfc7d731dab28b
| 830
|
py
|
Python
|
pyramid_nested_ner/training/optim.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 6
|
2020-11-15T09:17:36.000Z
|
2021-12-10T18:30:58.000Z
|
pyramid_nested_ner/training/optim.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 2
|
2021-03-31T01:06:43.000Z
|
2021-07-01T12:44:33.000Z
|
pyramid_nested_ner/training/optim.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 3
|
2021-04-05T18:58:01.000Z
|
2021-10-11T03:57:17.000Z
|
import torch
def get_default_sgd_optim(params, lr=1e-2, momentum=0.9, inverse_time_lr_decay=True,
                          steps_per_epoch=235, decay_rate=0.05, decay_steps=1000):
    """
    Returns the default SGD optimizer used in the paper and the LR scheduler.

    :param params: iterable of parameters (or parameter groups) to optimize.
    :param lr: initial learning rate.
    :param momentum: SGD momentum.
    :param inverse_time_lr_decay: if True, also return a MultiplicativeLR
        scheduler applying inverse-time decay every 4th epoch; otherwise the
        returned scheduler is None.
    :param steps_per_epoch: optimizer steps per epoch in the decay formula
        (previously hard-coded to 235; parameterized for other dataset sizes).
    :param decay_rate: decay rate of the inverse-time schedule.
    :param decay_steps: normalization constant of the inverse-time schedule.
    :return: (optimizer, scheduler) tuple; scheduler may be None.
    """

    def inverse_time_decay(last_epoch):
        # MultiplicativeLR multiplies the current lr by this factor each
        # epoch; return 1 (no change) except on every 4th epoch.
        if last_epoch and not last_epoch % 4:
            return 1 / (1 + steps_per_epoch * last_epoch * decay_rate / decay_steps)
        return 1

    optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=1e-6)
    scheduler = None
    if inverse_time_lr_decay:
        scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
            optimizer, lr_lambda=inverse_time_decay)
    return optimizer, scheduler
| 30.740741
| 102
| 0.7
|
acfcae4f80b115717657da2aa7d080b2c13b2eb9
| 291
|
py
|
Python
|
openapi_core/schema/schemas/types.py
|
gjo/openapi-core
|
cabe512fb043d3e95b93fbe7a20b8e2d095d7d99
|
[
"BSD-3-Clause"
] | null | null | null |
openapi_core/schema/schemas/types.py
|
gjo/openapi-core
|
cabe512fb043d3e95b93fbe7a20b8e2d095d7d99
|
[
"BSD-3-Clause"
] | null | null | null |
openapi_core/schema/schemas/types.py
|
gjo/openapi-core
|
cabe512fb043d3e95b93fbe7a20b8e2d095d7d99
|
[
"BSD-3-Clause"
] | null | null | null |
import attr
@attr.s(hash=True)
class Contribution(object):
    """Describes how one property is carried over from a source object to a
    destination object (attrs generates __init__/__repr__/__eq__/__hash__).
    """
    # name of the property on the source object
    src_prop_name = attr.ib()
    # optional attribute to read off the source property value
    src_prop_attr = attr.ib(default=None)
    # name of the property on the destination; defaults to src_prop_name
    # semantics are decided by the consumer — TODO confirm against callers
    dest_prop_name = attr.ib(default=None)
    # whether the source value is a list of items
    is_list = attr.ib(default=False)
    # whether the source value is a dict of items
    is_dict = attr.ib(default=False)
    # default value used on the destination when the source is missing
    dest_default = attr.ib(default=None)
| 24.25
| 42
| 0.704467
|
acfcaee8b87909916cf67310d5fa05ed5a3dbfe2
| 1,711
|
py
|
Python
|
revel/setup.py
|
oberhamsi/FrameworkBenchmarks
|
660a66d51a9aad10b43c0660208fb13c098121af
|
[
"BSD-3-Clause"
] | 4
|
2015-01-22T02:13:03.000Z
|
2018-06-13T12:02:46.000Z
|
frameworks/Go/revel/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
frameworks/Go/revel/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import setup_util
import time
def start(args, logfile, errfile):
    """Build and launch the revel benchmark server.

    Rewrites the MySQL host in app.conf to args.database_host, fetches and
    builds the revel tool with `go get` / `go build`, then starts the app
    with `revel run benchmark prod`. Windows and POSIX use different GOPATH
    locations and binary names. Always returns 0 (errors from the
    subprocess calls are only captured in logfile/errfile).
    """
    # Point the app's MySQL DSN at the benchmark database host.
    setup_util.replace_text("revel/src/benchmark/conf/app.conf", "tcp\(.*:3306\)", "tcp(" + args.database_host + ":3306)")
    if os.name == 'nt':
        env = os.environ.copy()
        env["GOPATH"] = r"C:\FrameworkBenchmarks\revel"
        subprocess.call("go get -u github.com/robfig/revel/revel", shell=True, cwd="revel", env=env, stderr=errfile, stdout=logfile)
        subprocess.call(r"go build -o bin\revel.exe github.com/robfig/revel/revel", shell=True, cwd="revel", env=env, stderr=errfile, stdout=logfile)
        # NOTE(review): the server is started asynchronously; the Popen
        # handle is not kept, stop() kills it by process name instead.
        subprocess.Popen(r"bin\revel.exe run benchmark prod".rsplit(" "), shell=True, cwd="revel", env=env, stderr=errfile, stdout=logfile)
        return 0
    os.environ["GOPATH"] = os.path.expanduser('~/FrameworkBenchmarks/revel')
    subprocess.call("go get -u github.com/robfig/revel/revel", shell=True, cwd="revel", stderr=errfile, stdout=logfile)
    subprocess.call("go build -o bin/revel github.com/robfig/revel/revel", shell=True, cwd="revel", stderr=errfile, stdout=logfile)
    subprocess.Popen("bin/revel run benchmark prod".rsplit(" "), cwd="revel", stderr=errfile, stdout=logfile)
    return 0
def stop(logfile, errfile):
    """Stop the revel benchmark server.

    On Windows the processes are killed by image name via taskkill; on
    POSIX the process table is scanned for 'revel' processes (excluding the
    test runner itself) and each one receives SIGTERM. Always returns 0.
    """
    if os.name == 'nt':
        subprocess.call("taskkill /f /im benchmark.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
        subprocess.call("taskkill /f /im revel.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
        return 0
    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    # Bug fix: communicate() returns bytes on Python 3, and `'revel' in
    # bytes_line` raises TypeError. Decode once before matching (works on
    # Python 2 as well, where str.decode yields unicode).
    for line in out.decode('utf-8', errors='replace').splitlines():
        if 'revel' in line and 'run-tests' not in line:
            pid = int(line.split(None, 2)[1])
            os.kill(pid, 15)  # SIGTERM
    return 0
| 47.527778
| 145
| 0.700175
|
acfcaf0c0de73746d078562c5f62ebdd3632243b
| 1,491
|
py
|
Python
|
polling_stations/apps/data_importers/management/commands/import_darlington.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 29
|
2015-03-10T08:41:34.000Z
|
2022-01-12T08:51:38.000Z
|
polling_stations/apps/data_importers/management/commands/import_darlington.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 4,112
|
2015-04-01T21:27:38.000Z
|
2022-03-31T19:22:11.000Z
|
polling_stations/apps/data_importers/management/commands/import_darlington.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 31
|
2015-03-18T14:52:50.000Z
|
2022-02-24T10:31:07.000Z
|
from django.contrib.gis.geos import Point
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer for Darlington (DAL) polling stations and addresses."""

    council_id = "DAL"
    addresses_name = "2021-03-19T13:08:53.439791/darlington.gov.uk-1616156390000-.tsv"
    stations_name = "2021-03-19T13:08:53.439791/darlington.gov.uk-1616156390000-.tsv"
    elections = ["2021-05-06"]
    csv_delimiter = "\t"

    def station_record_to_dict(self, record):
        # The Reading Room Neasham Darlington DL2 1PH: correct the postcode
        # and pin the station to known coordinates.
        if record.polling_place_id != "6423":
            return super().station_record_to_dict(record)
        fixed = record._replace(polling_place_postcode="DL2 1QX")
        station = super().station_record_to_dict(fixed)
        station["location"] = Point(-1.49308179, 54.48766450, srid=4326)
        return station

    def address_record_to_dict(self, record):
        # Addresses excluded from the import by UPRN.
        bad_uprns = {
            "10003083588",  # ROCK COTTAGE, PIERCEBRIDGE, DARLINGTON
            "10003083581",  # DENTON GRANGE WEST, PIERCEBRIDGE, DARLINGTON
            "10003083582",  # DENTON GRANGE WEST COTTAGE, PIERCEBRIDGE, DARLINGTON
            "10003082878",  # PRO SEW, 98 ELDON STREET, DARLINGTON
            "10013318194",  # 28A YARM ROAD, DARLINGTON
        }
        uprn = record.property_urn.strip().lstrip("0")
        if uprn in bad_uprns:
            return None
        # Entire postcodes excluded from the import.
        if record.addressline6 in ("DL2 2XX",):
            return None
        return super().address_record_to_dict(record)
| 39.236842
| 86
| 0.665325
|
acfcaf31137290052c2e44ae4623b0d032b7a48a
| 687
|
py
|
Python
|
exercises/solution_02_14.py
|
tuomastik/spacy-course
|
bb7cba6aea221289cf078f36233813794c32f84c
|
[
"MIT"
] | 4
|
2019-12-31T05:45:44.000Z
|
2021-04-20T23:20:03.000Z
|
exercises/solution_02_14.py
|
tuomastik/spacy-course
|
bb7cba6aea221289cf078f36233813794c32f84c
|
[
"MIT"
] | null | null | null |
exercises/solution_02_14.py
|
tuomastik/spacy-course
|
bb7cba6aea221289cf078f36233813794c32f84c
|
[
"MIT"
] | 2
|
2019-10-05T15:13:14.000Z
|
2021-06-23T18:36:39.000Z
|
import json
from spacy.lang.en import English
with open("exercises/countries.json") as f:
COUNTRIES = json.loads(f.read())
nlp = English()
doc = nlp("Czech Republic may help Slovakia protect its airspace")
# Import the PhraseMatcher and initialize it
from spacy.matcher import PhraseMatcher
matcher = PhraseMatcher(nlp.vocab)
# Create pattern Doc objects and add them to the matcher
# This is the faster version of: [nlp(country) for country in COUNTRIES]
patterns = list(nlp.pipe(COUNTRIES))
matcher.add("COUNTRY", None, *patterns)
# Call the matcher on the test document and print the result
matches = matcher(doc)
print([doc[start:end] for match_id, start, end in matches])
| 29.869565
| 72
| 0.759825
|
acfcafa9a6c01cde5afc9d2ac749343f8f6e1b38
| 1,535
|
py
|
Python
|
Calendar.py
|
ValentinChareyre/adventofcode-2020
|
8b223ee4511dd4578e68f2adfe1c962185ef70d4
|
[
"MIT"
] | null | null | null |
Calendar.py
|
ValentinChareyre/adventofcode-2020
|
8b223ee4511dd4578e68f2adfe1c962185ef70d4
|
[
"MIT"
] | null | null | null |
Calendar.py
|
ValentinChareyre/adventofcode-2020
|
8b223ee4511dd4578e68f2adfe1c962185ef70d4
|
[
"MIT"
] | null | null | null |
import os
import glob
from os import system, name
from typing import List
def clear():
    """Clear the terminal: 'cls' on Windows, 'clear' on mac/linux."""
    _ = system('cls' if name == 'nt' else 'clear')
def get_calendar_directory() -> str:
    """Return the directory containing this script (symlinks resolved)."""
    return os.path.dirname(os.path.realpath(__file__))
def get_calendar_programs() -> List[str]:
    """Return paths of all day scripts ('DayXX.py') below the calendar dir.

    Files whose name contains '-part' are sub-parts of a day and skipped.
    """
    calendar_directory: str = get_calendar_directory()
    python_scripts: List[str] = []
    for dirpath, dirnames, filenames in os.walk(calendar_directory):
        for filename in filenames:
            if (filename.startswith("Day") and filename.endswith(".py")
                    and "-part" not in filename):
                # Bug fix: use os.path.join instead of a hard-coded "\\"
                # separator so the paths are valid on non-Windows platforms.
                python_scripts.append(os.path.join(dirpath, filename))
    return python_scripts
def print_programs(programs: List[str]):
    """Print a 1-based numbered menu of script base names (without '.py')."""
    # enumerate() replaces the range(len(...)) index loop.
    for number, program in enumerate(programs, start=1):
        print(f"  {number}: {os.path.basename(program)[:-3]}")
# ==================================================================== #
# Interactive entry point: list the available day scripts and execute the
# chosen one in the current interpreter.
clear()
print("===== Advent of Code 2020 =====\n")
programs: List[str] = get_calendar_programs()
print_programs(programs)
programs_count = len(programs)
if programs_count > 0:
    # 0 (or any non-positive number) exits without running anything.
    # NOTE: int() raises ValueError on non-numeric input — not handled.
    index = int(input("\nWhich script do you want to execute? (0 to exit): "))
    if index > programs_count:
        print("No script for that day!")
    elif index > 0:
        # Run from the script's own directory so its relative paths work,
        # then execute it in-process.
        os.chdir(os.path.dirname(programs[index - 1]))
        with open(programs[index - 1], "r") as script:
            exec(script.read())
| 34.886364
| 99
| 0.624756
|
acfcb03ed9d3f34baf8d7528798db7a965f5a173
| 6,341
|
py
|
Python
|
test/functional/combine_logs.py
|
Bandexcoin-Project/bandexcoin
|
6d1a579479db00c8217c303c5bde594b4a5c9589
|
[
"MIT"
] | 1
|
2021-11-24T20:24:36.000Z
|
2021-11-24T20:24:36.000Z
|
test/functional/combine_logs.py
|
Bandexcoin-Project/bandexcoin
|
6d1a579479db00c8217c303c5bde594b4a5c9589
|
[
"MIT"
] | 1
|
2021-08-07T16:26:24.000Z
|
2021-08-07T17:05:20.000Z
|
test/functional/combine_logs.py
|
Bandexcoin-Project/bandexcoin
|
6d1a579479db00c8217c303c5bde594b4a5c9589
|
[
"MIT"
] | 1
|
2021-08-08T14:15:42.000Z
|
2021-08-08T14:15:42.000Z
|
#!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "bandexcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
# One parsed log entry: ISO timestamp, originating source (test/nodeN),
# and the full (possibly multi-line) event text.
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Entry point: parse CLI options, locate the test directory, and
    render the merged logs as plain text, colored text, or HTML."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    arg_parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    arg_parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    opts = arg_parser.parse_args()

    # --color and --html are mutually exclusive render modes.
    if opts.html and opts.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    testdir = opts.testdir or find_latest_test_dir()
    if not testdir:
        print("No test directories found")
        sys.exit(1)
    if not opts.testdir:
        print("Opening latest test directory: {}".format(testdir), file=sys.stderr)

    print_logs(read_logs(testdir), color=opts.color, html=opts.html)
def read_logs(tmp_dir):
    """Read all log files under *tmp_dir* and merge them by timestamp.

    Collects the test framework log plus the debug.log of every node
    directory that exists, and delegates to get_log_events() for the
    per-file event streams, merged lazily with heapq.merge."""
    sources = [("test", "%s/test_framework.log" % tmp_dir)]
    node = 0
    while True:
        logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, node)
        if not os.path.isfile(logfile):
            break
        sources.append(("node%d" % node, logfile))
        node += 1
    streams = [get_log_events(name, path) for name, path in sources]
    return heapq.merge(*streams)
def find_latest_test_dir():
    """Return the most recently modified test tmpdir, or None if none exist."""
    tmpdir = tempfile.gettempdir()

    def is_valid_test_tmpdir(basename):
        # A candidate must be a readable directory carrying the prefix used
        # by the test framework.
        fullpath = os.path.join(tmpdir, basename)
        return (
            os.path.isdir(fullpath)
            and basename.startswith(TMPDIR_PREFIX)
            and os.access(fullpath, os.R_OK)
        )

    candidates = [
        os.path.join(tmpdir, name)
        for name in os.listdir(tmpdir)
        if is_valid_test_tmpdir(name)
    ]
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may be split over multiple lines. We use the timestamp
    regex match as the marker for a new log event. Timestamps without a
    microsecond part are normalized to six zero digits so merged events
    sort consistently. A missing logfile is reported on stderr and simply
    yields nothing."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            # Accumulate lines into `event` until the next timestamped line
            # starts a new one.
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    timestamp = time_match.group()
                    if time_match.group(1) is None:
                        # timestamp does not have microseconds. Add zeroes.
                        timestamp_micro = timestamp.replace("Z", ".000000Z")
                        line = line.replace(timestamp, timestamp_micro)
                        timestamp = timestamp_micro
                    event = line
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    # Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
                    event += "                                             " + line
                # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
    """Renders the iterator of log events into text or html.

    In text mode each event is printed as '<source> <first line>' with
    continuation lines below; with color=True each source gets its own
    ANSI color. HTML mode renders combined_log_template.html via jinja2
    and exits with status 1 if jinja2 is not installed."""
    if not html:
        # defaultdict('') makes uncolored output a no-op without branching.
        colors = defaultdict(lambda: '')
        if color:
            colors["test"] = "\033[0;36m"  # CYAN
            colors["node0"] = "\033[0;34m"  # BLUE
            colors["node1"] = "\033[0;32m"  # GREEN
            colors["node2"] = "\033[0;31m"  # RED
            colors["node3"] = "\033[0;33m"  # YELLOW
            colors["reset"] = "\033[0m"  # Reset font color

        for event in log_events:
            lines = event.event.splitlines()
            print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
            if len(lines) > 1:
                for line in lines[1:]:
                    print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
    else:
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
              .get_template('combined_log_template.html')
              .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
# Script entry point when run directly (not imported).
if __name__ == '__main__':
    main()
| 39.385093
| 196
| 0.615045
|
acfcb0477ce16aaf5ed7e281433e1832d951c9cf
| 7,398
|
py
|
Python
|
tests/ut/python/dataset/test_random_invert.py
|
httpsgithu/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | 1
|
2022-03-30T03:43:29.000Z
|
2022-03-30T03:43:29.000Z
|
tests/ut/python/dataset/test_random_invert.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_random_invert.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomInvert in DE
"""
import numpy as np
import mindspore.dataset as ds
from mindspore.dataset.vision.transforms import Decode, Resize, RandomInvert, Invert
from mindspore import log as logger
from util import visualize_list, visualize_image, diff_mse
# Sample image and image-folder dataset used by the tests below,
# relative to the test working directory.
image_file = "../data/dataset/testImageNetData/train/class1/1_1.jpg"
data_dir = "../data/dataset/testImageNetData/train/"
def test_random_invert_pipeline(plot=False):
    """
    Test RandomInvert pipeline: run the dataset once without and once with
    RandomInvert(0.6), optionally plot both batches, and log the mean MSE
    between them (no assertion — this is a smoke test).
    """
    logger.info("Test RandomInvert pipeline")

    # Original Images
    # NOTE(review): images_original is only bound on the first iteration;
    # an empty dataset would leave it undefined below.
    data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
    transforms_original = [Decode(), Resize(size=[224, 224])]
    ds_original = data_set.map(operations=transforms_original, input_columns="image")
    ds_original = ds_original.batch(512)
    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = image.asnumpy()
        else:
            images_original = np.append(images_original,
                                        image.asnumpy(),
                                        axis=0)
    # Randomly Inverted Images (each image inverted with probability 0.6)
    data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
    transform_random_invert = [Decode(), Resize(size=[224, 224]), RandomInvert(0.6)]
    ds_random_invert = data_set1.map(operations=transform_random_invert, input_columns="image")
    ds_random_invert = ds_random_invert.batch(512)
    for idx, (image, _) in enumerate(ds_random_invert):
        if idx == 0:
            images_random_invert = image.asnumpy()
        else:
            images_random_invert = np.append(images_random_invert,
                                             image.asnumpy(),
                                             axis=0)
    if plot:
        visualize_list(images_original, images_random_invert)

    # Report the per-image MSE between original and randomly inverted runs.
    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_random_invert[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))
def test_random_invert_eager():
    """
    Test RandomInvert in eager mode: with prob=1.0 its output must equal
    that of the deterministic Invert op on the same image.
    """
    img = np.fromfile(image_file, dtype=np.uint8)
    logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
    img = Decode()(img)
    img_inverted = Invert()(img)
    img_random_inverted = RandomInvert(1.0)(img)
    logger.info("Image.type: {}, Image.shape: {}".format(type(img_random_inverted), img_random_inverted.shape))
    # Bug fix: the original `a.all() == b.all()` compared two scalar truth
    # values, so it passed even when the images differed. Compare the full
    # arrays element-wise instead.
    np.testing.assert_array_equal(img_random_inverted, img_inverted)
def test_random_invert_comp(plot=False):
    """
    Test RandomInvert op compared with Invert op: with prob=1.0 both must
    produce identical images (MSE of 0) on every sample.
    """
    random_invert_op = RandomInvert(prob=1.0)
    invert_op = Invert()
    dataset1 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
    # Keep one original (pre-map) image around for the optional plot.
    for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True):
        image = item['image']
    # Bug fix: Dataset.map() returns a new dataset; the original code
    # discarded the return value, so neither op was ever applied and the
    # comparison below trivially succeeded on identical, unmapped images.
    dataset1 = dataset1.map(operations=random_invert_op, input_columns=['image'])
    dataset2 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
    dataset2 = dataset2.map(operations=invert_op, input_columns=['image'])
    for item1, item2 in zip(dataset1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            dataset2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        image_random_inverted = item1['image']
        image_inverted = item2['image']
        mse = diff_mse(image_inverted, image_random_inverted)
        assert mse == 0
        logger.info("mse: {}".format(mse))
    if plot:
        visualize_image(image, image_random_inverted, mse, image_inverted)
def test_random_invert_invalid_prob():
    """
    Test invalid prob. prob out of range [0.0, 1.0] must raise ValueError.
    """
    logger.info("test_random_invert_invalid_prob")
    dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
    # NOTE(review): if no exception is raised this test passes silently;
    # a pytest.raises block would be stricter.
    try:
        random_invert_op = RandomInvert(1.5)
        dataset = dataset.map(operations=random_invert_op, input_columns=['image'])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input prob is not within the required interval of [0.0, 1.0]." in str(e)
def test_random_invert_one_channel():
    """
    Feature: RandomInvert
    Description: test with one channel images (lambda keeps only channel 0)
    Expectation: raise errors as expected
    """
    logger.info("test_random_invert_one_channel")
    c_op = RandomInvert()
    # NOTE(review): if no exception is raised this test passes silently.
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        # Drop to a single channel so the op's 3-channel check must fire.
        data_set = data_set.map(operations=[Decode(), Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
                                input_columns=["image"])

        data_set = data_set.map(operations=c_op, input_columns="image")

    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "image shape is incorrect, expected num of channels is 3." in str(e)
def test_random_invert_four_dim():
    """
    Feature: RandomInvert
    Description: test with four dimension images
    Expectation: raise errors as expected
    """
    logger.info("test_random_invert_four_dim")
    c_op = RandomInvert()
    # NOTE(review): if no exception is raised this test passes silently.
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        # Reshape into a non-<H,W,C> input so the op's shape check must
        # fire. NOTE(review): img[2, 200, 10, 32] indexes single elements —
        # presumably intended to build a 4-D array; confirm against the op.
        data_set = data_set.map(operations=[Decode(), Resize((224, 224)), lambda img: np.array(img[2, 200, 10, 32])],
                                input_columns=["image"])

        data_set = data_set.map(operations=c_op, input_columns="image")

    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "image shape is not <H,W,C>" in str(e)
def test_random_invert_invalid_input():
    """
    Feature: RandomInvert
    Description: test with images in uint32 type
    Expectation: raise errors as expected
    """
    logger.info("test_random_invert_invalid_input")
    c_op = RandomInvert()
    # NOTE(review): if no exception is raised this test passes silently.
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        # Bug fix: `uint32` was a bare (undefined) name and raised
        # NameError instead of exercising the dtype check — use np.uint32.
        data_set = data_set.map(operations=[Decode(), Resize((224, 224)),
                                            lambda img: np.array(img[2, 32, 3], dtype=np.uint32)], input_columns=["image"])
        data_set = data_set.map(operations=c_op, input_columns="image")
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Cannot convert from OpenCV type, unknown CV type" in str(e)
if __name__ == "__main__":
test_random_invert_pipeline(plot=True)
test_random_invert_eager()
test_random_invert_comp(plot=True)
test_random_invert_invalid_prob()
test_random_invert_one_channel()
test_random_invert_four_dim()
test_random_invert_invalid_input()
| 37.363636
| 120
| 0.667883
|
acfcb079f2c0eb8898a1ddd5ccfa904ce09393f9
| 12,200
|
py
|
Python
|
api/chalice-api/tests.py
|
plugblockchain/explorer
|
7e550e36249b1aee7b3ffc91b5b8c2bafe343508
|
[
"Apache-2.0"
] | 2
|
2021-03-25T05:29:01.000Z
|
2022-02-04T06:19:39.000Z
|
api/chalice-api/tests.py
|
plugblockchain/explorer
|
7e550e36249b1aee7b3ffc91b5b8c2bafe343508
|
[
"Apache-2.0"
] | 16
|
2020-09-06T15:59:20.000Z
|
2022-02-12T17:42:49.000Z
|
api/chalice-api/tests.py
|
plugblockchain/explorer
|
7e550e36249b1aee7b3ffc91b5b8c2bafe343508
|
[
"Apache-2.0"
] | 1
|
2020-12-23T20:37:25.000Z
|
2020-12-23T20:37:25.000Z
|
import decimal
import json
import unittest
import chalice
import app
class TestApp(unittest.TestCase):
def test_debug(self):
# Debug disabled if omitted
self.assertFalse(app._is_debug())
self.assertFalse(app._is_debug(debug=None))
self.assertTrue(app._is_debug("True"))
self.assertTrue(app._is_debug("yes"))
self.assertTrue(app._is_debug("ON"))
self.assertTrue(app._is_debug(" 1 "))
self.assertFalse(app._is_debug("everything else"))
def test_get_page_no_query_params(self):
self.assertEqual(
app.DEFAULT_PAGE,
app._get_page(None),
)
def test_get_page_no_page_key(self):
self.assertEqual(
app.DEFAULT_PAGE,
app._get_page({}),
)
def test_get_page_not_int(self):
self.assertEqual(
app.DEFAULT_PAGE,
app._get_page({
"page": "hello"
}),
)
def test_get_page_invalid_types(self):
with self.assertRaises(TypeError):
app._get_page({"page": None})
with self.assertRaises(TypeError):
app._get_page({"page": []})
def test_get_page_ok(self):
self.assertEqual(
123,
app._get_page({
"page": "123"
}),
)
def test_get_limit_no_query_params(self):
self.assertEqual(
app.DEFAULT_LIMIT,
app._get_limit(None),
)
def test_get_limit_no_limit_key(self):
self.assertEqual(
app.DEFAULT_LIMIT,
app._get_limit({}),
)
def test_get_limit_not_int(self):
self.assertEqual(
app.DEFAULT_LIMIT,
app._get_limit({
"limit": "hello"
}),
)
def test_get_limit_invalid_types(self):
with self.assertRaises(TypeError):
app._get_limit({"limit": None})
with self.assertRaises(TypeError):
app._get_limit({"limit": []})
def test_get_limit_max(self):
for limit in (app.MAX_LIMIT, app.MAX_LIMIT + 1):
self.assertEqual(
app.MAX_LIMIT,
app._get_limit({
"limit": "%d" % limit,
}),
)
def test_get_limit_non_positive(self):
for limit in (0, -1):
self.assertEqual(
app.DEFAULT_LIMIT,
app._get_limit({
"limit": "%d" % limit,
}),
)
def test_get_limit_ok(self):
self.assertEqual(
100,
app._get_limit({
"limit": "100"
}),
)
def test_get_asset_id_no_query_params(self):
self.assertEqual(
"{}", # Default
app._get_asset_id(None),
)
def test_get_asset_id_no_asset_id_key(self):
self.assertEqual(
"{}", # Default
app._get_asset_id({
"hello": "world",
}),
)
def test_get_asset_id_non_digit(self):
self.assertEqual(
"{}", # Default
app._get_asset_id({
"asset_id": "hi", # Non-digit
}),
)
self.assertEqual(
"{}", # Default
app._get_asset_id(
{
"asset_id": "1,", # Trailing comma (i.e. non-digit)
}
),
)
def test_get_asset_id_empty(self):
self.assertEqual(
"{}", # Default
app._get_asset_id({
"asset_id": "",
}),
)
self.assertEqual(
"{}", # Default
app._get_asset_id({
"asset_id": " ",
}),
)
def test_get_asset_id_ok(self):
self.assertEqual(
'{10}',
app._get_asset_id({
"asset_id": "10",
}),
)
self.assertEqual(
'{10}',
app._get_asset_id({
"asset_id": " 10 ",
}),
)
self.assertEqual(
"{20,30}",
app._get_asset_id({
"asset_id": "20,30",
}),
)
self.assertEqual(
"{20,30}",
app._get_asset_id({
"asset_id": " 20, 30 ",
}),
)
def test_get_txn_flow_no_query_params(self):
self.assertEqual(
"{Outgoing,Incoming}", # Default
app._get_txn_flow(None),
)
def test_get_txn_flow_no_txn_flow_key(self):
self.assertEqual(
"{Outgoing,Incoming}", # Default
app._get_txn_flow({
"hello": "world",
}),
)
def test_get_txn_flow_empty(self):
self.assertEqual(
"{Outgoing,Incoming}", # Default
app._get_txn_flow({
"txn_flow": "",
}),
)
self.assertEqual(
"{Outgoing,Incoming}", # Default
app._get_txn_flow({
"txn_flow": " ",
}),
)
def test_get_txn_flow_ok(self):
self.assertEqual(
"{Incoming}",
app._get_txn_flow({
"txn_flow": "Incoming",
}),
)
self.assertEqual(
"{Incoming}",
app._get_txn_flow({
"txn_flow": " Incoming ",
}),
)
self.assertEqual(
"{Outgoing,Incoming}",
app._get_txn_flow({
"txn_flow": "Outgoing,Incoming",
}),
)
self.assertEqual(
"{Outgoing,Incoming}",
app._get_txn_flow({
"txn_flow": " Outgoing, Incoming ",
}),
)
def test_get_asset_type_no_query_params(self):
self.assertEqual(
"{Generic,User-generated}", # Default
app._get_asset_type(None),
)
def test_get_asset_type_no_asset_type_key(self):
self.assertEqual(
"{Generic,User-generated}", # Default
app._get_asset_type({
"hello": "world",
}),
)
def test_get_asset_type_empty(self):
self.assertEqual(
"{Generic,User-generated}", # Default
app._get_asset_type({
"asset_type": "",
}),
)
self.assertEqual(
"{Generic,User-generated}", # Default
app._get_asset_type({
"asset_type": " ",
}),
)
def test_get_asset_type_ok(self):
self.assertEqual(
"{Generic}",
app._get_asset_type({
"asset_type": "Generic",
}),
)
self.assertEqual(
"{Generic}",
app._get_asset_type({
"asset_type": " Generic ",
}),
)
self.assertEqual(
"{Generic,User-generated}",
app._get_asset_type({
"asset_type": "Generic,User-generated",
}),
)
self.assertEqual(
"{Generic,User-generated}",
app._get_asset_type({
"asset_type": " Generic, User-generated ",
}),
)
def test_get_start_time_no_query_params(self):
self.assertEqual(
0, # Default
app._get_start_time(None, 1550088010),
)
def test_get_start_time_non_digit(self):
with self.assertRaises(chalice.BadRequestError) as err:
app._get_start_time({"start_time": "not a digit"}, 1550088010)
err_status_code = 400
err_msg = "BadRequestError: Invalid start time type."
self.assertEqual(err.exception.STATUS_CODE, err_status_code)
self.assertEqual(str(err.exception), err_msg)
def test_get_start_time_less_than_zero(self):
with self.assertRaises(chalice.BadRequestError) as err:
app._get_start_time({"start_time": "-4"}, 1550088010)
err_status_code = 400
err_msg = "BadRequestError: The start time value cannot be negative."
self.assertEqual(err.exception.STATUS_CODE, err_status_code)
self.assertEqual(str(err.exception), err_msg)
def test_get_start_time_greater_than_current_time(self):
with self.assertRaises(chalice.BadRequestError) as err:
app._get_start_time({"start_time": "1550088020"}, 1550088010)
err_status_code = 400
err_msg = "BadRequestError: The start time cannot be later than now."
self.assertEqual(err.exception.STATUS_CODE, err_status_code)
self.assertEqual(str(err.exception), err_msg)
def test_get_start_time_ok(self):
self.assertEqual(
1550088000, # Default
app._get_start_time({
"start_time": "1550088000"
}, 1550088010),
)
def test_get_end_time_no_query_params(self):
self.assertEqual(
1550088010, # Default
app._get_end_time(None, 1550088010),
)
def test_get_end_time_non_digit(self):
with self.assertRaises(chalice.BadRequestError) as err:
app._get_end_time({"end_time": "not a digit"}, 1550088010)
err_status_code = 400
err_msg = "BadRequestError: Invalid end time type."
self.assertEqual(err.exception.STATUS_CODE, err_status_code)
self.assertEqual(str(err.exception), err_msg)
def test_get_end_time_less_than_zero(self):
with self.assertRaises(chalice.BadRequestError) as err:
app._get_end_time({"end_time": "-4"}, 1550088010)
err_status_code = 400
err_msg = "BadRequestError: The end time value cannot be negative."
self.assertEqual(err.exception.STATUS_CODE, err_status_code)
self.assertEqual(str(err.exception), err_msg)
def test_get_end_time_ok(self):
self.assertEqual(
1550088000, # Default
app._get_end_time({
"end_time": "1550088000"
}, 1550088010),
)
def test_is_hex_incorrect_prefix(self):
for x in "0123456789abcdefABCDEF":
self.assertFalse(app._is_hex(x, length=1))
    def test_is_hex_block_hash_too_short(self):
        """A 63-character hash (one short of 64) is rejected."""
        value = "0x54d81a25b240c013d1c0d0d2e7f240ba062c9291aec0c2370eba43a6968acdd"
        self.assertLess(len(value[2:]), 64)
        self.assertFalse(app._is_hex(value))
    def test_is_hex_block_hash_too_long(self):
        """A 65-character hash (one over 64) is rejected."""
        value = "0x54d81a25b240c013d1c0d0d2e7f240ba062c9291aec0c2370eba43a6968acdda0"
        self.assertGreater(len(value[2:]), 64)
        self.assertFalse(app._is_hex(value))
    def test_is_hex_ok_single_char(self):
        """Every single hex digit with a '0x' prefix is accepted (length=1)."""
        for x in "0123456789abcdefABCDEF":
            self.assertTrue(app._is_hex("0x%s" % x, length=1))
    def test_is_hex_ok_block_hash(self):
        """A well-formed 64-character block hash with '0x' prefix is accepted."""
        value = "0x54d81a25b240c013d1c0d0d2e7f240ba062c9291aec0c2370eba43a6968acdda"
        self.assertEqual(len(value[2:]), 64)
        self.assertTrue(app._is_hex(value))
    def test_custom_json_encoder(self):
        """Plain JSON types (str, int, float) pass through CustomJsonEncoder unchanged."""
        d = {
            "hello": "world",
            "foo": 123,
            "pi": 3.14,
        }
        jsonified = json.dumps(d, cls=app.CustomJsonEncoder)
        self.assertEqual(
            '{"hello": "world", "foo": 123, "pi": 3.14}',
            jsonified,
        )
    def test_custom_json_encoder_decimal(self):
        """decimal.Decimal values are serialized as JSON strings by CustomJsonEncoder."""
        # Decimal built from an int.
        d = {
            "value": decimal.Decimal(123),
        }
        jsonified = json.dumps(d, cls=app.CustomJsonEncoder)
        self.assertEqual(
            '{"value": "123"}',
            jsonified,
        )
        # Decimal built from a string.
        d = {
            "value": decimal.Decimal("123"),
        }
        jsonified = json.dumps(d, cls=app.CustomJsonEncoder)
        self.assertEqual(
            '{"value": "123"}',
            jsonified,
        )
        # Decimal built from a float keeps its fractional part.
        d = {
            "value": decimal.Decimal(123.5),
        }
        jsonified = json.dumps(d, cls=app.CustomJsonEncoder)
        self.assertEqual(
            '{"value": "123.5"}',
            jsonified,
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 28.045977
| 85
| 0.528197
|
acfcb0b9f4c57f0bc9f8f8590f0340143eb76de4
| 1,940
|
py
|
Python
|
docs/python/stack_instructions.py
|
Voldemort373/Notes-and-Reference
|
796885e315e9c349ff1cb37760abc56327547140
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 30
|
2018-11-12T09:03:45.000Z
|
2021-12-09T02:20:08.000Z
|
docs/python/stack_instructions.py
|
Voldemort373/Notes-and-Reference
|
796885e315e9c349ff1cb37760abc56327547140
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 36
|
2018-11-11T21:32:31.000Z
|
2019-02-02T16:18:11.000Z
|
docs/python/stack_instructions.py
|
Voldemort373/Notes-and-Reference
|
796885e315e9c349ff1cb37760abc56327547140
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 8
|
2018-11-14T17:09:21.000Z
|
2020-05-28T16:18:12.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
# Demonstrates stack (LIFO) operations using collections.deque.
from collections import deque # import statement

my_first_stack = deque() # this creates a new stack
my_first_stack.append("Good Omens") # these lines add two books
my_first_stack.append("Neverwhere")
# currently my_first_stack contains two elements:
# deque(["Good Omens",
#        "Neverwhere"])
my_first_stack.append("The Name of the Rose") # add a new book
# now my_first_stack contains:
# deque(["Good Omens",
#        "Neverwhere",
#        "The Name of the Rose"])
my_first_stack.pop() # it removes the element on top of the stack
# my_first_stack became:
# deque(["Good Omens",
#        "Neverwhere"])
my_second_stack = deque() # this creates a new stack
my_second_stack.append("American Gods") # these lines add two books
my_second_stack.append("Fragile Things")
# currently my_second_stack contains two elements:
# deque(["American Gods",
#        "Fragile Things"])
# it add all the elements in my_second_stack on top of my_first_stack
my_first_stack.extend(my_second_stack)
# current status of my_first_stack:
# deque(["Good Omens",
#        "Neverwhere",
#        "American Gods",
#        "Fragile Things"])
| 38.039216
| 84
| 0.735567
|
acfcb3b4eed8749555c6a68f81a13da6a446885a
| 7,821
|
py
|
Python
|
fedcloudclient/checkin.py
|
MartinBobak/fedcloudclient
|
54d6b4be5f3a6bfb13aaa8df799fda000649d48a
|
[
"MIT"
] | null | null | null |
fedcloudclient/checkin.py
|
MartinBobak/fedcloudclient
|
54d6b4be5f3a6bfb13aaa8df799fda000649d48a
|
[
"MIT"
] | null | null | null |
fedcloudclient/checkin.py
|
MartinBobak/fedcloudclient
|
54d6b4be5f3a6bfb13aaa8df799fda000649d48a
|
[
"MIT"
] | null | null | null |
"""
Implementation of "fedcloud token" commands for interactions with EGI Check-in and
access tokens
"""
import re
import sys
import time
from datetime import datetime
import click
import jwt
import liboidcagent as agent
import requests
from fedcloudclient.decorators import (
oidc_access_token_params,
oidc_params,
oidc_refresh_token_params,
)
# Minimal lifetime of the access token is 10s and max 24h
_MIN_ACCESS_TOKEN_TIME = 10  # seconds; tokens expiring within this margin are rejected
_MAX_ACCESS_TOKEN_TIME = 24 * 3600  # seconds; longer "validity" suggests a refresh token
def oidc_discover(oidc_url):
    """
    Discover OIDC endpoints

    :param oidc_url: CheckIn URL
    :return: JSON object of OIDC configuration
    """
    # Standard OIDC discovery document location (RFC 8414 / OpenID Discovery).
    discovery_url = oidc_url + "/.well-known/openid-configuration"
    response = requests.get(discovery_url)
    response.raise_for_status()
    return response.json()
def token_refresh(oidc_client_id, oidc_client_secret, oidc_refresh_token, oidc_url):
    """
    Helper function for retrieving JSON object with access token

    :param oidc_client_id:
    :param oidc_client_secret:
    :param oidc_refresh_token:
    :param oidc_url:
    :return: JSON object with access token
    """
    endpoints = oidc_discover(oidc_url)

    # Standard OAuth2 refresh-token grant payload.
    payload = {
        "client_id": oidc_client_id,
        "client_secret": oidc_client_secret,
        "grant_type": "refresh_token",
        "refresh_token": oidc_refresh_token,
        "scope": "openid email profile offline_access",
    }
    response = requests.post(
        endpoints["token_endpoint"],
        auth=(oidc_client_id, oidc_client_secret),
        data=payload,
    )
    response.raise_for_status()
    return response.json()
def refresh_access_token(
    oidc_client_id, oidc_client_secret, oidc_refresh_token, oidc_url
):
    """
    Retrieve access token in plain text (string)

    :param oidc_client_id:
    :param oidc_client_secret:
    :param oidc_refresh_token:
    :param oidc_url:
    :return: access token
    """
    token_json = token_refresh(
        oidc_client_id, oidc_client_secret, oidc_refresh_token, oidc_url
    )
    return token_json["access_token"]
def get_access_token(
    oidc_access_token,
    oidc_refresh_token,
    oidc_client_id,
    oidc_client_secret,
    oidc_url,
    oidc_agent_account,
):
    """
    Get access token

    Generates new access token from oidc-agent or
    refresh token (if given), or use existing token

    Check expiration time of access token
    Raise error if no valid token exists

    :param oidc_access_token: an existing access token (fallback)
    :param oidc_refresh_token: refresh token (deprecated, insecure)
    :param oidc_client_id: OIDC client id used with the refresh token
    :param oidc_client_secret: OIDC client secret used with the refresh token
    :param oidc_url: CheckIn URL for endpoint discovery
    :param oidc_agent_account: oidc-agent account name (preferred source)
    :return: access token
    """
    # First, try to get access token from oidc-agent
    if oidc_agent_account:
        try:
            access_token = agent.get_access_token(
                oidc_agent_account,
                min_valid_period=30,
                application_hint="fedcloudclient",
            )
            return access_token
        except agent.OidcAgentError as e:
            # Fall through to the other token sources on agent failure.
            print("ERROR oidc-agent: {}".format(e))

    # Then try refresh token
    if oidc_refresh_token and oidc_client_id and oidc_client_secret and oidc_url:
        print(
            "Warning: Exposing refresh tokens is insecure and will be deprecated!",
            file=sys.stderr,
        )
        return token_refresh(
            oidc_client_id, oidc_client_secret, oidc_refresh_token, oidc_url
        )["access_token"]

    # Then finally access token
    elif oidc_access_token:
        # Check expiration time of access token.
        # Signature is NOT verified here; only the "exp" claim is inspected.
        try:
            payload = jwt.decode(oidc_access_token, options={"verify_signature": False})
        except jwt.exceptions.InvalidTokenError:
            raise SystemExit("Error: Invalid access token.")
        expiration_timestamp = int(payload["exp"])
        current_timestamp = int(time.time())
        # Reject tokens that expire within the 10 s safety margin.
        if current_timestamp > expiration_timestamp - _MIN_ACCESS_TOKEN_TIME:
            raise SystemExit(
                "The given access token has expired."
                " Get new access token before continuing on operation"
            )
        # A token "valid" for more than 24 h is most likely a refresh token
        # pasted by mistake.
        if current_timestamp < expiration_timestamp - _MAX_ACCESS_TOKEN_TIME:
            raise SystemExit(
                "You probably use refresh tokens as access tokens."
                " Get access tokens via `curl -X POST -u ...` command"
                " in the last row of the page https://aai.egi.eu/fedcloud."
            )
        return oidc_access_token
    else:
        raise SystemExit(
            "Error: An access token is needed for the operation. You can specify "
            "access token directly via --oidc-access-token option or use oidc-agent "
            "via --oidc-agent-account"
        )
def token_list_vos(oidc_access_token, oidc_url):
    """
    List VO memberships in EGI Check-in

    :param oidc_access_token:
    :param oidc_url:
    :return: sorted list of VO names
    """
    endpoints = oidc_discover(oidc_url)
    response = requests.get(
        endpoints["userinfo_endpoint"],
        headers={"Authorization": "Bearer %s" % oidc_access_token},
    )
    response.raise_for_status()

    # Extract VO names from the entitlement URNs of the "member" role.
    pattern = re.compile("urn:mace:egi.eu:group:(.+?):(.+:)*role=member#aai.egi.eu")
    vo_names = set()
    for claim in response.json().get("eduperson_entitlement", []):
        match = pattern.match(claim)
        if match is not None:
            vo_names.add(match.group(1))
    return sorted(vo_names)
@click.group()
def token():
    """
    Get details of access/refresh tokens
    """
    # Click group; subcommands are registered below via @token.command().
    pass
@token.command()
@oidc_refresh_token_params
@oidc_access_token_params
def check(oidc_refresh_token, oidc_access_token):
    """
    Check validity of access/refresh token

    Prints the expiry time of the refresh token (preferred) or access
    token and whether it has already expired. Exits with status 1 when
    neither token is given.
    """
    if oidc_refresh_token:
        # Decode without verifying the signature: only the "exp" claim is used.
        try:
            payload = jwt.decode(
                oidc_refresh_token, options={"verify_signature": False}
            )
        except jwt.exceptions.InvalidTokenError:
            raise SystemExit("Error: Invalid refresh token.")
        expiration_timestamp = int(payload["exp"])
        expiration_time = datetime.utcfromtimestamp(expiration_timestamp).strftime(
            "%Y-%m-%d %H:%M:%S"
        )
        print("Refresh token is valid until %s UTC" % expiration_time)
        current_timestamp = int(time.time())
        if current_timestamp < expiration_timestamp:
            print(
                "Refresh token expires in %d days"
                % ((expiration_timestamp - current_timestamp) // (24 * 3600))
            )
        else:
            print("Refresh token has expired")
    elif oidc_access_token:
        try:
            payload = jwt.decode(oidc_access_token, options={"verify_signature": False})
        except jwt.exceptions.InvalidTokenError:
            raise SystemExit("Error: Invalid access token.")
        expiration_timestamp = int(payload["exp"])
        expiration_time = datetime.utcfromtimestamp(expiration_timestamp).strftime(
            "%Y-%m-%d %H:%M:%S"
        )
        print("Access token is valid until %s UTC" % expiration_time)
        current_timestamp = int(time.time())
        if current_timestamp < expiration_timestamp:
            print(
                "Access token expires in %d seconds"
                % (expiration_timestamp - current_timestamp)
            )
        else:
            print("Access token has expired")
    else:
        print("OIDC access token or refresh token required")
        exit(1)
@token.command()
@oidc_params
def list_vos(
    oidc_client_id,
    oidc_client_secret,
    oidc_refresh_token,
    oidc_access_token,
    oidc_url,
    oidc_agent_account,
):
    """
    List VO membership(s) of access token

    Resolves a valid access token first (oidc-agent, refresh token or the
    token given directly), then prints one VO name per line.
    """
    oidc_access_token = get_access_token(
        oidc_access_token,
        oidc_refresh_token,
        oidc_client_id,
        oidc_client_secret,
        oidc_url,
        oidc_agent_account,
    )
    vos = token_list_vos(oidc_access_token, oidc_url)
    print("\n".join(vos))
| 28.133094
| 88
| 0.639816
|
acfcb5e32a383e781d119e90cc0657aa2194d6b3
| 673
|
py
|
Python
|
Chapter 13/hello_world2.py
|
nescience8/starting-out-with-python-global-4th-edition
|
c16f93b7cbb4c7ae7b57653a7190bf192fe6b472
|
[
"MIT"
] | 35
|
2019-05-03T00:30:31.000Z
|
2022-01-20T06:57:25.000Z
|
Chapter 13/hello_world2.py
|
nescience8/starting-out-with-python-global-4th-edition
|
c16f93b7cbb4c7ae7b57653a7190bf192fe6b472
|
[
"MIT"
] | null | null | null |
Chapter 13/hello_world2.py
|
nescience8/starting-out-with-python-global-4th-edition
|
c16f93b7cbb4c7ae7b57653a7190bf192fe6b472
|
[
"MIT"
] | 22
|
2020-05-13T21:20:02.000Z
|
2021-12-21T08:35:59.000Z
|
# This program displays two labels with text.
import tkinter
class MyGUI:
    """A window that displays two text labels."""

    def __init__(self):
        # Main application window.
        self.main_window = tkinter.Tk()

        # Two Label widgets with fixed text.
        self.label1 = tkinter.Label(self.main_window, text='Hello World!')
        self.label2 = tkinter.Label(self.main_window, text='This is my GUI program.')

        # Lay out both labels, then block in the tkinter event loop.
        self.label1.pack()
        self.label2.pack()
        tkinter.mainloop()
# Create an instance of the MyGUI class (builds the window and blocks in mainloop).
my_gui = MyGUI()
| 25.884615
| 56
| 0.580981
|
acfcb5fa170f60a2d085e19b0d780459820cdd8f
| 1,147
|
py
|
Python
|
learning_log/urls.py
|
9527A/learning_log_django
|
d4780ae82b3e3e72ab2f6d9e26364b8ddc2a8706
|
[
"MIT"
] | 1
|
2019-06-10T11:37:48.000Z
|
2019-06-10T11:37:48.000Z
|
learning_log/urls.py
|
9527A/learning_log_django
|
d4780ae82b3e3e72ab2f6d9e26364b8ddc2a8706
|
[
"MIT"
] | null | null | null |
learning_log/urls.py
|
9527A/learning_log_django
|
d4780ae82b3e3e72ab2f6d9e26364b8ddc2a8706
|
[
"MIT"
] | null | null | null |
"""learning_log URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
# from django.conf.urls import include, url
from django.urls import path, include, re_path
urlpatterns = [
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^users/', include('users.urls', namespace='users')),
    path(r'', include('learning_logs.urls', namespace='learning_logs')),
    # Pre-Django-2.0 equivalents kept for reference:
    # url(r'^admin/', include(admin.site.urls)),
    # url(r'', include('learning_logs.urls', namespace='learning_logs')),
    # NOTE: Django 2.0 replaces url() with path()/re_path(); also, putting the
    # namespace inside include() raised an error here, so it is passed as a
    # keyword argument to include() instead.
]
#
| 40.964286
| 129
| 0.709677
|
acfcb60f92fb82d806edd4ee422c352c47b9968c
| 7,610
|
py
|
Python
|
python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py
|
aasthajh/cucim
|
a95cc5c4ab25beffeac42d642dea8cb1bbf21408
|
[
"Apache-2.0"
] | 131
|
2021-04-09T19:02:10.000Z
|
2022-03-25T08:49:11.000Z
|
python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py
|
aasthajh/cucim
|
a95cc5c4ab25beffeac42d642dea8cb1bbf21408
|
[
"Apache-2.0"
] | 222
|
2021-04-12T07:15:14.000Z
|
2022-03-31T20:01:01.000Z
|
python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py
|
aasthajh/cucim
|
a95cc5c4ab25beffeac42d642dea8cb1bbf21408
|
[
"Apache-2.0"
] | 34
|
2021-04-09T18:54:13.000Z
|
2022-03-29T12:59:26.000Z
|
import itertools
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal, assert_array_equal
from cucim.skimage._shared.testing import assert_no_warnings
from cucim.skimage.color.colorlabel import label2rgb
def test_deprecation_warning():
    """Calling label2rgb with positional image/label emits a FutureWarning."""
    image = cp.ones((3, 3))
    label = cp.ones((3, 3))
    with pytest.warns(FutureWarning) as record:
        label2rgb(image, label)
    expected_msg = "The new recommended value"
    assert str(record[0].message).startswith(expected_msg)
def test_shape_mismatch():
    """Image and label arrays of different shapes raise ValueError."""
    image = cp.ones((3, 3))
    label = cp.ones((2, 2))
    with pytest.raises(ValueError):
        label2rgb(image, label, bg_label=-1)
def test_wrong_kind():
    """An unrecognized ``kind`` argument raises ValueError."""
    label = cp.ones((3, 3))
    # Must not raise an error.
    label2rgb(label, bg_label=-1)
    # kind='foo' is wrong.
    with pytest.raises(ValueError):
        label2rgb(label, kind="foo", bg_label=-1)
def test_uint_image():
    """uint8 input images are accepted; output is float and capped at 1."""
    img = cp.random.randint(0, 255, (10, 10), dtype=cp.uint8)
    labels = cp.zeros((10, 10), dtype=cp.int64)
    labels[1:3, 1:3] = 1
    labels[6:9, 6:9] = 2
    output = label2rgb(labels, image=img, bg_label=0)
    # Make sure that the output is made of floats and in the correct range
    assert cp.issubdtype(output.dtype, cp.floating)
    assert output.max() <= 1
def test_rgb():
    """With both alphas at 1, each label maps exactly to its given color."""
    image = cp.ones((1, 3))
    label = cp.arange(3).reshape(1, -1)
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    # Set alphas just in case the defaults change
    rgb = label2rgb(label, image=image, colors=colors, alpha=1,
                    image_alpha=1, bg_label=-1)
    assert_array_almost_equal(rgb, [colors])
def test_alpha():
    """With alpha=0 the overlay is invisible: every channel equals the image."""
    image = cp.random.uniform(size=(3, 3))
    label = cp.random.randint(0, 9, size=(3, 3))
    # If we set `alpha = 0`, then rgb should match image exactly.
    rgb = label2rgb(label, image=image, alpha=0, image_alpha=1,
                    bg_label=-1)
    assert_array_almost_equal(rgb[..., 0], image)
    assert_array_almost_equal(rgb[..., 1], image)
    assert_array_almost_equal(rgb[..., 2], image)
def test_no_input_image():
    """Without an image, labels are rendered with the given colors directly."""
    label = cp.arange(3).reshape(1, -1)
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    rgb = label2rgb(label, colors=colors, bg_label=-1)
    assert_array_almost_equal(rgb, [colors])
def test_image_alpha():
    """With image_alpha=0 the image is invisible: output is pure label colors."""
    image = cp.random.uniform(size=(1, 3))
    label = cp.arange(3).reshape(1, -1)
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    # If we set `image_alpha = 0`, then rgb should match label colors exactly.
    rgb = label2rgb(label, image=image, colors=colors, alpha=1,
                    image_alpha=0, bg_label=-1)
    assert_array_almost_equal(rgb, [colors])
def test_color_names():
    """Color names ('red', 'lime', 'blue') resolve to their RGB tuples."""
    image = cp.ones((1, 3))
    label = cp.arange(3).reshape(1, -1)
    cnames = ['red', 'lime', 'blue']
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    # Set alphas just in case the defaults change
    rgb = label2rgb(label, image=image, colors=cnames, alpha=1,
                    image_alpha=1, bg_label=-1)
    assert_array_almost_equal(rgb, [colors])
def test_bg_and_color_cycle():
    """Background label gets bg_color; other labels cycle through colors."""
    image = cp.zeros((1, 10))  # dummy image
    label = cp.arange(10).reshape(1, -1)
    colors = [(1, 0, 0), (0, 0, 1)]
    bg_color = (0, 0, 0)
    rgb = label2rgb(label, image=image, bg_label=0, bg_color=bg_color,
                    colors=colors, alpha=1)
    assert_array_almost_equal(rgb[0, 0], bg_color)
    for pixel, color in zip(rgb[0, 1:], itertools.cycle(colors)):
        assert_array_almost_equal(pixel, color)
def test_negative_labels():
    """Negative labels are colored from the palette; bg_label=0 stays black."""
    labels = cp.array([0, -1, -2, 0])
    rout = cp.array([(0., 0., 0.), (0., 0., 1.), (1., 0., 0.), (0., 0., 0.)])
    assert_array_almost_equal(
        rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1))
def test_nonconsecutive():
    """Non-consecutive label values still cycle through the color list."""
    labels = cp.array([0, 2, 4, 0])
    colors = [(1, 0, 0), (0, 0, 1)]
    rout = cp.array([(1., 0., 0.), (0., 0., 1.), (1., 0., 0.), (1., 0., 0.)])
    assert_array_almost_equal(
        rout, label2rgb(labels, colors=colors, alpha=1,
                        image_alpha=1, bg_label=-1))
def test_label_consistency():
    """Assert that the same labels map to the same colors."""
    label_1 = cp.arange(5).reshape(1, -1)
    label_2 = cp.array([0, 1])
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1)]
    # Set alphas just in case the defaults change
    rgb_1 = label2rgb(label_1, colors=colors, bg_label=-1)
    rgb_2 = label2rgb(label_2, colors=colors, bg_label=-1)
    # Labels 0 and 1 occur in both images and must receive identical colors.
    for label_id in label_2.ravel():
        assert_array_almost_equal(rgb_1[label_1 == label_id],
                                  rgb_2[label_2 == label_id])
def test_leave_labels_alone():
    """label2rgb must not mutate the caller's label array."""
    labels = cp.array([-1, 0, 1])
    labels_saved = labels.copy()
    label2rgb(labels, bg_label=-1)
    label2rgb(labels, bg_label=1)
    assert_array_equal(labels, labels_saved)
# TODO: diagnose test error that occurs only with CUB enabled: CuPy bug?
def test_avg():
    """kind='avg' paints each label region with its mean image color."""
    # label image
    # fmt: off
    label_field = cp.asarray([[1, 1, 1, 2],
                              [1, 2, 2, 2],
                              [3, 3, 4, 4]], dtype=np.uint8)

    # color image
    r = cp.asarray([[1., 1., 0., 0.],
                    [0., 0., 1., 1.],
                    [0., 0., 0., 0.]])
    g = cp.asarray([[0., 0., 0., 1.],
                    [1., 1., 1., 0.],
                    [0., 0., 0., 0.]])
    b = cp.asarray([[0., 0., 0., 1.],
                    [0., 1., 1., 1.],
                    [0., 0., 1., 1.]])
    image = cp.dstack((r, g, b))

    # reference label-colored image
    rout = cp.asarray([[0.5, 0.5, 0.5, 0.5],
                       [0.5, 0.5, 0.5, 0.5],
                       [0. , 0. , 0. , 0. ]]) # noqa
    gout = cp.asarray([[0.25, 0.25, 0.25, 0.75],
                       [0.25, 0.75, 0.75, 0.75],
                       [0. , 0. , 0. , 0. ]]) # noqa
    bout = cp.asarray([[0. , 0. , 0. , 1. ], # noqa
                       [0. , 1. , 1. , 1. ], # noqa
                       [0.0, 0.0, 1.0, 1.0]]) # noqa
    expected_out = cp.dstack((rout, gout, bout))

    # test standard averaging
    out = label2rgb(label_field, image, kind='avg', bg_label=-1)
    assert_array_equal(out, expected_out)

    # test averaging with custom background value
    out_bg = label2rgb(label_field, image, bg_label=2, bg_color=(0, 0, 0),
                       kind='avg')
    expected_out_bg = expected_out.copy()
    expected_out_bg[label_field == 2] = 0
    assert_array_equal(out_bg, expected_out_bg)

    # test default background color
    out_bg = label2rgb(label_field, image, bg_label=2, kind='avg')
    assert_array_equal(out_bg, expected_out_bg)
def test_negative_intensity():
    """Negative image intensities trigger a UserWarning."""
    labels = cp.arange(100).reshape(10, 10)
    image = cp.full((10, 10), -1, dtype="float64")
    with pytest.warns(UserWarning):
        label2rgb(labels, image, bg_label=-1)
def test_bg_color_rgb_string():
    """bg_color may be given as a color-name string (here 'red')."""
    img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
    labels = np.zeros((10, 10), dtype=np.int64)
    labels[1:3, 1:3] = 1
    labels[6:9, 6:9] = 2
    img = cp.asarray(img)
    labels = cp.asarray(labels)
    output = label2rgb(labels, image=img, alpha=0.9, bg_label=0, bg_color='red')
    assert output[0, 0, 0] > 0.9  # red channel
def test_avg_with_2d_image():
    """kind='avg' accepts a 2-D grayscale image without emitting warnings."""
    img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
    labels = np.zeros((10, 10), dtype=np.int64)
    labels[1:3, 1:3] = 1
    labels[6:9, 6:9] = 2
    img = cp.asarray(img)
    labels = cp.asarray(labels)
    assert_no_warnings(label2rgb, labels, image=img, bg_label=0, kind='avg')
| 33.822222
| 80
| 0.579238
|
acfcb6e9a32e47bd6d4543bf2746e68857598624
| 2,334
|
py
|
Python
|
web/api/models/api_response_model.py
|
alexpotter1/vulndetect-ml
|
338fbf919b24520f9107a1604d1c8af48aadff76
|
[
"MIT"
] | 1
|
2020-02-25T01:53:23.000Z
|
2020-02-25T01:53:23.000Z
|
web/api/models/api_response_model.py
|
alexpotter1/vulndetect-ml
|
338fbf919b24520f9107a1604d1c8af48aadff76
|
[
"MIT"
] | null | null | null |
web/api/models/api_response_model.py
|
alexpotter1/vulndetect-ml
|
338fbf919b24520f9107a1604d1c8af48aadff76
|
[
"MIT"
] | 1
|
2020-10-24T15:30:38.000Z
|
2020-10-24T15:30:38.000Z
|
from typing import Optional

# Supported numeric HTTP status codes mapped to their full status-line
# strings. Codes outside this mapping are reported as '501 Not Implemented'.
STATUS_CODES = {
    200: '200 OK',
    201: '201 Created',
    400: '400 Bad Request',
    404: '404 Not Found',
    500: '500 Internal Server Error',
}


class APIResponse(object):
    """Fluent builder for the JSON payload returned by the scan API.

    Every field exposes a property, a type-coercing setter and a
    ``with_<field>`` helper that returns ``self``, so responses can be
    assembled in a chain and serialized with :meth:`build`.
    """

    def __init__(self):
        # All fields start unset (None) and are filled via the setters.
        self._statusCode: Optional[str] = None
        self._message: Optional[str] = None
        self._isVulnerable: Optional[bool] = None
        self._vulnerabilityCategory: Optional[str] = None
        self._predictionConfidence: Optional[float] = None

    @property
    def statusCode(self):
        """Formatted HTTP status line (e.g. '200 OK')."""
        return self._statusCode

    @statusCode.setter
    def statusCode(self, value: int):
        # Unknown codes map to '501 Not Implemented' instead of raising,
        # so a response can always be produced.
        self._statusCode = STATUS_CODES.get(value, '501 Not Implemented')

    def with_statusCode(self, value: int):
        """Set the status code and return self for chaining."""
        self.statusCode = value
        return self

    @property
    def message(self):
        """Human-readable response message."""
        return self._message

    @message.setter
    def message(self, value: str):
        self._message = str(value)

    def with_message(self, value: str):
        """Set the message and return self for chaining."""
        self.message = value
        return self

    @property
    def isVulnerable(self):
        """Whether the analyzed input was classified as vulnerable."""
        return self._isVulnerable

    @isVulnerable.setter
    def isVulnerable(self, value: bool):
        self._isVulnerable = bool(value)

    def with_isVulnerable(self, value: bool):
        """Set the vulnerability flag and return self for chaining."""
        self.isVulnerable = value
        return self

    @property
    def vulnerabilityCategory(self):
        """Name of the predicted vulnerability category."""
        return self._vulnerabilityCategory

    @vulnerabilityCategory.setter
    def vulnerabilityCategory(self, value: str):
        self._vulnerabilityCategory = str(value)

    def with_vulnerabilityCategory(self, value: str):
        """Set the vulnerability category and return self for chaining."""
        self.vulnerabilityCategory = value
        return self

    @property
    def predictionConfidence(self):
        """Model confidence for the prediction, as a float."""
        return self._predictionConfidence

    @predictionConfidence.setter
    def predictionConfidence(self, value: float):
        self._predictionConfidence = float(value)

    def with_predictionConfidence(self, value: float):
        """Set the prediction confidence and return self for chaining."""
        self.predictionConfidence = value
        return self

    def build(self):
        """Return the response as a plain dict ready for JSON serialization."""
        return {
            'status': self.statusCode,
            'message': self.message,
            'isVulnerable': self.isVulnerable,
            'vulnerabilityCategory': self.vulnerabilityCategory,
            'predictionConfidence': self.predictionConfidence
        }
| 26.224719
| 64
| 0.639246
|
acfcb6fb0bdd6dc5bbb1a842c75ea481555b3427
| 5,430
|
py
|
Python
|
Lib/site-packages/hackedit/vendor/humanfriendly/sphinx.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
Lib/site-packages/hackedit/vendor/humanfriendly/sphinx.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/hackedit/vendor/humanfriendly/sphinx.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
# Human friendly input/output in Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: February 17, 2016
# URL: https://humanfriendly.readthedocs.org
"""
Customizations for and integration with the Sphinx_ documentation generator.
The :mod:`humanfriendly.sphinx` module uses the `Sphinx extension API`_ to
customize the process of generating Sphinx based Python documentation.
The most relevant functions to take a look at are :func:`setup()`,
:func:`enable_special_methods()` and :func:`enable_usage_formatting()`.
.. _Sphinx: http://www.sphinx-doc.org/
.. _Sphinx extension API: http://sphinx-doc.org/extdev/appapi.html
"""
# Standard library modules.
import logging
import types
# Modules included in our package.
from humanfriendly.usage import USAGE_MARKER, render_usage
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def setup(app):
    """
    Enable all of the provided Sphinx_ customizations.

    :param app: The Sphinx application object.

    Add ``'humanfriendly.sphinx'`` to the ``extensions`` list in your Sphinx
    ``conf.py`` file and Sphinx will import this module and call this
    function automatically. At the moment this enables
    :func:`enable_special_methods()` and :func:`enable_usage_formatting()`
    (more may be added later); call those individually from your own
    ``setup()`` function if you prefer finer control.
    """
    enable_special_methods(app)
    enable_usage_formatting(app)
def enable_special_methods(app):
    """
    Enable documenting "special methods" using the autodoc_ extension.

    :param app: The Sphinx application object.

    Registers :func:`special_methods_callback()` as a handler for Sphinx's
    ``autodoc-skip-member`` events.

    .. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html
    """
    app.connect('autodoc-skip-member', special_methods_callback)
def special_methods_callback(app, what, name, obj, skip, options):
    """
    Enable documenting "special methods" using the autodoc_ extension.

    Refer to :func:`enable_special_methods()` to enable the use of this
    function (you probably don't want to call it directly).

    An ``autodoc-skip-member`` callback that forces inclusion of documented
    functions and methods (in particular "special methods" with two leading
    and trailing underscores), while leaving every other member's fate to
    autodoc's original decision. Compared to the ``special-members`` flag
    this never drags in noise such as ``__weakref__``, because only
    functions/methods with a docstring are affected.

    The parameters are those defined for Sphinx event callback functions.
    """
    has_docstring = bool(getattr(obj, '__doc__', None))
    is_routine = isinstance(obj, (types.FunctionType, types.MethodType))
    if has_docstring and is_routine:
        return False  # never skip documented functions/methods
    return skip  # defer to autodoc's original decision
def enable_usage_formatting(app):
    """
    Reformat human friendly usage messages to reStructuredText_.

    :param app: The Sphinx application object (as given to ``setup()``).

    Registers :func:`usage_message_callback()` as a handler for Sphinx's
    ``autodoc-process-docstring`` events.

    .. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
    """
    app.connect('autodoc-process-docstring', usage_message_callback)
def usage_message_callback(app, what, name, obj, options, lines):
    """
    Reformat human friendly usage messages to reStructuredText_.

    Refer to :func:`enable_usage_formatting()` to enable the use of this
    function (you probably don't want to call it directly).

    An ``autodoc-process-docstring`` callback: module docstrings whose first
    line starts with :data:`.USAGE_MARKER` are rewritten in place using
    :func:`.render_usage()`, so Sphinx doesn't mangle usage messages written
    to be human readable. All other docstrings pass through untouched.

    The parameters are those defined for Sphinx event callback functions.
    """
    # Only non-empty module docstrings are candidates.
    if not (isinstance(obj, types.ModuleType) and lines):
        return
    # Only docstrings that look like usage messages are rewritten.
    if not lines[0].startswith(USAGE_MARKER):
        return
    converted = render_usage('\n'.join(lines))
    # Replace the buffer contents in place; Sphinx holds a reference to it.
    lines[:] = converted.splitlines()
| 37.708333
| 97
| 0.712707
|
acfcb803241a02ecb8322eaf759b75c332d96ece
| 26,289
|
py
|
Python
|
mitmproxy/proxy/layers/http/_http2.py
|
nneonneo/mitmproxy
|
a201adebe174897c1c10e8d78028d44ea0b6fc92
|
[
"MIT"
] | null | null | null |
mitmproxy/proxy/layers/http/_http2.py
|
nneonneo/mitmproxy
|
a201adebe174897c1c10e8d78028d44ea0b6fc92
|
[
"MIT"
] | null | null | null |
mitmproxy/proxy/layers/http/_http2.py
|
nneonneo/mitmproxy
|
a201adebe174897c1c10e8d78028d44ea0b6fc92
|
[
"MIT"
] | null | null | null |
import collections
import time
from collections.abc import Sequence
from enum import Enum
from typing import ClassVar, Optional, Union
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.settings
import h2.stream
import h2.utilities
from mitmproxy import http, version
from mitmproxy.connection import Connection
from mitmproxy.net.http import status_codes, url
from mitmproxy.utils import human
from . import (
RequestData,
RequestEndOfMessage,
RequestHeaders,
RequestProtocolError,
ResponseData,
ResponseEndOfMessage,
ResponseHeaders,
RequestTrailers,
ResponseTrailers,
ResponseProtocolError,
)
from ._base import HttpConnection, HttpEvent, ReceiveHttp, format_error
from ._http_h2 import BufferedH2Connection, H2ConnectionLogger
from ...commands import CloseConnection, Log, SendData, RequestWakeup
from ...context import Context
from ...events import ConnectionClosed, DataReceived, Event, Start, Wakeup
from ...layer import CommandGenerator
from ...utils import expect
class StreamState(Enum):
    # Headers for this stream have not arrived yet.
    EXPECTING_HEADERS = 1
    # Headers were received; subsequent frames carry body data/trailers.
    HEADERS_RECEIVED = 2
# Exception types hyper-h2 is known to raise on malformed input.
# NOTE(review): presumably caught elsewhere to translate internal h2
# failures into protocol errors — confirm against the callers.
CATCH_HYPER_H2_ERRORS = (ValueError, IndexError)
class Http2Connection(HttpConnection):
    # Concrete subclasses supply the client/server h2 configuration here.
    h2_conf: ClassVar[h2.config.H2Configuration]
    h2_conf_defaults = dict(
        header_encoding=False,
        validate_outbound_headers=False,
        # validate_inbound_headers is controlled by the validate_inbound_headers option.
        normalize_inbound_headers=False,  # changing this to True is required to pass h2spec
        normalize_outbound_headers=False,
    )
    # Per-connection h2 state machine with buffered sending.
    h2_conn: BufferedH2Connection
    streams: dict[int, StreamState]
    """keep track of all active stream ids to send protocol errors on teardown"""

    # Direction-specific event classes (request vs. response side);
    # subclasses pick the matching set.
    ReceiveProtocolError: type[Union[RequestProtocolError, ResponseProtocolError]]
    ReceiveData: type[Union[RequestData, ResponseData]]
    ReceiveTrailers: type[Union[RequestTrailers, ResponseTrailers]]
    ReceiveEndOfMessage: type[Union[RequestEndOfMessage, ResponseEndOfMessage]]
    def __init__(self, context: Context, conn: Connection):
        """Set up the buffered h2 connection and the per-stream state table."""
        super().__init__(context, conn)
        if self.debug:
            # NOTE(review): this assigns to the shared class-level h2_conf,
            # so the logger of the most recently created connection wins —
            # presumably acceptable for debug output; confirm.
            self.h2_conf.logger = H2ConnectionLogger(
                f"{human.format_address(self.context.client.peername)}: "
                f"{self.__class__.__name__}"
            )
        self.h2_conf.validate_inbound_headers = (
            self.context.options.validate_inbound_headers
        )
        self.h2_conn = BufferedH2Connection(self.h2_conf)
        self.streams = {}
def is_closed(self, stream_id: int) -> bool:
"""Check if a non-idle stream is closed"""
stream = self.h2_conn.streams.get(stream_id, None)
if (
stream is not None
and stream.state_machine.state is not h2.stream.StreamState.CLOSED
and self.h2_conn.state_machine.state
is not h2.connection.ConnectionState.CLOSED
):
return False
else:
return True
def is_open_for_us(self, stream_id: int) -> bool:
"""Check if we can write to a non-idle stream."""
stream = self.h2_conn.streams.get(stream_id, None)
if (
stream is not None
and stream.state_machine.state
is not h2.stream.StreamState.HALF_CLOSED_LOCAL
and stream.state_machine.state is not h2.stream.StreamState.CLOSED
and self.h2_conn.state_machine.state
is not h2.connection.ConnectionState.CLOSED
):
return True
else:
return False
def _handle_event(self, event: Event) -> CommandGenerator[None]:
if isinstance(event, Start):
self.h2_conn.initiate_connection()
yield SendData(self.conn, self.h2_conn.data_to_send())
elif isinstance(event, HttpEvent):
if isinstance(event, (RequestData, ResponseData)):
if self.is_open_for_us(event.stream_id):
self.h2_conn.send_data(event.stream_id, event.data)
elif isinstance(event, (RequestTrailers, ResponseTrailers)):
if self.is_open_for_us(event.stream_id):
trailers = [*event.trailers.fields]
self.h2_conn.send_headers(
event.stream_id, trailers, end_stream=True
)
elif isinstance(event, (RequestEndOfMessage, ResponseEndOfMessage)):
if self.is_open_for_us(event.stream_id):
self.h2_conn.end_stream(event.stream_id)
elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
if not self.is_closed(event.stream_id):
code = {
status_codes.CLIENT_CLOSED_REQUEST: h2.errors.ErrorCodes.CANCEL,
}.get(event.code, h2.errors.ErrorCodes.INTERNAL_ERROR)
stream: h2.stream.H2Stream = self.h2_conn.streams[event.stream_id]
send_error_message = (
isinstance(event, ResponseProtocolError)
and self.is_open_for_us(event.stream_id)
and not stream.state_machine.headers_sent
and event.code != status_codes.NO_RESPONSE
)
if send_error_message:
self.h2_conn.send_headers(
event.stream_id,
[
(b":status", b"%d" % event.code),
(b"server", version.MITMPROXY.encode()),
(b"content-type", b"text/html"),
],
)
self.h2_conn.send_data(
event.stream_id,
format_error(event.code, event.message),
end_stream=True,
)
else:
self.h2_conn.reset_stream(event.stream_id, code)
else:
raise AssertionError(f"Unexpected event: {event}")
data_to_send = self.h2_conn.data_to_send()
if data_to_send:
yield SendData(self.conn, data_to_send)
elif isinstance(event, DataReceived):
try:
try:
events = self.h2_conn.receive_data(event.data)
except CATCH_HYPER_H2_ERRORS as e: # pragma: no cover
# this should never raise a ValueError, but we triggered one while fuzzing:
# https://github.com/python-hyper/hyper-h2/issues/1231
# this stays here as defense-in-depth.
raise h2.exceptions.ProtocolError(
f"uncaught hyper-h2 error: {e}"
) from e
except h2.exceptions.ProtocolError as e:
events = [e]
for h2_event in events:
if self.debug:
yield Log(f"{self.debug}[h2] {h2_event}", "debug")
if (yield from self.handle_h2_event(h2_event)):
if self.debug:
yield Log(f"{self.debug}[h2] done", "debug")
return
data_to_send = self.h2_conn.data_to_send()
if data_to_send:
yield SendData(self.conn, data_to_send)
elif isinstance(event, ConnectionClosed):
yield from self.close_connection("peer closed connection")
else:
raise AssertionError(f"Unexpected event: {event!r}")
def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
"""returns true if further processing should be stopped."""
if isinstance(event, h2.events.DataReceived):
state = self.streams.get(event.stream_id, None)
if state is StreamState.HEADERS_RECEIVED:
yield ReceiveHttp(self.ReceiveData(event.stream_id, event.data))
elif state is StreamState.EXPECTING_HEADERS:
yield from self.protocol_error(
f"Received HTTP/2 data frame, expected headers."
)
return True
self.h2_conn.acknowledge_received_data(
event.flow_controlled_length, event.stream_id
)
elif isinstance(event, h2.events.TrailersReceived):
trailers = http.Headers(event.headers)
yield ReceiveHttp(self.ReceiveTrailers(event.stream_id, trailers))
elif isinstance(event, h2.events.StreamEnded):
state = self.streams.get(event.stream_id, None)
if state is StreamState.HEADERS_RECEIVED:
yield ReceiveHttp(self.ReceiveEndOfMessage(event.stream_id))
elif state is StreamState.EXPECTING_HEADERS:
raise AssertionError("unreachable")
if self.is_closed(event.stream_id):
self.streams.pop(event.stream_id, None)
elif isinstance(event, h2.events.StreamReset):
if event.stream_id in self.streams:
try:
err_str = h2.errors.ErrorCodes(event.error_code).name
except ValueError:
err_str = str(event.error_code)
err_code = {
h2.errors.ErrorCodes.CANCEL: status_codes.CLIENT_CLOSED_REQUEST,
}.get(event.error_code, self.ReceiveProtocolError.code)
yield ReceiveHttp(
self.ReceiveProtocolError(
event.stream_id,
f"stream reset by client ({err_str})",
code=err_code,
)
)
self.streams.pop(event.stream_id)
else:
pass # We don't track priority frames which could be followed by a stream reset here.
elif isinstance(event, h2.exceptions.ProtocolError):
yield from self.protocol_error(f"HTTP/2 protocol error: {event}")
return True
elif isinstance(event, h2.events.ConnectionTerminated):
yield from self.close_connection(f"HTTP/2 connection closed: {event!r}")
return True
# The implementation above isn't really ideal, we should probably only terminate streams > last_stream_id?
# We currently lack a mechanism to signal that connections are still active but cannot be reused.
# for stream_id in self.streams:
# if stream_id > event.last_stream_id:
# yield ReceiveHttp(self.ReceiveProtocolError(stream_id, f"HTTP/2 connection closed: {event!r}"))
# self.streams.pop(stream_id)
elif isinstance(event, h2.events.RemoteSettingsChanged):
pass
elif isinstance(event, h2.events.SettingsAcknowledged):
pass
elif isinstance(event, h2.events.PriorityUpdated):
pass
elif isinstance(event, h2.events.PingReceived):
pass
elif isinstance(event, h2.events.PingAckReceived):
pass
elif isinstance(event, h2.events.PushedStreamReceived):
yield Log(
"Received HTTP/2 push promise, even though we signalled no support.",
"error",
)
elif isinstance(event, h2.events.UnknownFrameReceived):
# https://http2.github.io/http2-spec/#rfc.section.4.1
# Implementations MUST ignore and discard any frame that has a type that is unknown.
yield Log(f"Ignoring unknown HTTP/2 frame type: {event.frame.type}")
else:
raise AssertionError(f"Unexpected event: {event!r}")
return False
def protocol_error(
self,
message: str,
error_code: int = h2.errors.ErrorCodes.PROTOCOL_ERROR,
) -> CommandGenerator[None]:
yield Log(f"{human.format_address(self.conn.peername)}: {message}")
self.h2_conn.close_connection(error_code, message.encode())
yield SendData(self.conn, self.h2_conn.data_to_send())
yield from self.close_connection(message)
def close_connection(self, msg: str) -> CommandGenerator[None]:
yield CloseConnection(self.conn)
for stream_id in self.streams:
yield ReceiveHttp(self.ReceiveProtocolError(stream_id, msg))
self.streams.clear()
self._handle_event = self.done # type: ignore
@expect(DataReceived, HttpEvent, ConnectionClosed, Wakeup)
def done(self, _) -> CommandGenerator[None]:
yield from ()
def normalize_h1_headers(
    headers: list[tuple[bytes, bytes]], is_client: bool
) -> list[tuple[bytes, bytes]]:
    """Lowercase HTTP/1-style header names so they are valid for HTTP/2.

    HTTP/1 servers commonly send capitalized headers (Content-Length vs
    content-length), which HTTP/2 forbids, so we normalize them here.
    """
    flags = h2.utilities.HeaderValidationFlags(is_client, False, not is_client, False)
    normalized = h2.utilities.normalize_outbound_headers(headers, flags)
    # Materialize the result: hyper-h2 silently drops headers when handed a
    # one-shot iterator instead of a re-iterable sequence.
    return list(normalized)
def normalize_h2_headers(headers: list[tuple[bytes, bytes]]) -> CommandGenerator[None]:
    """Lowercase header names in place, yielding a Log command per mutation."""
    for index, (name, value) in enumerate(headers):
        if name.islower():
            continue
        yield Log(
            f"Lowercased {repr(name).lstrip('b')} header as uppercase is not allowed with HTTP/2."
        )
        headers[index] = (name.lower(), value)
class Http2Server(Http2Connection):
    """HTTP/2 connection facing the proxy's client (we act as the server)."""

    h2_conf = h2.config.H2Configuration(
        **Http2Connection.h2_conf_defaults,
        client_side=False,
    )
    ReceiveProtocolError = RequestProtocolError
    ReceiveData = RequestData
    ReceiveTrailers = RequestTrailers
    ReceiveEndOfMessage = RequestEndOfMessage

    def __init__(self, context: Context):
        super().__init__(context, context.client)

    def _handle_event(self, event: Event) -> CommandGenerator[None]:
        """Serialize outgoing response headers; defer everything else to the base class."""
        if isinstance(event, ResponseHeaders):
            if self.is_open_for_us(event.stream_id):
                headers = [
                    (b":status", b"%d" % event.response.status_code),
                    *event.response.headers.fields,
                ]
                if event.response.is_http2:
                    if self.context.options.normalize_outbound_headers:
                        yield from normalize_h2_headers(headers)
                else:
                    # response came in over HTTP/1: names may be capitalized
                    headers = normalize_h1_headers(headers, False)
                self.h2_conn.send_headers(
                    event.stream_id,
                    headers,
                    end_stream=event.end_stream,
                )
                yield SendData(self.conn, self.h2_conn.data_to_send())
        else:
            yield from super()._handle_event(event)

    def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
        """Turn incoming request headers into a RequestHeaders event; returns True to stop."""
        if isinstance(event, h2.events.RequestReceived):
            try:
                (
                    host,
                    port,
                    method,
                    scheme,
                    authority,
                    path,
                    headers,
                ) = parse_h2_request_headers(event.headers)
            except ValueError as e:
                yield from self.protocol_error(f"Invalid HTTP/2 request headers: {e}")
                return True
            request = http.Request(
                host=host,
                port=port,
                method=method,
                scheme=scheme,
                authority=authority,
                path=path,
                http_version=b"HTTP/2.0",
                headers=headers,
                content=None,
                trailers=None,
                timestamp_start=time.time(),
                timestamp_end=None,
            )
            self.streams[event.stream_id] = StreamState.HEADERS_RECEIVED
            yield ReceiveHttp(
                RequestHeaders(
                    event.stream_id, request, end_stream=bool(event.stream_ended)
                )
            )
            return False
        else:
            return (yield from super().handle_h2_event(event))
class Http2Client(Http2Connection):
    """HTTP/2 connection to an upstream server (we act as the client).

    Maps outside stream ids onto locally allocated ones (HTTP/2 forbids
    opening a stream with a lower id than a previously opened one), queues
    streams beyond the concurrency limit, and optionally keeps the
    connection alive with PING frames.
    """

    h2_conf = h2.config.H2Configuration(
        **Http2Connection.h2_conf_defaults,
        client_side=True,
    )
    ReceiveProtocolError = ResponseProtocolError
    ReceiveData = ResponseData
    ReceiveTrailers = ResponseTrailers
    ReceiveEndOfMessage = ResponseEndOfMessage
    our_stream_id: dict[int, int]
    their_stream_id: dict[int, int]
    stream_queue: collections.defaultdict[int, list[Event]]
    """Queue of streams that we haven't sent yet because we have reached MAX_CONCURRENT_STREAMS"""
    provisional_max_concurrency: Optional[int] = 10
    """A provisional concurrency limit before we get the server's first settings frame."""
    last_activity: float
    """Timestamp of when we've last seen network activity on this connection."""

    def __init__(self, context: Context):
        super().__init__(context, context.server)
        # Disable HTTP/2 push for now to keep things simple.
        # don't send here, that is done as part of initiate_connection().
        self.h2_conn.local_settings.enable_push = 0
        # hyper-h2 pitfall: we need to acknowledge here, otherwise it sends out the old settings.
        self.h2_conn.local_settings.acknowledge()
        self.our_stream_id = {}
        self.their_stream_id = {}
        self.stream_queue = collections.defaultdict(list)

    def _handle_event(self, event: Event) -> CommandGenerator[None]:
        # We can't reuse stream ids from the client because they may arrived reordered here
        # and HTTP/2 forbids opening a stream on a lower id than what was previously sent (see test_stream_concurrency).
        # To mitigate this, we transparently map the outside's stream id to our stream id.
        if isinstance(event, HttpEvent):
            ours = self.our_stream_id.get(event.stream_id, None)
            if ours is None:
                no_free_streams = self.h2_conn.open_outbound_streams >= (
                    self.provisional_max_concurrency
                    or self.h2_conn.remote_settings.max_concurrent_streams
                )
                if no_free_streams:
                    # defer: replayed once a stream slot frees up (see below)
                    self.stream_queue[event.stream_id].append(event)
                    return
                ours = self.h2_conn.get_next_available_stream_id()
                self.our_stream_id[event.stream_id] = ours
                self.their_stream_id[ours] = event.stream_id
            event.stream_id = ours
        for cmd in self._handle_event2(event):
            if isinstance(cmd, ReceiveHttp):
                # translate back to the outside's stream id
                cmd.event.stream_id = self.their_stream_id[cmd.event.stream_id]
            yield cmd
        can_resume_queue = self.stream_queue and self.h2_conn.open_outbound_streams < (
            self.provisional_max_concurrency
            or self.h2_conn.remote_settings.max_concurrent_streams
        )
        if can_resume_queue:
            # popitem would be LIFO, but we want FIFO.
            events = self.stream_queue.pop(next(iter(self.stream_queue)))
            for event in events:
                yield from self._handle_event(event)

    def _handle_event2(self, event: Event) -> CommandGenerator[None]:
        """Inner event handler, operating on already-translated stream ids."""
        if isinstance(event, Wakeup):
            send_ping_now = (
                # add one second to avoid unnecessary roundtrip, we don't need to be super correct here.
                time.time() - self.last_activity + 1
                > self.context.options.http2_ping_keepalive
            )
            if send_ping_now:
                # PING frames MUST contain 8 octets of opaque data in the payload.
                # A sender can include any value it chooses and use those octets in any fashion.
                self.last_activity = time.time()
                self.h2_conn.ping(b"0" * 8)
                data = self.h2_conn.data_to_send()
                if data is not None:
                    yield Log(
                        f"Send HTTP/2 keep-alive PING to {human.format_address(self.conn.peername)}"
                    )
                    yield SendData(self.conn, data)
            time_until_next_ping = self.context.options.http2_ping_keepalive - (
                time.time() - self.last_activity
            )
            yield RequestWakeup(time_until_next_ping)
            return
        self.last_activity = time.time()
        if isinstance(event, Start):
            if self.context.options.http2_ping_keepalive > 0:
                yield RequestWakeup(self.context.options.http2_ping_keepalive)
            yield from super()._handle_event(event)
        elif isinstance(event, RequestHeaders):
            pseudo_headers = [
                (b":method", event.request.data.method),
                (b":scheme", event.request.data.scheme),
                (b":path", event.request.data.path),
            ]
            if event.request.authority:
                pseudo_headers.append((b":authority", event.request.data.authority))
            if event.request.is_http2:
                hdrs = list(event.request.headers.fields)
                if self.context.options.normalize_outbound_headers:
                    yield from normalize_h2_headers(hdrs)
            else:
                headers = event.request.headers
                # HTTP/1 Host header becomes the :authority pseudo header
                if not event.request.authority and "host" in headers:
                    headers = headers.copy()
                    pseudo_headers.append((b":authority", headers.pop(b"host")))
                hdrs = normalize_h1_headers(list(headers.fields), True)
            self.h2_conn.send_headers(
                event.stream_id,
                pseudo_headers + hdrs,
                end_stream=event.end_stream,
            )
            self.streams[event.stream_id] = StreamState.EXPECTING_HEADERS
            yield SendData(self.conn, self.h2_conn.data_to_send())
        else:
            yield from super()._handle_event(event)

    def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
        """Handle server-originated h2 events; returns True to stop processing."""
        if isinstance(event, h2.events.ResponseReceived):
            if (
                self.streams.get(event.stream_id, None)
                is not StreamState.EXPECTING_HEADERS
            ):
                yield from self.protocol_error(f"Received unexpected HTTP/2 response.")
                return True
            try:
                status_code, headers = parse_h2_response_headers(event.headers)
            except ValueError as e:
                yield from self.protocol_error(f"Invalid HTTP/2 response headers: {e}")
                return True
            response = http.Response(
                http_version=b"HTTP/2.0",
                status_code=status_code,
                reason=b"",
                headers=headers,
                content=None,
                trailers=None,
                timestamp_start=time.time(),
                timestamp_end=None,
            )
            self.streams[event.stream_id] = StreamState.HEADERS_RECEIVED
            yield ReceiveHttp(
                ResponseHeaders(event.stream_id, response, bool(event.stream_ended))
            )
            return False
        elif isinstance(event, h2.events.RequestReceived):
            yield from self.protocol_error(
                f"HTTP/2 protocol error: received request from server"
            )
            return True
        elif isinstance(event, h2.events.RemoteSettingsChanged):
            # We have received at least one settings frame now,
            # which means we can rely on the max concurrency in remote_settings
            self.provisional_max_concurrency = None
            return (yield from super().handle_h2_event(event))
        else:
            return (yield from super().handle_h2_event(event))
def split_pseudo_headers(
    h2_headers: Sequence[tuple[bytes, bytes]]
) -> tuple[dict[bytes, bytes], http.Headers]:
    """Separate leading ``:name`` pseudo-headers from the regular headers.

    Raises ValueError on duplicate pseudo-headers.
    """
    pseudo_headers: dict[bytes, bytes] = {}
    split_at = 0
    for name, value in h2_headers:
        if not name.startswith(b":"):
            # Pseudo-headers must be at the start, we are done here.
            break
        if name in pseudo_headers:
            raise ValueError(f"Duplicate HTTP/2 pseudo header: {name!r}")
        pseudo_headers[name] = value
        split_at += 1
    return pseudo_headers, http.Headers(h2_headers[split_at:])
def parse_h2_request_headers(
    h2_headers: Sequence[tuple[bytes, bytes]]
) -> tuple[str, int, bytes, bytes, bytes, bytes, http.Headers]:
    """Split HTTP/2 pseudo-headers from the actual headers and parse them.

    Returns (host, port, method, scheme, authority, path, headers);
    raises ValueError on missing/unknown pseudo-headers or a bad authority.
    """
    pseudo_headers, headers = split_pseudo_headers(h2_headers)
    try:
        method: bytes = pseudo_headers.pop(b":method")
        # :scheme is absent for HTTP/2 CONNECT requests, so this raises there.
        scheme: bytes = pseudo_headers.pop(b":scheme")
        path: bytes = pseudo_headers.pop(b":path")
        authority: bytes = pseudo_headers.pop(b":authority", b"")
    except KeyError as e:
        raise ValueError(f"Required pseudo header is missing: {e}")
    if pseudo_headers:
        raise ValueError(f"Unknown pseudo headers: {pseudo_headers}")
    if not authority:
        return "", 0, method, scheme, authority, path, headers
    host, port = url.parse_authority(authority, check=True)
    if port is None:
        port = 80 if scheme == b"http" else 443
    return host, port, method, scheme, authority, path, headers
def parse_h2_response_headers(
    h2_headers: Sequence[tuple[bytes, bytes]]
) -> tuple[int, http.Headers]:
    """Split HTTP/2 pseudo-headers from the actual headers and parse them.

    Returns (status_code, headers); raises ValueError when :status is
    missing or any unknown pseudo-header remains.
    """
    pseudo_headers, headers = split_pseudo_headers(h2_headers)
    try:
        status_code: int = int(pseudo_headers.pop(b":status"))
    except KeyError as e:
        raise ValueError(f"Required pseudo header is missing: {e}")
    if pseudo_headers:
        raise ValueError(f"Unknown pseudo headers: {pseudo_headers}")
    return status_code, headers
# Explicit public API of this module.
__all__ = [
    "Http2Client",
    "Http2Server",
]
| 41.662441
| 120
| 0.603066
|
acfcb9ac30d605f736c582456539db0a6ce553db
| 107
|
py
|
Python
|
tests/conftest.py
|
valentjedi/hackme
|
a61b02ea6a65cdcf907ec02a56541ea6851b04ea
|
[
"MIT"
] | 5
|
2018-03-02T06:59:02.000Z
|
2021-02-02T13:07:39.000Z
|
tests/conftest.py
|
valignatev/hackme
|
a61b02ea6a65cdcf907ec02a56541ea6851b04ea
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
valignatev/hackme
|
a61b02ea6a65cdcf907ec02a56541ea6851b04ea
|
[
"MIT"
] | null | null | null |
import pytest
from shoppy.app import app as shoppy_app
@pytest.fixture
def app():
    """Expose the shoppy application object as a pytest fixture."""
    return shoppy_app
| 11.888889
| 40
| 0.757009
|
acfcbc0cc09f69e9dcedd068b6d964ca0efd1cd3
| 3,728
|
py
|
Python
|
scripts/bioconductor/rootNodes.py
|
corneliusroemer/bioconda-recipes
|
e1eced9063e15f6a97ab2b8e42cf3e38af4c93ba
|
[
"MIT"
] | null | null | null |
scripts/bioconductor/rootNodes.py
|
corneliusroemer/bioconda-recipes
|
e1eced9063e15f6a97ab2b8e42cf3e38af4c93ba
|
[
"MIT"
] | null | null | null |
scripts/bioconductor/rootNodes.py
|
corneliusroemer/bioconda-recipes
|
e1eced9063e15f6a97ab2b8e42cf3e38af4c93ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import networkx as nx
from datetime import datetime, timedelta
import requests
from bioconda_utils import utils, graph
def getRepoData(ts):
    """Fetch bioconda repodata and collect recently uploaded R packages.

    For each of linux-64, noarch and osx-64, returns a set of package names
    (only bioconductor-*/r-* packages) whose upload timestamp is at or after
    *ts* (a POSIX timestamp in seconds; repodata stores milliseconds).

    Returns a list of three sets in the order [linux-64, noarch, osx-64].
    """
    res = []
    for subdir in ["linux-64", "noarch", "osx-64"]:
        r = requests.get(f"https://conda.anaconda.org/bioconda/{subdir}/repodata.json")
        # Parse the (large) JSON payload once; previously it was parsed twice
        # and the first result ('js') was never used.
        packages = r.json()['packages']
        s = set()
        for k, v in packages.items():
            if not k.startswith(('bioconductor-', 'r-')):
                continue
            if 'timestamp' in v:
                # repodata timestamps are in milliseconds
                if float(v['timestamp']) / 1000 >= ts:
                    s.add(v['name'])
        res.append(s)
    return res
def printRootNodes(config_path, recipe_folder, sinceNDays, missing, rootNodes):
    """Print Bioconductor/R packages in the recipe DAG with dependant counts.

    config_path/recipe_folder: bioconda-utils configuration inputs.
    sinceNDays: when set, restrict output to packages that have (or, with
        *missing*, have not) been uploaded in the past N days.
    missing: invert the *sinceNDays* filter to show not-yet-built packages.
    rootNodes: when True, only consider DAG nodes with in-degree 0.

    Output is sorted ascending by number of dependants, so the heaviest
    root nodes are printed last.
    """
    config = utils.load_config(config_path)
    blacklist = utils.get_blacklist(config, recipe_folder)
    recipes = utils.get_recipes(recipe_folder)
    if sinceNDays:
        timeStamp = datetime.timestamp(datetime.now() - timedelta(sinceNDays))
        linux, noarch, osx = getRepoData(timeStamp)
        # a package counts as "ready" if built for noarch or for both archs
        arch = linux.intersection(osx)
        ready = noarch.union(arch)
        print("{} built in noarch and both archs combined: {} noarch, {} linux-64, {} osx-64".format(len(ready), len(noarch), len(linux), len(osx)))
    dag, name2recipes = graph.build(recipes, config=config_path, blacklist=blacklist)
    if not rootNodes:
        root_nodes = sorted([(len(nx.algorithms.descendants(dag, k)), k) for k, v in dag.in_degree().items() if (k.startswith('bioconductor') or k.startswith('r-'))])
    else:
        root_nodes = sorted([(len(nx.algorithms.descendants(dag, k)), k) for k, v in dag.in_degree().items() if v == 0 and (k.startswith('bioconductor') or k.startswith('r-'))])
    print("Package\tNumber of dependant packages")
    for n in root_nodes:
        # blacklisted packages also show up as root nodes with out degree 0
        if n[1] in blacklist:
            continue
        if sinceNDays:
            # print built packages, or (with --missing) unbuilt ones
            if n[1] in ready:
                if not missing:
                    print("recipes/{}\t{}".format(n[1], n[0]))
            elif missing:
                print("recipes/{}\t{}".format(n[1], n[0]))
        else:
            print("recipes/{}\t{}".format(n[1], n[0]))
def main():
    """Parse command-line arguments and dispatch to printRootNodes."""
    parser = argparse.ArgumentParser(description="""Print a list of (missing) Bioconductor and R packages in the DAG.
    The children for each are also listed. Optionally, all root nodes (or only those
    that haven't yet been built) can be printed. This is primarily useful during
    Bioconductor builds in the bulk branch as build root nodes can be blacklisted
    to spread the load more evenly over worker nodes. Note that the output is sorted
    so the root nodes with the most children are at the end.""")
    parser.add_argument("config_path", default="config.yml", help="Location of config.yml (default: %(default)s", nargs='?')
    parser.add_argument("recipe_folder", default="recipes", help="Location of the recipes folder (default: %(default)s)", nargs='?')
    parser.add_argument("--sinceNDays", metavar='n', type=int, help="If specified, only root nodes with packages that have been uploaded (or not) to noarch or (linux64 & osx64) in the past N days, where N is the value given.")
    parser.add_argument('--rootNodes', action='store_true', help='Only print root nodes (i.e., those with no non-blacklisted Bioconda dependencies)')
    parser.add_argument('--missing', action='store_true', help='Only print root nodes that are missing in at least one subdirectory')
    args = parser.parse_args()
    printRootNodes(args.config_path, args.recipe_folder, args.sinceNDays, args.missing, args.rootNodes)
# Script entry point.
if __name__ == '__main__':
    main()
| 47.794872
| 226
| 0.661212
|
acfcbc1226064977e4bd08981608d0db2918cc5a
| 4,059
|
py
|
Python
|
calendar_util/util.py
|
christiankuhl/todolist
|
a10316f8b24c33da7d0b386a4fda5b4a9c128cae
|
[
"MIT"
] | null | null | null |
calendar_util/util.py
|
christiankuhl/todolist
|
a10316f8b24c33da7d0b386a4fda5b4a9c128cae
|
[
"MIT"
] | 5
|
2020-06-05T17:35:16.000Z
|
2021-09-07T10:20:36.000Z
|
calendar_util/util.py
|
christiankuhl/todolist
|
a10316f8b24c33da7d0b386a4fda5b4a9c128cae
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
from calendar_util.constants import *
from calendar_util.models import Holidays
from collections import defaultdict
import datetime
from math import ceil
import calendar as cal
# Use the ISO convention throughout this module: weeks start on Monday.
cal.setfirstweekday(cal.MONDAY)
def next_friday(date=None):
    """Return the next Friday on or after *date* (today when omitted).

    A Friday maps to itself.
    """
    if date is None:
        date = datetime.datetime.now().date()
    days_ahead = (4 - date.weekday()) % 7
    return date + datetime.timedelta(days=days_ahead)
def last_monday(date=None):
    """Return the most recent Monday on or before *date* (today when omitted).

    A Monday maps to itself.
    """
    if date is None:
        date = datetime.datetime.now().date()
    return date - datetime.timedelta(days=date.weekday())
def eod_today():
    """Return 'end of business' today: 17:00 in the project TIMEZONE.

    NOTE(review): replace() does not zero microseconds, so the current
    microsecond component is kept — confirm this is intentional.
    """
    return datetime.datetime.now().replace(hour=17, minute=0, second=0, tzinfo=TIMEZONE)
def next_workday(date=None):
    """Return the first workday strictly after *date* (defaults to eod_today()).

    A workday is defined by is_workday(): Monday-Friday and not a holiday.
    """
    if date is None:
        date = eod_today()
    # Previously a 'weekday' variable was computed but never used, and the
    # loop condition re-checked 'day > date' on every iteration; starting one
    # day ahead makes both unnecessary.
    day = date + datetime.timedelta(days=1)
    while not is_workday(day):
        day += datetime.timedelta(days=1)
    return day
def is_workday(day):
    """True when *day* (a datetime) falls on Mon-Fri and is not in Holidays."""
    is_weekday = cal.weekday(day.year, day.month, day.day) < 5
    # Short-circuit: Holidays() is only consulted for weekdays.
    return is_weekday and day.date() not in Holidays()
def to_datetime(date_string, format=DATETIME_FORMAT):
    """Parse *date_string* with *format* and attach the project TIMEZONE.

    Date-only input formats get 17:00:00 as their time of day.
    """
    parsed = datetime.datetime.strptime(date_string, format).replace(tzinfo=TIMEZONE)
    if format not in DATE_INPUT_FORMATS:
        return parsed
    return parsed.replace(hour=17, minute=0, second=0)
def to_string(datetime_obj):
    """Format a datetime using the project-wide DATETIME_FORMAT."""
    return datetime_obj.strftime(DATETIME_FORMAT)
def parse_duedate(date_string):
    """Parse a user-supplied due date, trying each accepted input format.

    Raises django ValidationError when no format matches.
    """
    for fmt in DATETIME_INPUT_FORMATS:
        try:
            # Pass the candidate format through. Previously the loop called
            # to_datetime(date_string) without it, so every iteration retried
            # the default DATETIME_FORMAT and the alternate input formats
            # could never match.
            return to_datetime(date_string, fmt)
        except (ValueError, TypeError):
            # ValueError: format mismatch; TypeError: non-string input.
            # The original bare 'except:' also swallowed these (and more).
            continue
    raise ValidationError('Enter a valid date/time', code='invalid', params={})
def next_ultimo(frequency, today=None):
    """Return the closing day of the current period for *frequency*.

    DAILY: next workday; MONTHLY/QUARTERLY/YEARLY: last calendar day of the
    month, quarter, or year containing *today* (defaults to eod_today()).
    """
    if today is None:
        today = eod_today()
    if frequency == DAILY:
        return next_workday(today)
    if frequency == MONTHLY:
        last_day = cal.monthrange(today.year, today.month)[1]
        return today.replace(day=last_day)
    if frequency == QUARTERLY:
        quarter_end_month = 3 * ceil(today.month / 3)
        last_day = cal.monthrange(today.year, quarter_end_month)[1]
        return today.replace(day=last_day, month=quarter_end_month)
    if frequency == YEARLY:
        return today.replace(day=31, month=12)
    # Unknown frequencies fall through and yield None (matches prior behavior).
def specstr(frequency, date):
    """Return a short human-readable label for *date*'s period under *frequency*."""
    if frequency == DAILY:
        return date.strftime("%d.%m.")
    if frequency == MONTHLY:
        return date.strftime("%b")
    if frequency == QUARTERLY:
        quarter = ceil(date.month / 3)
        return "Q{}".format(quarter)
    if frequency == YEARLY:
        return date.strftime("%Y")
def shift_date(date, shift, unit=DAILY):
    """Shift *date* by *shift* units (days, months, or years).

    Month shifts clamp the day to the target month's length
    (e.g. Jan 31 + 1 month -> Feb 28/29).
    """
    if unit == DAILY:
        return date + datetime.timedelta(days=shift)
    if unit == MONTHLY:
        months_total = date.month - 1 + shift
        target_year = date.year + months_total // 12
        target_month = months_total % 12 + 1
        target_day = min(date.day, cal.monthrange(target_year, target_month)[1])
        return date.replace(year=target_year, month=target_month, day=target_day)
    if unit == YEARLY:
        return date.replace(year=date.year + shift)
def to_workday(date):
    """Return *date* unchanged if it is a workday, otherwise the next workday."""
    return date if is_workday(date) else next_workday(date)
def workday_offset(date, offset):
    """Step *offset* workdays away from *date* (sign gives direction).

    offset == 0 snaps *date* forward onto a workday if needed.
    """
    if offset == 0:
        return to_workday(date)
    step = 1 if offset > 0 else -1
    remaining = abs(offset)
    current = date
    while remaining:
        current = shift_date(current, step)
        if is_workday(current):
            remaining -= 1
    return current
def weeknumbers(year):
    """Map ISO week number -> list of dates of *year* belonging to that week.

    January days falling into the previous year's last ISO week are filed
    under that week's number; December days that spill into week 1 of the
    following year are excluded (the scan stops there).
    """
    weeks = defaultdict(list)
    current = datetime.datetime(year=year, month=1, day=1)
    one_day = datetime.timedelta(days=1)
    while current.year == year:
        iso_week = current.isocalendar()[1]
        if iso_week == 1 and current.month == 12:
            break
        weeks[iso_week].append(current.date())
        current += one_day
    return weeks
| 32.472
| 89
| 0.630944
|
acfcbc891a5e8dae71016722a43772e24295dd1c
| 25,037
|
py
|
Python
|
venv/Lib/site-packages/aniso8601/tests/test_duration.py
|
GabrielSilva2y3d/dev-api
|
39f67521e0509dd073a6e051c06aa533c821d681
|
[
"MIT"
] | 9
|
2019-05-29T23:50:28.000Z
|
2021-01-29T20:51:05.000Z
|
venv/Lib/site-packages/aniso8601/tests/test_duration.py
|
GabrielSilva2y3d/dev-api
|
39f67521e0509dd073a6e051c06aa533c821d681
|
[
"MIT"
] | 1
|
2021-06-02T03:57:18.000Z
|
2021-06-02T03:57:18.000Z
|
venv/Lib/site-packages/aniso8601/tests/test_duration.py
|
GabrielSilva2y3d/dev-api
|
39f67521e0509dd073a6e051c06aa533c821d681
|
[
"MIT"
] | 3
|
2020-05-25T02:38:08.000Z
|
2021-01-20T06:23:06.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError, NegativeDurationError
from aniso8601.duration import (parse_duration, _parse_duration_prescribed,
_parse_duration_combined,
_parse_duration_prescribed_notime,
_parse_duration_prescribed_time,
_parse_duration_element,
_has_any_component, _component_order_correct)
from aniso8601.tests.compat import mock
class TestDurationParserFunctions(unittest.TestCase):
    def test_parse_duration(self):
        """parse_duration splits every supported ISO 8601 duration form
        (designator format, comma/dot fractions, combined date-time format)
        into components and passes them to PythonTimeBuilder.build_duration."""
        testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
                                           'PnD': '3', 'TnH': '4',
                                           'TnM': '54', 'TnS': '6'}),
                      ('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
                                             'PnD': '3', 'TnH': '4',
                                             'TnM': '54', 'TnS': '6.5'}),
                      ('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
                                             'PnD': '3', 'TnH': '4',
                                             'TnM': '54', 'TnS': '6.5'}),
                      ('P1Y2M3D', {'PnY': '1', 'PnM': '2',
                                   'PnW': None, 'PnD': '3'}),
                      ('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
                      ('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
                      ('PT0.0000001S', {'PnY': None, 'PnM': None, 'PnD': None,
                                        'TnH': None, 'TnM': None,
                                        'TnS': '0.0000001'}),
                      ('PT2.0000048S', {'PnY': None, 'PnM': None, 'PnD': None,
                                        'TnH': None, 'TnM': None,
                                        'TnS': '2.0000048'}),
                      ('P1Y', {'PnY': '1', 'PnM': None,
                               'PnW': None, 'PnD': None}),
                      ('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
                               'PnD': None}),
                      ('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
                               'PnD': None}),
                      ('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
                               'PnD': '1'}),
                      ('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}),
                      ('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}),
                      ('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
                                                'PnD': '04', 'TnH': '12',
                                                'TnM': '30', 'TnS': '05'}),
                      ('P0003-06-04T12:30:05.5', {'PnY': '0003', 'PnM': '06',
                                                  'PnD': '04', 'TnH': '12',
                                                  'TnM': '30', 'TnS': '05.5'}),
                      ('P0001-02-03T14:43:59.9999997', {'PnY': '0001',
                                                        'PnM': '02',
                                                        'PnD': '03',
                                                        'TnH': '14',
                                                        'TnM': '43',
                                                        'TnS':
                                                        '59.9999997'}))
        for testtuple in testtuples:
            # Patch the builder so only the component split is under test.
            with mock.patch.object(aniso8601.duration.PythonTimeBuilder,
                                   'build_duration') as mockBuildDuration:
                mockBuildDuration.return_value = testtuple[1]
                result = parse_duration(testtuple[0])
                self.assertEqual(result, testtuple[1])
                mockBuildDuration.assert_called_once_with(**testtuple[1])
def test_parse_duration_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6'}
mockBuilder.build_duration.return_value = expectedargs
result = parse_duration('P1Y2M3DT4H54M6S', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_duration.assert_called_once_with(**expectedargs)
def test_parse_duration_badtype(self):
testtuples = (None, 1, False, 1.234)
for testtuple in testtuples:
with self.assertRaises(ValueError):
parse_duration(testtuple, builder=None)
def test_parse_duration_nop(self):
with self.assertRaises(ISOFormatError):
#Duration must start with a P
parse_duration('1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_weekcombination(self):
with self.assertRaises(ISOFormatError):
#Week designator cannot be combined with other time designators
#https://bitbucket.org/nielsenb/aniso8601/issues/2/week-designators-should-not-be-combinable
parse_duration('P1Y2W', builder=None)
def test_parse_duration_negative(self):
with self.assertRaises(NegativeDurationError):
parse_duration('P-1Y', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-2M', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-3D', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T4H', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T54M', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T6S', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-7W', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
parse_duration('P1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1H1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('1Y2M3D1SPT1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M3D2MT1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P2M3D1ST1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M2MT3D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('PT1S1H', builder=None)
def test_parse_duration_badstr(self):
testtuples = ('bad', '')
for testtuple in testtuples:
with self.assertRaises(ValueError):
parse_duration(testtuple, builder=None)
def test_parse_duration_prescribed(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T1H', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T2M', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T3S', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-4W', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_prescribed_multiplefractions(self):
with self.assertRaises(ISOFormatError):
#Multiple fractions are not allowed
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6.1234S', None)
def test_parse_duration_prescribed_middlefraction(self):
with self.assertRaises(ISOFormatError):
#Fraction only allowed on final component
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6S', None)
def test_parse_duration_prescribed_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Dasdfasdf', None)
def test_parse_duration_prescribed_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('1Y2M3D1SPT1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Y2M3D2MT1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P2M3D1ST1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Y2M2MT3D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1D1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('PT1S1H', None)
def test_parse_duration_prescribed_notime(self):
testtuples = (('P1Y2M3D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed_notime(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_notime_timepart(self):
#Ensure no time part is allowed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H5S', None)
def test_parse_duration_prescribed_notime_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-1Y', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-2M', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-3D', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-7W', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-1Y2M3D', None)
def test_parse_duration_prescribed_notime_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1Y1M', None)
def test_parse_duration_prescribed_time(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed_time(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_time_timeindate(self):
#Don't allow time components in date half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D4HT54M6S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D6ST4H54M', None)
def test_parse_duration_prescribed_time_dateintime(self):
#Don't allow date components in time half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3DT1Y4H54M6S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2MT3D4H54M6S', None)
def test_parse_duration_prescribed_time_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_time('P-1Y2M3DT4H54M6S', None)
def test_parse_duration_prescribed_time_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('1Y2M3D1SPT1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D2MT1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3D1ST1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M2MT3D1S', None)
def test_parse_duration_combined(self):
testtuples = (('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05'}),
('P0003-06-04T12:30:05,5', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05.5'}),
('P0003-06-04T12:30:05.5', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05.5'}),
('P0001-02-03T14:43:59.9999997', {'PnY': '0001',
'PnM': '02',
'PnD': '03',
'TnH': '14',
'TnM': '43',
'TnS':
'59.9999997'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_combined(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_combined_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
_parse_duration_combined('P0003-06-04T12:30:05.5asdfasdf', None)
def test_parse_duration_element(self):
testtuples = (('P1Y2M3D', 'Y', '1'),
('P1Y2M3D', 'M', '2'),
('P1Y2M3D', 'D', '3'),
('PT4H54M6,5S', 'H', '4'),
('PT4H54M6,5S', 'M', '54'),
('PT4H54M6,5S', 'S', '6.5'),
('T4H5M6.1234S', 'H', '4'),
('T4H5M6.1234S', 'M', '5'),
('T4H5M6.1234S', 'S', '6.1234'))
for testtuple in testtuples:
self.assertEqual(_parse_duration_element(testtuple[0],
testtuple[1]),
testtuple[2])
def test_has_any_component(self):
self.assertTrue(_has_any_component('P1Y', ['Y', 'M']))
self.assertFalse(_has_any_component('P1Y', ['M', 'D']))
def test_component_order_correct(self):
self.assertTrue(_component_order_correct('P1Y1M1D',
['P', 'Y', 'M', 'D']))
self.assertTrue(_component_order_correct('P1Y1M',
['P', 'Y', 'M', 'D']))
self.assertFalse(_component_order_correct('P1D1Y1M',
['P', 'Y', 'M', 'D']))
self.assertFalse(_component_order_correct('PT1S1H',
['T', 'H', 'M', 'S']))
| 48.996086
| 104
| 0.477294
|
acfcbd24441982f326d475353e2d6214cb457640
| 1,155
|
py
|
Python
|
fsharp_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
fsharp_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
fsharp_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
from codestat_token import Token
from token_builders import (
TokenBuilder,
EscapedStringTokenBuilder
)
# token reader for single-character text literal (string)
class FsharpCharTokenBuilder(EscapedStringTokenBuilder):
    """Token builder for F# single-character text literals."""

    @staticmethod
    def __escape_z__():
        Token.__escape_z__()
        return 'Escape ?Z'

    def __init__(self, quotes):
        # char literals never span lines (second arg disables multiline)
        super().__init__(quotes, False)

    def get_score(self, line_printable_tokens):
        """Return a score for the candidate token; 0 disqualifies it."""
        text = self.text

        if text is None or len(text) < 2:
            return 0

        # must be closed with the same quote it was opened with
        if text[0] != text[-1]:
            return 0

        if '\\' in text:
            # escaped form: quote, backslash, one char, quote (max 4 chars),
            # and the backslash must come directly after the opening quote
            if len(text) > 4 or text[1] != '\\':
                return 0
        elif len(text) > 3:
            # unescaped form: quote, one char, quote (max 3 chars)
            return 0

        # a quote directly following an identifier is not a char literal
        if line_printable_tokens and \
           line_printable_tokens[-1].group == 'identifier':
            return 0

        return len(text)
| 24.0625
| 65
| 0.649351
|
acfcbd5fda902955a9eb6021890eb53b034fa478
| 405
|
py
|
Python
|
blender/arm/logicnode/value_random_color.py
|
DsmMatt/armory
|
3fa9321016f6e83b2c1009e5ce220566fb35011e
|
[
"Zlib"
] | 1
|
2018-12-04T05:33:53.000Z
|
2018-12-04T05:33:53.000Z
|
blender/arm/logicnode/value_random_color.py
|
DsmMatt/armory
|
3fa9321016f6e83b2c1009e5ce220566fb35011e
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/value_random_color.py
|
DsmMatt/armory
|
3fa9321016f6e83b2c1009e5ce220566fb35011e
|
[
"Zlib"
] | null | null | null |
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class RandomColorNode(Node, ArmLogicTreeNode):
    # NOTE: the class docstring doubles as the node's tooltip in Blender,
    # so it is left unchanged.
    '''Random color node'''
    bl_idname = 'LNRandomColorNode'
    bl_label = 'Random (Color)'
    bl_icon = 'GAME'

    def init(self, context):
        # Single output socket carrying the generated color; the actual
        # random-color computation presumably lives on the engine side
        # (matched by bl_idname) -- confirm against the Haxe runtime.
        self.outputs.new('NodeSocketColor', 'Color')
add_node(RandomColorNode, category='Value')
| 25.3125
| 52
| 0.71358
|
acfcbdb1107363f99b74f244e518d190be83d424
| 10,680
|
py
|
Python
|
daisy/tasks/ONT.py
|
tschoonj/cgat-daisy
|
f85a2c82ca04f352aad00660cfc14a9aa6773168
|
[
"MIT"
] | 1
|
2020-06-29T14:39:42.000Z
|
2020-06-29T14:39:42.000Z
|
daisy/tasks/ONT.py
|
tschoonj/cgat-daisy
|
f85a2c82ca04f352aad00660cfc14a9aa6773168
|
[
"MIT"
] | 1
|
2019-05-15T20:50:37.000Z
|
2019-05-15T20:50:37.000Z
|
daisy/tasks/ONT.py
|
tschoonj/cgat-daisy
|
f85a2c82ca04f352aad00660cfc14a9aa6773168
|
[
"MIT"
] | 1
|
2021-11-11T13:22:56.000Z
|
2021-11-11T13:22:56.000Z
|
import re
import os
from .ToolRunner import ToolRunner
from .CollateRunner import CollateRunner
from .MetricRunner import MetricRunner
import cgatcore.pipeline as P
import cgatcore.iotools as IOTools
import cgatcore.experiment as E
from .VariantCallers import VariantCaller, get_reference
from .Runner import resolve_argument
class run_tool_ont_nanonet(ToolRunner):
    """Basecall ONT reads with nanonet, splitting the work into batches.

    Each batch is written to ``<outfile>_<batch_id>.fastq.gz``; the batch
    outputs are concatenated into ``outfile`` at the end.
    """
    name = "ont_nanonet"
    expected = ["tgz"]
    output = "result.fastq.gz"
    path = "daisy"

    # Batching: run() iterates range(0, total_size, batch_size), one
    # statement per batch. Units of total_size are not shown here --
    # presumably "number of reads/files in the tarball"; TODO confirm.
    total_size = 100
    batch_size = 10
    num_threads = 5
    # Note that without specifying a chemistry, nanonet fails
    options = "--chemistry r9.4 --no-event_detect"

    def get_version(self):
        # The wrapped command ships with daisy itself.
        return "builtin"

    def run(self, outfile, params):
        """Build one basecalling statement per batch, run them as a job
        array, then concatenate the batch fastqs into ``outfile``."""
        statements = []
        batch_outfiles = []
        for batch_id, batch_start in enumerate(range(
                0, params.total_size, params.batch_size)):
            batch_outfile = "{outfile}_{batch_id}.fastq.gz".format(
                **locals())
            batch_outfiles.append(batch_outfile)
            # resume support: skip batches that were already produced
            if os.path.exists(batch_outfile):
                continue
            statements.append(
                "daisy ont-nanonet "
                "--log={batch_outfile}.log "
                "--batch-number={batch_id} "
                "--batch-size={params.batch_size} "
                "--num-threads={params.num_threads} "
                "{params.options} "
                "{params.tgz} "
                "2> {batch_outfile}.err "
                "| gzip "
                "> {batch_outfile}".format(
                    **locals()))

        retvals = P.run(statements,
                        job_array=True,
                        job_threads=params.num_threads)

        batch_outfiles = " ".join(batch_outfiles)
        statement = ("cat {batch_outfiles} > {outfile}; ".format(**locals()))
        # "rm -f {batch_outfiles}".format(
        #    **locals()))
        # NOTE(review): P.run() above appears to return a list, so this
        # append() nests a list inside retvals -- possibly extend() was
        # intended; confirm against how callers consume the return value.
        retvals.append(P.run(statement))
        return retvals
class run_collate_ont_classify(CollateRunner):
    """Collate ONT fastq files and split reads into pass/fail sets.

    The combined reads are piped through ``daisy fastq2fastq
    --method=filter-ONT``; reads removed by the filter go to a ``-fail``
    fastq derived from ``outfile``.
    """
    path = "daisy"
    name = "ont_daisy_classify"

    # thresholds forwarded to fastq2fastq's --min-average-quality /
    # --min-length options
    min_average_quality = 14
    min_length = 1000
    # optional input pre-filters applied in run()
    min_size_bytes = 0      # drop input files smaller than this (0 = off)
    newer_than = None       # drop input files older than this file's mtime

    def get_version(self):
        return "builtin"

    def run(self, infiles, outfile, params):
        """Filter and merge ``infiles`` into pass/fail fastq files.

        Raises ValueError if ``outfile`` does not follow the naming
        convention the fail-file name is derived from.
        """
        if not outfile.endswith("-pass.fastq.gz"):
            raise ValueError("outfile must end in -pass.fastq.gz, got {}".format(
                outfile))

        if params.min_size_bytes:
            before = len(infiles)
            infiles = [x for x in infiles if os.path.getsize(x) >= params.min_size_bytes]
            E.debug("removing small files: after={}, before={}, removed={}".format(
                len(infiles), before, before - len(infiles)))

        if params.newer_than:
            before = len(infiles)
            cutoff = os.path.getmtime(params.newer_than)
            infiles = [x for x in infiles if os.path.getmtime(x) > cutoff]
            E.debug("removing old files: after={}, before={}, removed={}".format(
                len(infiles), before, before - len(infiles)))

        # nothing to do: create an empty placeholder so downstream tasks
        # see an up-to-date output
        if len(infiles) == 0:
            E.warn("no files left after filtering, creating empty file")
            IOTools.touch_file(outfile)
            return

        infiles = " ".join(infiles)
        outfile_fail = IOTools.snip(outfile, "-pass.fastq.gz") + "-fail.fastq.gz"

        statement = (
            "zcat {infiles} "
            "| daisy fastq2fastq "
            "--method=filter-ONT "
            "--min-average-quality={params.min_average_quality} "
            "--log={outfile}.log "
            "--min-length={params.min_length} "
            "--output-removed-fastq={outfile_fail} "
            "- "
            "| gzip "
            "> {outfile}".format(**locals()))

        return P.run(statement)
class run_metric_ont_classify(MetricRunner):
    """Metric counterpart of the ONT classify step.

    Runs the same ``fastq2fastq --method=filter-ONT`` filter as the
    collate runner, but additionally writes the per-read statistics table
    (``--output-stats-tsv``) to ``outfile``.
    """
    path = "daisy"
    name = "ont_daisy_classify"

    # thresholds forwarded to fastq2fastq's --min-average-quality /
    # --min-length options
    min_average_quality = 14
    min_length = 1000

    def get_version(self):
        return "builtin"

    def run(self, infile, outfile, params):
        # outfile is the stats .tsv; pass/fail fastq names derive from it
        outfile_pass = IOTools.snip(outfile, ".tsv") + "-pass.fastq.gz"
        outfile_fail = IOTools.snip(outfile, ".tsv") + "-fail.fastq.gz"

        statement = (
            "zcat {infile} "
            "| daisy fastq2fastq "
            "--method=filter-ONT "
            "--min-average-quality={params.min_average_quality} "
            "--log={outfile}.log "
            "--min-length={params.min_length} "
            "--output-removed-fastq={outfile_fail} "
            "--output-stats-tsv={outfile} "
            "- "
            "| gzip "
            "> {outfile_pass} "
            "".format(**locals()))

        return P.run(statement)
class run_metric_ont_variant_depth_ratio(MetricRunner):
    """use freebayes to return a table with ref and alternate allele
    counts at positions given in a reference VCF file.

    If sample_size is given, a sample of homozygous reference alleles
    is added.
    """
    path = "samtools"
    path_freebayes = "freebayes"
    path_bcftools = "bcftools"

    reference_fasta = None
    reference_vcf = None
    ref_sample_size = None

    name = "ont_variant_depth_ratio"

    # restrict freebayes to SNVs and make it report allele observations
    # regardless of the alternate allele fraction
    options_freebayes = ("--no-indels --no-mnps --no-complex "
                         "--haplotype-length 0 --pooled-continuous "
                         "--min-alternate-fraction 0")

    options_bcftools = ""

    def get_version(self):
        help_string = E.run("{self.path_freebayes} --version".format(**locals()),
                            return_stdout=True).strip()
        # FIX: raw string -- "\s"/"\S" in a plain literal are invalid
        # escape sequences (DeprecationWarning today, SyntaxError in a
        # future Python release)
        return re.search(r"version:\s+(\S+)", help_string).groups()[0]

    def run(self, infile, outfile, params):
        """Genotype ``infile`` at the positions of the reference VCF.

        Raises ValueError when reference_fasta or reference_vcf is not
        configured.
        """
        if params.reference_fasta is None:
            raise ValueError("ont_variant_depth_ratio requires reference_fasta to be set")

        if params.reference_vcf is None:
            raise ValueError("ont_variant_depth_ratio requires reference_vcf to be set")

        # FIX: use a separate name for the list of statements instead of
        # rebinding "statement" from list to str at the end
        statements = []

        if params.ref_sample_size is not None:
            # augment the reference VCF with a random sample of
            # homozygous-reference sites drawn from the fasta
            reference_vcf = outfile + ".ref_sample.vcf.gz"
            statements.append(
                "daisy fasta2vcf "
                "--log={outfile}.fasta2vcf.log "
                "--sample-size={params.ref_sample_size} {params.reference_fasta} "
                "| bgzip "
                "> {outfile}.fasta2vcf.vcf.gz; "
                "tabix -p vcf {outfile}.fasta2vcf.vcf.gz; "
                "bcftools concat --allow-overlap "
                "{params.reference_vcf} "
                "{outfile}.fasta2vcf.vcf.gz "
                "| bgzip "
                "> {reference_vcf}; "
                "tabix -p vcf {reference_vcf} "
                .format(**locals()))
        else:
            reference_vcf = params.reference_vcf

        # genotype only at the input alleles; output is left as a VCF
        statements.append(
            "{params.path_freebayes} "
            "-f {params.reference_fasta} "
            "--variant-input {reference_vcf} "
            "--only-use-input-alleles "
            "{params.options_freebayes} "
            "{infile} "
            "| bgzip "
            "> {outfile}.genotyped.vcf.gz; ".format(**locals()))

        # bcftools post-processing below was disabled upstream; kept for
        # reference:
        # "tabix -p vcf {outfile}.genotyped.vcf.gz; "
        # "{params.path_bcftools} view {params.options_bcftools} "
        # "{reference_vcf} "
        # "| bgzip > {outfile}.ref.vcf.gz; "
        # "tabix -p vcf {outfile}.ref.vcf.gz; "
        # "{params.path_bcftools} query -f \"%%CHROM\\t%%POS\\t[%%GT]\\t[%%DPR]\\n\" "
        # "{outfile}.genotyped.vcf.gz > {outfile}.genotyped.tsv; "
        # "{params.path_bcftools} query -f \"%%CHROM\\t%%POS\\t[%%GT]\\n\" "
        # "{outfile}.ref.tsv; "
        # "join -1 2 -2 2 {outfile}.ref.tsv {outfile}.genotyped.tsv "
        # "| perl -p -e \"s/[, ]/\\t/g\" "
        # "| cut -f 1,3,5,6,7 "
        # "| grep -v '\.' "
        # "> {outfile}".format(**locals()))

        statement = ";".join(statements)
        return P.run(statement)
class run_tool_nanopolish_variants(VariantCaller):
    """Variant caller wrapping ``nanopolish variants``.

    Primary alignments are extracted from the input BAM and the read
    sequences are dumped to a fasta whose headers reference the per-read
    fast5 signal files, as nanopolish requires.
    """
    name = "ont_nanopolish_variants"
    path = "nanopolish"

    # location of the raw fast5 files referenced from the fasta headers
    fast5_path = "/data/gru/ont/andreas/fast5"

    # NOTE(review): not referenced in this class; presumably consumed by
    # the VariantCaller base class -- confirm.
    chunk_size = 1000000

    def get_version(self):
        help_string = E.run("{self.path} variants --version".format(**locals()),
                            return_stdout=True).strip()
        # FIX: raw string -- "\S" in a plain literal is an invalid escape
        # sequence (DeprecationWarning today, SyntaxError in a future
        # Python release)
        return re.search(r"Version (\S+)", help_string).groups()[0]

    def run(self, outfile, params):
        """Prepare filtered BAM/fasta inputs, then run nanopolish by region."""
        bam = resolve_argument(params.bam)
        reference_fasta = get_reference(params)

        filtered_bam = outfile + ".filtered.bam"
        filtered_fasta = outfile + ".filtered.fasta"

        retvals = []
        # keep only primary alignments (-F 256 drops secondary alignments)
        if not os.path.exists(filtered_bam):
            statement = (
                "samtools view -b -F 256 {bam} > {filtered_bam}; "
                "samtools index {filtered_bam}".format(**locals()))
            retvals.extend(P.run(statement))

        # fasta of unique read sequences; each header carries the path of
        # the read's fast5 file so nanopolish can find the raw signal
        if not os.path.exists(filtered_fasta):
            statement = (
                "samtools view {filtered_bam} "
                "| cut -f 1,10 "
                "| sort "
                "| uniq "
                "| sed -r 's:(\\S+)\\t:>\\1 {params.fast5_path}/\\1.fast5\\n:' "
                "> {filtered_fasta}".format(**locals()))
            retvals.extend(P.run(statement))

        statement = (
            "{params.path} "
            "variants "
            "--reads={filtered_fasta} "
            "--genome={reference_fasta} "
            "--bam={filtered_bam} "
            "{params.options} ".format(**locals()))

        # parallelize across genomic windows; nanopolish receives the
        # region through its --window option
        self.run_parallel_by_region(
            statement,
            bam,
            reference_fasta,
            outfile,
            params,
            region_option="--window={contig}:{start}-{end}",
            job_memory="16G")

        return retvals
class run_tools_ont_large_variant_caller(ToolRunner):
    """Run the daisy large-variant-caller on a BAM and filter long segments.

    Note that this script requires samtools to be on the PATH.
    """
    name = "ont_large_variant_caller"
    path = "large-variant-caller"
    expected = ["bam"]
    output = "result.bed.gz"

    # bed intervals at or above this length are dropped from the final output
    max_segment_length = 500000

    def get_version(self):
        # the wrapped command ships with daisy itself
        return "builtin"

    def run(self, outfile, params):
        bam = resolve_argument(params.bam)

        statement = (
            "daisy ont-large-variant-caller "
            "--log={outfile}.log "
            "--bamfile={bam} "
            "{params.options} "
            "| uniq "
            "| bgzip "
            "> {outfile}.all.bed.gz; "
            # second stage: keep only segments shorter than
            # max_segment_length ($3/$2 are the bed end/start columns)
            "zcat {outfile}.all.bed.gz "
            "| awk '$3 - $2 < {params.max_segment_length}' "
            "| bgzip "
            "> {outfile}; "
            .format(**locals()))

        return P.run(statement)
| 31.597633
| 90
| 0.544288
|
acfcbf2614abd719d68bf6e78f82f1bd53db7e65
| 14,267
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200801/get_security_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_security_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_security_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetSecurityRuleResult',
'AwaitableGetSecurityRuleResult',
'get_security_rule',
]
@pulumi.output_type
class GetSecurityRuleResult:
"""
Network security rule.
"""
def __init__(__self__, access=None, description=None, destination_address_prefix=None, destination_address_prefixes=None, destination_application_security_groups=None, destination_port_range=None, destination_port_ranges=None, direction=None, etag=None, id=None, name=None, priority=None, protocol=None, provisioning_state=None, source_address_prefix=None, source_address_prefixes=None, source_application_security_groups=None, source_port_range=None, source_port_ranges=None, type=None):
if access and not isinstance(access, str):
raise TypeError("Expected argument 'access' to be a str")
pulumi.set(__self__, "access", access)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if destination_address_prefix and not isinstance(destination_address_prefix, str):
raise TypeError("Expected argument 'destination_address_prefix' to be a str")
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
if destination_address_prefixes and not isinstance(destination_address_prefixes, list):
raise TypeError("Expected argument 'destination_address_prefixes' to be a list")
pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
if destination_application_security_groups and not isinstance(destination_application_security_groups, list):
raise TypeError("Expected argument 'destination_application_security_groups' to be a list")
pulumi.set(__self__, "destination_application_security_groups", destination_application_security_groups)
if destination_port_range and not isinstance(destination_port_range, str):
raise TypeError("Expected argument 'destination_port_range' to be a str")
pulumi.set(__self__, "destination_port_range", destination_port_range)
if destination_port_ranges and not isinstance(destination_port_ranges, list):
raise TypeError("Expected argument 'destination_port_ranges' to be a list")
pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
if direction and not isinstance(direction, str):
raise TypeError("Expected argument 'direction' to be a str")
pulumi.set(__self__, "direction", direction)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_address_prefix and not isinstance(source_address_prefix, str):
raise TypeError("Expected argument 'source_address_prefix' to be a str")
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if source_address_prefixes and not isinstance(source_address_prefixes, list):
raise TypeError("Expected argument 'source_address_prefixes' to be a list")
pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
if source_application_security_groups and not isinstance(source_application_security_groups, list):
raise TypeError("Expected argument 'source_application_security_groups' to be a list")
pulumi.set(__self__, "source_application_security_groups", source_application_security_groups)
if source_port_range and not isinstance(source_port_range, str):
raise TypeError("Expected argument 'source_port_range' to be a str")
pulumi.set(__self__, "source_port_range", source_port_range)
if source_port_ranges and not isinstance(source_port_ranges, list):
raise TypeError("Expected argument 'source_port_ranges' to be a list")
pulumi.set(__self__, "source_port_ranges", source_port_ranges)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def access(self) -> str:
"""
The network traffic is allowed or denied.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> Optional[str]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> Optional[Sequence[str]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[str]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> Optional[Sequence[str]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> str:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the security rule resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> Optional[str]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> Optional[Sequence[str]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[str]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence[str]]:
        """
        The source port ranges.
        """
        # List form; at most one of source_port_range / source_port_ranges is set.
        return pulumi.get(self, "source_port_ranges")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of the resource.
        """
        # Intentionally shadows builtins.type: generated accessor for the "type" field.
        return pulumi.get(self, "type")
class AwaitableGetSecurityRuleResult(GetSecurityRuleResult):
    """Awaitable wrapper so get_security_rule's result can be used with `await`.

    The `if False: yield` makes __await__ a generator that yields nothing and
    immediately returns a plain GetSecurityRuleResult with the same fields.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSecurityRuleResult(
            access=self.access,
            description=self.description,
            destination_address_prefix=self.destination_address_prefix,
            destination_address_prefixes=self.destination_address_prefixes,
            destination_application_security_groups=self.destination_application_security_groups,
            destination_port_range=self.destination_port_range,
            destination_port_ranges=self.destination_port_ranges,
            direction=self.direction,
            etag=self.etag,
            id=self.id,
            name=self.name,
            priority=self.priority,
            protocol=self.protocol,
            provisioning_state=self.provisioning_state,
            source_address_prefix=self.source_address_prefix,
            source_address_prefixes=self.source_address_prefixes,
            source_application_security_groups=self.source_application_security_groups,
            source_port_range=self.source_port_range,
            source_port_ranges=self.source_port_ranges,
            type=self.type)
def get_security_rule(network_security_group_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      security_rule_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityRuleResult:
    """
    Network security rule.

    :param str network_security_group_name: The name of the network security group.
    :param str resource_group_name: The name of the resource group.
    :param str security_rule_name: The name of the security rule.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (provider, version, ...).
    :return: Awaitable result describing the security rule.
    """
    # Build the invoke argument bag using the provider's camelCase wire names.
    __args__ = dict()
    __args__['networkSecurityGroupName'] = network_security_group_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['securityRuleName'] = security_rule_name
    # Default options and pin the provider version when the caller did not set one.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke; typ= makes pulumi deserialize into GetSecurityRuleResult.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200801:getSecurityRule', __args__, opts=opts, typ=GetSecurityRuleResult).value
    return AwaitableGetSecurityRuleResult(
        access=__ret__.access,
        description=__ret__.description,
        destination_address_prefix=__ret__.destination_address_prefix,
        destination_address_prefixes=__ret__.destination_address_prefixes,
        destination_application_security_groups=__ret__.destination_application_security_groups,
        destination_port_range=__ret__.destination_port_range,
        destination_port_ranges=__ret__.destination_port_ranges,
        direction=__ret__.direction,
        etag=__ret__.etag,
        id=__ret__.id,
        name=__ret__.name,
        priority=__ret__.priority,
        protocol=__ret__.protocol,
        provisioning_state=__ret__.provisioning_state,
        source_address_prefix=__ret__.source_address_prefix,
        source_address_prefixes=__ret__.source_address_prefixes,
        source_application_security_groups=__ret__.source_application_security_groups,
        source_port_range=__ret__.source_port_range,
        source_port_ranges=__ret__.source_port_ranges,
        type=__ret__.type)
| 45.006309
| 492
| 0.693839
|
acfcc1116197c60e1452808bda5232a89c85ee48
| 2,026
|
py
|
Python
|
docs/conf.py
|
shreesowndarya/aqme
|
2cb45c5068b15b94e658bfdc53b60db82ec3502a
|
[
"MIT"
] | 7
|
2022-01-20T22:18:57.000Z
|
2022-03-11T05:47:55.000Z
|
docs/conf.py
|
shreesowndarya/aqme
|
2cb45c5068b15b94e658bfdc53b60db82ec3502a
|
[
"MIT"
] | 13
|
2022-01-21T19:11:02.000Z
|
2022-02-07T18:02:18.000Z
|
docs/conf.py
|
shreesowndarya/aqme
|
2cb45c5068b15b94e658bfdc53b60db82ec3502a
|
[
"MIT"
] | 4
|
2022-01-21T19:03:37.000Z
|
2022-03-11T05:45:49.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------

project = "aqme"
# Intentionally shadows builtins.copyright: Sphinx reads this module attribute.
copyright = "2020, Shree Sowndarya S. V., Juan V. Alegre Requena, Robert S. Paton"
author = "Shree Sowndarya S. V., Juan V. Alegre Requena, Robert S. Paton"

# The full version, including alpha/beta/rc tags.
release = "v1.1"

# -- General configuration ---------------------------------------------------

# Sphinx extension module names ('sphinx.ext.*' or custom). None are used yet.
extensions = []

# Directories (relative to this file) that contain page templates.
templates_path = ["_templates"]

# Glob patterns, relative to the source directory, to ignore when looking for
# source files. This also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------

# Theme used for HTML and HTML Help pages.
html_theme = "sphinx_rtd_theme"

# Directories with custom static files (e.g. style sheets). They are copied
# after the builtin static files, so "default.css" would override the builtin.
html_static_path = ["_static"]
| 36.836364
| 82
| 0.665844
|
acfcc124acd49d7fe78c67f5ca8ebce802fab8c7
| 2,360
|
py
|
Python
|
config/cassava/resnet50_c2.py
|
li-phone/OpenOneCla
|
b70a752fe405307fa7e77465a8772846e1d29d5a
|
[
"MIT"
] | null | null | null |
config/cassava/resnet50_c2.py
|
li-phone/OpenOneCla
|
b70a752fe405307fa7e77465a8772846e1d29d5a
|
[
"MIT"
] | null | null | null |
config/cassava/resnet50_c2.py
|
li-phone/OpenOneCla
|
b70a752fe405307fa7e77465a8772846e1d29d5a
|
[
"MIT"
] | null | null | null |
# ---- model settings --------------------------------------------------------
model_config = dict(
    model="resnet50",
    num_classes=2,  # two-way task; see label_transform below
    pretrained=True,
)

# ---- data settings ---------------------------------------------------------
data_name = "cassava"
data_root = "data/cassava/"
# img_save_dir = data_root + "/imgs/"
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
dataset = dict(
    raw_train_path=f"{data_root}train.csv",
    raw_split=[("train", 0.8), ("val", 1.0)],
    balance=False,
    # collapse the original labels onto 2 classes: {3} -> 0, the rest -> 1
    label_transform={(3,): 0, (0, 1, 2, 4,): 1},
    # raw_test_path=data_root + '/annotations/instance_train_alcohol.csv',
    train=dict(
        name="train",
        ann_file=f"{data_root}/annotations/cls_train.csv",
        img_prefix=f"{data_root}/train_images/",
        img_scale=(224, 224),
        keep_ratio=False,
        img_norm_cfg=img_norm_cfg,
    ),
    val=dict(
        name="val",
        ann_file=f"{data_root}/annotations/cls_val.csv",
        img_prefix=f"{data_root}/train_images/",
        img_scale=(224, 224),
        keep_ratio=False,
        img_norm_cfg=img_norm_cfg,
    ),
    test=dict(
        name="test",
        ann_file=f"{data_root}/annotations/cls_test.csv",
        img_prefix=f"{data_root}/test_images/",
        img_scale=(224, 224),
        keep_ratio=False,
        img_norm_cfg=img_norm_cfg,
    ),
)

# ---- log settings ----------------------------------------------------------
log = dict(
    out_file="train_log_out.txt",
    data_file="train_log_data.json",
)

# ---- train process settings ------------------------------------------------
train_mode = ["train"]
val_mode = ["val"]
total_epochs = 12
work_dir = f"./work_dirs/{data_name}/{model_config['model']}/resnet50_c2"
resume_from = f"{work_dir}/latest.pth"
load_from = "./work_dirs/cache/resnet50-19c8e357.pth"
# load_from = None
mix = dict(
    type="none",
    alpha=2.0,
)
optimizer = dict(
    type="SGD",
    # type='Adam',
    Adam=dict(lr=0.0025, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False),
    SGD=dict(lr=0.0025, momentum=0, dampening=0, weight_decay=0, nesterov=False),
)
lr_scheduler = dict(
    type="CosineAnnealingLR",
    CosineAnnealingLR=dict(T_max=total_epochs),
)
loss = dict(
    type="CrossEntropyLoss",
    CrossEntropyLoss=dict(),
    FocalLoss=dict(),
    InverseLoss=dict(alpha=1, beta=0.01),
)
freq_cfg = dict(
    checkpoint_save=1,  # checkpoint every epoch
    log_print=20,       # log every 20 iterations
)
gpus = "1"
data_loader = dict(
    batch_size=32, shuffle=True,
)
val_data_loader = dict(
    batch_size=8, shuffle=False,
)
| 25.652174
| 86
| 0.636864
|
acfcc4647181551c931f67d4eea85a3324544525
| 4,119
|
py
|
Python
|
syne_tune/stopping_criterion.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
syne_tune/stopping_criterion.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-25T15:56:36.000Z
|
2022-02-25T17:53:10.000Z
|
syne_tune/stopping_criterion.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from dataclasses import dataclass
from typing import Optional, Dict
from syne_tune.tuning_status import TuningStatus
logger = logging.getLogger(__name__)
@dataclass
class StoppingCriterion:
    """
    Stopping criterion that can be used in a Tuner, for instance
    `Tuner(stop_criterion=StoppingCriterion(max_wallclock_time=3600), ...)`

    While a lambda can be used for the Tuner, e.g.
    `Tuner(stop_criterion=lambda status: status.wallclock_time > 3600, ...)`
    Using this class is needed when using the remote launcher to ensure
    serialization works correctly.

    Every bound is optional: a check is performed only when the corresponding
    attribute is not None.
    """
    max_wallclock_time: Optional[float] = None
    max_num_trials_started: Optional[int] = None
    max_num_trials_completed: Optional[int] = None
    max_cost: Optional[float] = None
    max_num_trials_finished: Optional[int] = None
    # minimum value for metrics, any value below this threshold will trigger a stop
    min_metric_value: Optional[Dict[str, float]] = None
    # maximum value for metrics, any value above this threshold will trigger a stop
    max_metric_value: Optional[Dict[str, float]] = None

    # todo we should have unit-test for all those cases.
    def __call__(self, status: "TuningStatus") -> bool:
        """Return True iff `status` exceeds any of the configured bounds."""
        # getLogger caches instances, so this is cheap; it also keeps the class
        # self-contained for serialization/remote execution.
        logger = logging.getLogger(__name__)
        # Lazy %-style arguments avoid formatting when INFO is disabled.
        if self.max_wallclock_time is not None and status.wallclock_time > self.max_wallclock_time:
            logger.info("reaching max wallclock time (%s), stopping there.", self.max_wallclock_time)
            return True
        if self.max_num_trials_started is not None and status.num_trials_started > self.max_num_trials_started:
            logger.info("reaching max number of trials started (%s), stopping there.", self.max_num_trials_started)
            return True
        if self.max_num_trials_completed is not None and status.num_trials_completed > self.max_num_trials_completed:
            logger.info("reaching max number of trials completed (%s), stopping there.", self.max_num_trials_completed)
            return True
        if self.max_num_trials_finished is not None and status.num_trials_finished > self.max_num_trials_finished:
            logger.info("reaching max number of trials finished (%s), stopping there.", self.max_num_trials_finished)
            return True
        if self.max_cost is not None and status.cost > self.max_cost:
            logger.info("reaching max cost (%s), stopping there.", self.max_cost)
            return True
        if self.max_metric_value is not None and status.overall_metric_statistics.count > 0:
            max_metrics_observed = status.overall_metric_statistics.max_metrics
            for metric, max_metric_accepted in self.max_metric_value.items():
                if metric in max_metrics_observed and max_metrics_observed[metric] > max_metric_accepted:
                    logger.info(
                        "found %s with value (%s), above the provided threshold %s stopping there.",
                        metric, max_metrics_observed[metric], max_metric_accepted)
                    return True
        if self.min_metric_value is not None and status.overall_metric_statistics.count > 0:
            min_metrics_observed = status.overall_metric_statistics.min_metrics
            for metric, min_metric_accepted in self.min_metric_value.items():
                if metric in min_metrics_observed and min_metrics_observed[metric] < min_metric_accepted:
                    # typo fixed: "bellow" -> "below"
                    logger.info(
                        "found %s with value (%s), below the provided threshold %s stopping there.",
                        metric, min_metrics_observed[metric], min_metric_accepted)
                    return True
        return False
| 53.493506
| 118
| 0.71158
|
acfcc51f20e8ddb36d6a579f337f644acdf23a5d
| 1,825
|
py
|
Python
|
pynasqm/trajectories/amber_restart_files.py
|
PotentialParadox/pynasqm
|
1bd51299b6ca7f8229d8a15428515d53a358903c
|
[
"MIT"
] | 1
|
2020-03-13T22:34:03.000Z
|
2020-03-13T22:34:03.000Z
|
pynasqm/trajectories/amber_restart_files.py
|
PotentialParadox/pynasqm
|
1bd51299b6ca7f8229d8a15428515d53a358903c
|
[
"MIT"
] | null | null | null |
pynasqm/trajectories/amber_restart_files.py
|
PotentialParadox/pynasqm
|
1bd51299b6ca7f8229d8a15428515d53a358903c
|
[
"MIT"
] | null | null | null |
from functools import singledispatch
from pynasqm.trajectories.qmground import QmGround
from pynasqm.trajectories.qmexcited import QmExcited
from pynasqm.trajectories.fluorescence import Fluorescence
from pynasqm.trajectories.absorption import Absorption
from pynasqm.trajectories.ppump import PPump
from pynasqm.trajectories.utils import traj_indices, snap_indices
@singledispatch
def amber_pc_restart_files(traj_data, restart_attempt):
    """Fallback overload: only reached for trajectory types with no registration."""
    message = f"traj_data type not supported by amber_pc_restart_files\n{traj_data}"
    raise NotImplementedError(message)
@amber_pc_restart_files.register(QmGround)
@amber_pc_restart_files.register(QmExcited)
@amber_pc_restart_files.register(PPump)
def _(traj_data, restart_attempt):
    # One restart file per trajectory; restart_attempt is 0-based, the
    # filename suffix _r<N> is 1-based.
    return ["snap_for_{}_t{}_r{}.rst".format(traj_data.job_suffix, i, restart_attempt+1)
            for i in traj_indices(traj_data)]
@amber_pc_restart_files.register(Absorption)
@amber_pc_restart_files.register(Fluorescence)
def _(traj_data, restart_attempt):
    # One file per (trajectory, snapshot) pair; note restart_attempt is
    # intentionally unused for spectra jobs (no _r<N> suffix here).
    return [f"snap_{snap_id}_for_{traj_data.job_suffix}_t{traj}.rst"
            for traj in traj_indices(traj_data)
            for snap_id in snap_indices(traj_data)]
@singledispatch
def amber_hpc_restart_files(traj_data, restart_attempt):
    """Fallback overload: only reached for trajectory types with no registration."""
    message = f"traj_data type not supported by amber_hpc_restart_files\n{traj_data}"
    raise NotImplementedError(message)
@amber_hpc_restart_files.register(QmGround)
@amber_hpc_restart_files.register(QmExcited)
@amber_hpc_restart_files.register(PPump)
def _(traj_data, restart_attempt):
    # {{ID}} escapes str.format, leaving a literal ${ID} placeholder for the
    # HPC job script to expand at run time.
    return ["snap_for_{}_t${{ID}}_r{}.rst".format(traj_data.job_suffix, restart_attempt+1)]
@amber_hpc_restart_files.register(Absorption)
@amber_hpc_restart_files.register(Fluorescence)
def _(traj_data, restart_attempt):
    # ${i}/${ID} are literal shell placeholders ({{...}} escapes the f-string);
    # the f-prefix is actually redundant here (no interpolation happens).
    # NOTE(review): "fluorescence" is hardcoded while the pc variant uses
    # traj_data.job_suffix — confirm this is intended for Absorption jobs too.
    return [f"snap_${{i}}_for_fluorescence_t${{ID}}.rst"]
| 46.794872
| 91
| 0.786301
|
acfcc52a82f116ae13610f88a38e701815896775
| 397
|
py
|
Python
|
materialize-css-django-material-admin/step1/djmaterial/djmaterial/wsgi.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 65
|
2017-06-13T01:02:17.000Z
|
2022-01-10T09:58:29.000Z
|
materialize-css-django-material-admin/step1/djmaterial/djmaterial/wsgi.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 1
|
2020-06-05T18:07:42.000Z
|
2020-06-05T18:07:42.000Z
|
materialize-css-django-material-admin/step1/djmaterial/djmaterial/wsgi.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 50
|
2017-07-01T02:10:19.000Z
|
2022-03-24T17:23:58.000Z
|
"""
WSGI config for djmaterial project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djmaterial.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
acfcc54f05c8bc2da35b82ee1d544d88ddf612c6
| 13,455
|
py
|
Python
|
homeassistant/components/system_bridge/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 2
|
2021-01-29T02:52:01.000Z
|
2021-05-15T04:23:18.000Z
|
homeassistant/components/system_bridge/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 69
|
2020-08-04T09:03:43.000Z
|
2022-03-31T06:13:01.000Z
|
homeassistant/components/system_bridge/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Support for System Bridge sensors."""
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any
from systembridge import Bridge
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_POTENTIAL_VOLT,
FREQUENCY_GIGAHERTZ,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from . import SystemBridgeDeviceEntity
from .const import DOMAIN
from .coordinator import SystemBridgeDataUpdateCoordinator
# Keys for the extra state attributes exposed by the filesystem and
# processes-load sensors below.
ATTR_AVAILABLE = "available"
ATTR_FILESYSTEM = "filesystem"
ATTR_LOAD_AVERAGE = "load_average"
ATTR_LOAD_IDLE = "load_idle"
ATTR_LOAD_SYSTEM = "load_system"
ATTR_LOAD_USER = "load_user"
ATTR_MOUNT = "mount"
ATTR_SIZE = "size"
ATTR_TYPE = "type"
ATTR_USED = "used"
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up System Bridge sensor based on a config entry.

    Creates one sensor entity per metric, one filesystem sensor per mounted
    filesystem reported by the coordinator, and battery sensors only when the
    device reports a battery.
    """
    coordinator: SystemBridgeDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    entities = [
        SystemBridgeCpuSpeedSensor(coordinator),
        SystemBridgeCpuTemperatureSensor(coordinator),
        SystemBridgeCpuVoltageSensor(coordinator),
        *(
            SystemBridgeFilesystemSensor(coordinator, key)
            # iterate keys directly instead of .items(): the values were unused
            for key in coordinator.data.filesystem.fsSize
        ),
        SystemBridgeMemoryFreeSensor(coordinator),
        SystemBridgeMemoryUsedSensor(coordinator),
        SystemBridgeMemoryUsedPercentageSensor(coordinator),
        SystemBridgeKernelSensor(coordinator),
        SystemBridgeOsSensor(coordinator),
        SystemBridgeProcessesLoadSensor(coordinator),
        SystemBridgeBiosVersionSensor(coordinator),
    ]
    # Battery sensors only make sense on hardware that reports a battery.
    if coordinator.data.battery.hasBattery:
        entities.append(SystemBridgeBatterySensor(coordinator))
        entities.append(SystemBridgeBatteryTimeRemainingSensor(coordinator))
    async_add_entities(entities)
class SystemBridgeSensor(SystemBridgeDeviceEntity, SensorEntity):
    """Defines a System Bridge sensor.

    Common base for all sensors in this module: stores device class and unit
    and forwards the remaining arguments to SystemBridgeDeviceEntity.
    """
    def __init__(
        self,
        coordinator: SystemBridgeDataUpdateCoordinator,
        key: str,
        name: str,
        icon: str | None,
        device_class: str | None,
        unit_of_measurement: str | None,
        enabled_by_default: bool,
    ) -> None:
        """Initialize System Bridge sensor."""
        self._device_class = device_class
        self._unit_of_measurement = unit_of_measurement
        super().__init__(coordinator, key, name, icon, enabled_by_default)
    @property
    def device_class(self) -> str | None:
        """Return the class of this sensor."""
        return self._device_class
    @property
    def native_unit_of_measurement(self) -> str | None:
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement
class SystemBridgeBatterySensor(SystemBridgeSensor):
    """Defines a Battery sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        # args: key, name, icon, device_class, unit, enabled_by_default
        super().__init__(
            coordinator,
            "battery",
            "Battery",
            None,
            DEVICE_CLASS_BATTERY,
            PERCENTAGE,
            True,
        )
    @property
    def native_value(self) -> float:
        """Return the state of the sensor (battery charge in percent)."""
        bridge: Bridge = self.coordinator.data
        return bridge.battery.percent
class SystemBridgeBatteryTimeRemainingSensor(SystemBridgeSensor):
    """Defines the Battery Time Remaining sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "battery_time_remaining",
            "Battery Time Remaining",
            None,
            DEVICE_CLASS_TIMESTAMP,
            None,
            True,
        )
    @property
    def native_value(self) -> str | None:
        """Return the state of the sensor: projected depletion time, or None."""
        bridge: Bridge = self.coordinator.data
        if bridge.battery.timeRemaining is None:
            return None
        # timeRemaining is treated as minutes from now.
        # NOTE(review): datetime.now() is timezone-naive, while a TIMESTAMP
        # device class normally expects an aware datetime — confirm.
        return str(datetime.now() + timedelta(minutes=bridge.battery.timeRemaining))
class SystemBridgeCpuSpeedSensor(SystemBridgeSensor):
    """Defines a CPU speed sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "cpu_speed",
            "CPU Speed",
            "mdi:speedometer",
            None,
            FREQUENCY_GIGAHERTZ,
            True,
        )
    @property
    def native_value(self) -> float:
        """Return the state of the sensor (average clock speed, GHz)."""
        bridge: Bridge = self.coordinator.data
        return bridge.cpu.currentSpeed.avg
class SystemBridgeCpuTemperatureSensor(SystemBridgeSensor):
    """Defines a CPU temperature sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        # disabled by default (last argument False)
        super().__init__(
            coordinator,
            "cpu_temperature",
            "CPU Temperature",
            None,
            DEVICE_CLASS_TEMPERATURE,
            TEMP_CELSIUS,
            False,
        )
    @property
    def native_value(self) -> float:
        """Return the state of the sensor (°C)."""
        bridge: Bridge = self.coordinator.data
        return bridge.cpu.temperature.main
class SystemBridgeCpuVoltageSensor(SystemBridgeSensor):
    """Defines a CPU voltage sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        # disabled by default (last argument False)
        super().__init__(
            coordinator,
            "cpu_voltage",
            "CPU Voltage",
            None,
            DEVICE_CLASS_VOLTAGE,
            ELECTRIC_POTENTIAL_VOLT,
            False,
        )
    @property
    def native_value(self) -> float:
        """Return the state of the sensor (volts)."""
        bridge: Bridge = self.coordinator.data
        return bridge.cpu.cpu.voltage
class SystemBridgeFilesystemSensor(SystemBridgeSensor):
    """Defines a filesystem sensor (percentage of space used on one mount)."""
    def __init__(
        self, coordinator: SystemBridgeDataUpdateCoordinator, key: str
    ) -> None:
        """Initialize System Bridge sensor.

        key is the fsSize mapping key for this filesystem.
        """
        # ":" is stripped for the unique id (e.g. Windows drive letters "C:").
        uid_key = key.replace(":", "")
        super().__init__(
            coordinator,
            f"filesystem_{uid_key}",
            f"{key} Space Used",
            "mdi:harddisk",
            None,
            PERCENTAGE,
            True,
        )
        self._fs_key = key
    @property
    def native_value(self) -> float | None:
        """Return the state of the sensor: used-space percentage, or None."""
        bridge: Bridge = self.coordinator.data
        return (
            round(bridge.filesystem.fsSize[self._fs_key]["use"], 2)
            if bridge.filesystem.fsSize[self._fs_key]["use"] is not None
            else None
        )
    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes of the entity."""
        bridge: Bridge = self.coordinator.data
        return {
            ATTR_AVAILABLE: bridge.filesystem.fsSize[self._fs_key]["available"],
            ATTR_FILESYSTEM: bridge.filesystem.fsSize[self._fs_key]["fs"],
            ATTR_MOUNT: bridge.filesystem.fsSize[self._fs_key]["mount"],
            ATTR_SIZE: bridge.filesystem.fsSize[self._fs_key]["size"],
            ATTR_TYPE: bridge.filesystem.fsSize[self._fs_key]["type"],
            ATTR_USED: bridge.filesystem.fsSize[self._fs_key]["used"],
        }
class SystemBridgeMemoryFreeSensor(SystemBridgeSensor):
    """Defines a memory free sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "memory_free",
            "Memory Free",
            "mdi:memory",
            None,
            DATA_GIGABYTES,
            True,
        )
    @property
    def native_value(self) -> float | None:
        """Return the state of the sensor."""
        bridge: Bridge = self.coordinator.data
        # bytes -> decimal gigabytes (1000**3), rounded to 2 dp
        return (
            round(bridge.memory.free / 1000 ** 3, 2)
            if bridge.memory.free is not None
            else None
        )
class SystemBridgeMemoryUsedSensor(SystemBridgeSensor):
    """Defines a memory used sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        # disabled by default (last argument False)
        super().__init__(
            coordinator,
            "memory_used",
            "Memory Used",
            "mdi:memory",
            None,
            DATA_GIGABYTES,
            False,
        )
    @property
    def native_value(self) -> float | None:  # annotation fixed: returns a float, not str
        """Return the state of the sensor."""
        bridge: Bridge = self.coordinator.data
        # bytes -> decimal gigabytes (1000**3), rounded to 2 dp
        return (
            round(bridge.memory.used / 1000 ** 3, 2)
            if bridge.memory.used is not None
            else None
        )
class SystemBridgeMemoryUsedPercentageSensor(SystemBridgeSensor):
    """Defines a memory used percentage sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "memory_used_percentage",
            "Memory Used %",
            "mdi:memory",
            None,
            PERCENTAGE,
            True,
        )
    @property
    def native_value(self) -> float | None:  # annotation fixed: returns a float, not str
        """Return the state of the sensor."""
        bridge: Bridge = self.coordinator.data
        return (
            round((bridge.memory.used / bridge.memory.total) * 100, 2)
            if bridge.memory.used is not None and bridge.memory.total is not None
            else None
        )
class SystemBridgeKernelSensor(SystemBridgeSensor):
    """Defines a kernel sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "kernel",
            "Kernel",
            "mdi:devices",
            None,
            None,
            True,
        )
    @property
    def native_value(self) -> str:
        """Return the state of the sensor (kernel version string)."""
        bridge: Bridge = self.coordinator.data
        return bridge.os.kernel
class SystemBridgeOsSensor(SystemBridgeSensor):
    """Defines an OS sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "os",
            "Operating System",
            "mdi:devices",
            None,
            None,
            True,
        )
    @property
    def native_value(self) -> str:
        """Return the state of the sensor: "<distro> <release>"."""
        bridge: Bridge = self.coordinator.data
        return f"{bridge.os.distro} {bridge.os.release}"
class SystemBridgeProcessesLoadSensor(SystemBridgeSensor):
    """Defines a Processes Load sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        super().__init__(
            coordinator,
            "processes_load",
            "Load",
            "mdi:percent",
            None,
            PERCENTAGE,
            True,
        )
    @property
    def native_value(self) -> float | None:
        """Return the state of the sensor (current overall load, percent)."""
        bridge: Bridge = self.coordinator.data
        return (
            round(bridge.processes.load.currentLoad, 2)
            if bridge.processes.load.currentLoad is not None
            else None
        )
    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes of the entity.

        Each attribute is only included when the bridge reports a value.
        """
        bridge: Bridge = self.coordinator.data
        attrs = {}
        if bridge.processes.load.avgLoad is not None:
            attrs[ATTR_LOAD_AVERAGE] = round(bridge.processes.load.avgLoad, 2)
        if bridge.processes.load.currentLoadUser is not None:
            attrs[ATTR_LOAD_USER] = round(bridge.processes.load.currentLoadUser, 2)
        if bridge.processes.load.currentLoadSystem is not None:
            attrs[ATTR_LOAD_SYSTEM] = round(bridge.processes.load.currentLoadSystem, 2)
        if bridge.processes.load.currentLoadIdle is not None:
            attrs[ATTR_LOAD_IDLE] = round(bridge.processes.load.currentLoadIdle, 2)
        return attrs
class SystemBridgeBiosVersionSensor(SystemBridgeSensor):
    """Defines a bios version sensor."""
    def __init__(self, coordinator: SystemBridgeDataUpdateCoordinator) -> None:
        """Initialize System Bridge sensor."""
        # disabled by default (last argument False)
        super().__init__(
            coordinator,
            "bios_version",
            "BIOS Version",
            "mdi:chip",
            None,
            None,
            False,
        )
    @property
    def native_value(self) -> str:
        """Return the state of the sensor (BIOS version string)."""
        bridge: Bridge = self.coordinator.data
        return bridge.system.bios.version
| 30.719178
| 87
| 0.618803
|
acfcc623f98d8b735c18b16d0bcedba0ed9190d1
| 1,777
|
py
|
Python
|
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetPredictiveTaskDataRequest.py
|
Explorer1092/aliyun-openapi-python-sdk
|
13d95bdceb7fd3bba807275a5a15b32d531a1a63
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetPredictiveTaskDataRequest.py
|
Explorer1092/aliyun-openapi-python-sdk
|
13d95bdceb7fd3bba807275a5a15b32d531a1a63
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetPredictiveTaskDataRequest.py
|
Explorer1092/aliyun-openapi-python-sdk
|
13d95bdceb7fd3bba807275a5a15b32d531a1a63
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class GetPredictiveTaskDataRequest(RpcRequest):
    """RPC request for the CCC (2017-07-05) GetPredictiveTaskData action.

    Generated-SDK style: each query parameter has a get_/set_ accessor pair
    backed by the base class's query-parameter dict.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'CCC', '2017-07-05', 'GetPredictiveTaskData')
        self.set_method('GET')
        # Endpoint data is only attached when the base class exposes the hooks.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')
    def set_InstanceId(self,InstanceId):
        self.add_query_param('InstanceId',InstanceId)
    def get_SkillGroupId(self):
        return self.get_query_params().get('SkillGroupId')
    def set_SkillGroupId(self,SkillGroupId):
        self.add_query_param('SkillGroupId',SkillGroupId)
    def get_TaskId(self):
        return self.get_query_params().get('TaskId')
    def set_TaskId(self,TaskId):
        self.add_query_param('TaskId',TaskId)
| 35.54
| 74
| 0.76646
|
acfcc73856728552fb047bb3ad993df6fdebc53d
| 1,370
|
py
|
Python
|
cnfencoder/cnf.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
cnfencoder/cnf.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
cnfencoder/cnf.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
import os
class CNF:
    """
    Store the total number of variables and the list of clauses of the CNF.

    Clauses are represented by a list of literals, where each literal is
    encoded using an integer. Negative integers represent negated literals.
    """
    def __init__(self):
        # Number of propositional variables, as declared by the "p cnf" line.
        self.num_variables = 0
        # Each clause is a list of non-zero DIMACS literals (ints).
        self.clauses = []

    def saveToFile(self, filename: str):
        """
        Store the CNF into a file using DIMACS format.
        """
        # Write "\n" and let text mode translate it. The old code wrote
        # os.linesep in text mode, which produced "\r\r\n" on Windows.
        with open(filename, "w") as f:
            f.write(f"p cnf {self.num_variables} {len(self.clauses)}\n")
            for clause in self.clauses:
                # Each clause line is its literals followed by the 0 terminator.
                f.write(" ".join(str(lit) for lit in clause) + " 0\n")

    def readFromFile(self, filename: str):
        """
        Read the CNF from a valid DIMACS file.

        No full format validation is done, but the parser tolerates repeated
        whitespace, CRLF line endings and a missing trailing newline (the old
        code compared raw tokens against the literal string "0\\n").
        """
        with open(filename) as f:
            for line in f:
                if line.startswith("c"):
                    # comment line
                    continue
                if line.startswith("p"):
                    # problem line: "p cnf <num_variables> <num_clauses>"
                    self.num_variables = int(line.split()[2])
                else:
                    literals = [int(tok) for tok in line.split()]
                    if not literals:
                        # skip blank lines instead of recording empty clauses
                        continue
                    # Drop the DIMACS clause terminator if present.
                    if literals[-1] == 0:
                        literals.pop()
                    self.clauses.append(literals)
| 32.619048
| 80
| 0.516058
|
acfcc77844c7bd017a7158b2fa3b965bd88e28fd
| 1,940
|
py
|
Python
|
tests/pyre.pkg/components/protocol_compatibility.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pyre.pkg/components/protocol_compatibility.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pyre.pkg/components/protocol_compatibility.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
"""
Verify that compatibility among protocols is detected correctly
"""
def test():
    """Exercise pyre protocol compatibility checks in both directions."""
    import pyre
    # declare a couple of protocols
    class base(pyre.protocol):
        """the base protocol"""
        common = pyre.properties.int()
    class derived(base):
        """a derived one, so automatically compatible"""
        extra = pyre.properties.int()
    class ok(pyre.protocol):
        """one that doesn't derive but has the right public protocol"""
        common = pyre.properties.int()
    class notok(pyre.protocol):
        """one that doesn't provide the right public protocol"""
        what = pyre.properties.int()
    class badtype(pyre.protocol):
        """one that has the right trait but of the wrong type"""
        @pyre.provides
        def common(self):
            """method, not property"""
    class shadow(base):
        """one that derives but shadows the trait in an incompatible way"""
        @pyre.provides
        def common(self):
            """method, not property"""
    # compatibility checks
    # the ones that should succeed: inheritance or a structurally matching trait
    assert derived.pyre_isCompatible(base)
    assert ok.pyre_isCompatible(ok)
    assert derived.pyre_isCompatible(ok)
    # and the ones that should fail: missing traits, or traits of the wrong kind
    assert not ok.pyre_isCompatible(derived)
    assert not notok.pyre_isCompatible(base)
    assert not notok.pyre_isCompatible(derived)
    assert not notok.pyre_isCompatible(ok)
    assert not badtype.pyre_isCompatible(base)
    assert not badtype.pyre_isCompatible(derived)
    assert not badtype.pyre_isCompatible(ok)
    assert not shadow.pyre_isCompatible(base)
    assert not shadow.pyre_isCompatible(derived)
    assert not shadow.pyre_isCompatible(ok)
    # hand the protocols back so callers can inspect them
    return base, derived, ok, notok, badtype, shadow
# main: run the compatibility checks when invoked as a script
if __name__ == "__main__":
    test()
# end of file
| 26.575342
| 75
| 0.666495
|
acfcc8a1573a785e9e3c18002e8fdee900e8464e
| 12,234
|
py
|
Python
|
mvsnet/test.py
|
GentleDell/MVSNet
|
c6407b1b2301c4a0bdb629e41ba3f8d2d9fe3b11
|
[
"MIT"
] | null | null | null |
mvsnet/test.py
|
GentleDell/MVSNet
|
c6407b1b2301c4a0bdb629e41ba3f8d2d9fe3b11
|
[
"MIT"
] | null | null | null |
mvsnet/test.py
|
GentleDell/MVSNet
|
c6407b1b2301c4a0bdb629e41ba3f8d2d9fe3b11
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright 2019, Yao Yao, HKUST.
Test script.
"""
from __future__ import print_function
import os
import time
import sys
import math
import argparse
import imageio
import numpy as np
import cv2
import tensorflow as tf
sys.path.append("../")
from tools.common import Notify
from preprocess import *
from model import *
from loss import *
# dataset parameters: input location and checkpoint to restore
tf.app.flags.DEFINE_string('dense_folder', '../scan9/scan9/',
                           """Root path to dense folder.""")
tf.app.flags.DEFINE_string('model_dir',
                           '../models/tf_model/',
                           """Path to restore the model.""")
tf.app.flags.DEFINE_integer('ckpt_step', 100000,
                            """ckpt step.""")
# input parameters: how many views to load and how to size the cost volume
tf.app.flags.DEFINE_integer('view_num', 5,
                            """Number of images (1 ref image and view_num - 1 view images).""")
tf.app.flags.DEFINE_integer('max_d', 256,
                            """Maximum depth step when testing.""")
tf.app.flags.DEFINE_integer('max_w', 1600,
                            """Maximum image width when testing.""")
tf.app.flags.DEFINE_integer('max_h', 1200,
                            """Maximum image height when testing.""")
tf.app.flags.DEFINE_float('sample_scale', 0.25,
                          """Downsample scale for building cost volume (W and H).""")
tf.app.flags.DEFINE_float('interval_scale', 0.8,
                          """Downsample scale for building cost volume (D).""")
tf.app.flags.DEFINE_float('base_image_size', 8,
                          """Base image size""")
tf.app.flags.DEFINE_integer('batch_size', 1,
                            """Testing batch size.""")
tf.app.flags.DEFINE_bool('adaptive_scaling', True,
                         """Let image size to fit the network, including 'scaling', 'cropping'""")
# network architecture: which regularizer ('3DCNNs' or 'GRU') and options
tf.app.flags.DEFINE_string('regularization', 'GRU',
                           """Regularization method, including '3DCNNs' and 'GRU'""")
tf.app.flags.DEFINE_boolean('refinement', False,
                            """Whether to apply depth map refinement for MVSNet""")
tf.app.flags.DEFINE_bool('inverse_depth', True,
                         """Whether to apply inverse depth for R-MVSNet""")
# handle through which all of the flags above are read
FLAGS = tf.app.flags.FLAGS
class MVSGenerator:
    """ data generator class, tf only accept generator without param

    Yields (scaled_images, centered_images, scaled_cams, image_index) tuples
    forever, cycling over ``sample_list``. Each sample in ``sample_list`` is a
    flat list alternating image path / camera path, where entry 0 is the
    reference view.
    """
    def __init__(self, sample_list, view_num):
        self.sample_list = sample_list
        self.view_num = view_num
        self.sample_num = len(sample_list)
        # number of samples yielded so far (diagnostic only)
        self.counter = 0
    def __iter__(self):
        # infinite generator: tf.data re-pulls from it until OutOfRange
        while True:
            for data in self.sample_list:
                # read input data
                images = []
                cams = []
                # image index is taken from the file name stem, e.g. "00000012.jpg" -> 12
                image_index = int(os.path.splitext(os.path.basename(data[0]))[0])
                # data alternates image/camera paths, so half its length is the view count
                selected_view_num = int(len(data) / 2)
                for view in range(min(self.view_num, selected_view_num)):
                    image_file = file_io.FileIO(data[2 * view], mode='rb') # all 'r' are changed to be 'rb' to load image data
                    image = imageio.imread(image_file, as_gray=False, pilmode='RGB') # as binary for futher de/encoding, by zhantao deng@ 26-07-2019
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                    cam_file = file_io.FileIO(data[2 * view + 1], mode='rb')
                    cam = load_cam(cam_file, FLAGS.interval_scale)
                    # a zero depth-sample count means "unspecified": fall back to max_d
                    if cam[1][3][2] == 0:
                        cam[1][3][2] = FLAGS.max_d
                    images.append(image)
                    cams.append(cam)
                # pad with copies of the reference view when fewer views exist
                if selected_view_num < self.view_num:
                    for view in range(selected_view_num, self.view_num):
                        image_file = file_io.FileIO(data[0], mode='rb')
                        image = imageio.imread(image_file, as_gray=False, pilmode='RGB')
                        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cam_file = file_io.FileIO(data[1], mode='rb')
                        cam = load_cam(cam_file, FLAGS.interval_scale)
                        images.append(image)
                        cams.append(cam)
                print ('range: ', cams[0][1, 3, 0], cams[0][1, 3, 1], cams[0][1, 3, 2], cams[0][1, 3, 3])
                # determine a proper scale to resize input
                resize_scale = 1
                if FLAGS.adaptive_scaling:
                    h_scale = 0
                    w_scale = 0
                    for view in range(self.view_num):
                        height_scale = float(FLAGS.max_h) / images[view].shape[0]
                        width_scale = float(FLAGS.max_w) / images[view].shape[1]
                        if height_scale > h_scale:
                            h_scale = height_scale
                        if width_scale > w_scale:
                            w_scale = width_scale
                    if h_scale > 1 or w_scale > 1:
                        print ("max_h, max_w should < W and H!")
                        exit(-1)
                    resize_scale = h_scale
                    if w_scale > h_scale:
                        resize_scale = w_scale
                scaled_input_images, scaled_input_cams = scale_mvs_input(images, cams, scale=resize_scale)
                # crop to fit network
                croped_images, croped_cams = crop_mvs_input(scaled_input_images, scaled_input_cams)
                # center images
                centered_images = []
                for view in range(self.view_num):
                    centered_images.append(center_image(croped_images[view]))
                # sample cameras for building cost volume
                # NOTE(review): `real_cams` is never used after this copy — confirm it can be removed
                real_cams = np.copy(croped_cams)
                scaled_cams = scale_mvs_camera(croped_cams, scale=FLAGS.sample_scale)
                # return mvs input
                scaled_images = []
                for view in range(self.view_num):
                    scaled_images.append(scale_image(croped_images[view], scale=FLAGS.sample_scale))
                scaled_images = np.stack(scaled_images, axis=0)
                croped_images = np.stack(croped_images, axis=0)
                scaled_cams = np.stack(scaled_cams, axis=0)
                self.counter += 1
                yield (scaled_images, centered_images, scaled_cams, image_index)
def mvsnet_pipeline(mvs_list):
    """ mvsnet in altizure pipeline

    Builds a TF1 inference graph for every reference view in ``mvs_list``
    and writes the initial depth map, probability map, reference image and
    camera of each view into ``<dense_folder>/depths_mvsnet``.
    """
    print ('sample number: ', len(mvs_list))
    # create output folder
    output_folder = os.path.join(FLAGS.dense_folder, 'depths_mvsnet')
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)
    # testing set
    mvs_generator = iter(MVSGenerator(mvs_list, FLAGS.view_num))
    generator_data_type = (tf.float32, tf.float32, tf.float32, tf.int32)
    mvs_set = tf.data.Dataset.from_generator(lambda: mvs_generator, generator_data_type)
    mvs_set = mvs_set.batch(FLAGS.batch_size)
    mvs_set = mvs_set.prefetch(buffer_size=1)
    # data from dataset via iterator
    mvs_iterator = mvs_set.make_initializable_iterator()
    scaled_images, centered_images, scaled_cams, image_index = mvs_iterator.get_next()
    # set shapes (batch, view, H, W, channels) / cams are (batch, view, 2, 4, 4)
    scaled_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))
    centered_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))
    scaled_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))
    # depth sweep parameters are packed in row 3 of the reference camera's
    # second 4x4 matrix: [start, interval, num, end]
    depth_start = tf.reshape(
        tf.slice(scaled_cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])
    depth_interval = tf.reshape(
        tf.slice(scaled_cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])
    depth_num = tf.cast(
        tf.reshape(tf.slice(scaled_cams, [0, 0, 1, 3, 2], [1, 1, 1, 1, 1]), []), 'int32')
    # deal with inverse depth
    if FLAGS.regularization == '3DCNNs' and FLAGS.inverse_depth:
        depth_end = tf.reshape(
            tf.slice(scaled_cams, [0, 0, 1, 3, 3], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])
    else:
        depth_end = depth_start + (tf.cast(depth_num, tf.float32) - 1) * depth_interval
    # depth map inference using 3DCNNs
    if FLAGS.regularization == '3DCNNs':
        init_depth_map, prob_map = inference_mem(
            centered_images, scaled_cams, FLAGS.max_d, depth_start, depth_interval)
        if FLAGS.refinement:
            ref_image = tf.squeeze(tf.slice(centered_images, [0, 0, 0, 0, 0], [-1, 1, -1, -1, 3]), axis=1)
            # NOTE(review): `refined_depth_map` is never fetched below — confirm intended
            refined_depth_map = depth_refine(
                init_depth_map, ref_image, FLAGS.max_d, depth_start, depth_interval, True)
    # depth map inference using GRU
    elif FLAGS.regularization == 'GRU':
        init_depth_map, prob_map = inference_winner_take_all(centered_images, scaled_cams,
            depth_num, depth_start, depth_end, reg_type='GRU', inverse_depth=FLAGS.inverse_depth)
    # init option
    init_op = tf.global_variables_initializer()
    var_init_op = tf.local_variables_initializer()
    # GPU grows incrementally
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # initialization
        sess.run(var_init_op)
        sess.run(init_op)
        total_step = 0
        # load model checkpoint named "model.ckpt-<ckpt_step>" for the chosen regularizer
        if FLAGS.model_dir is not None:
            pretrained_model_ckpt_path = os.path.join(FLAGS.model_dir, FLAGS.regularization, 'model.ckpt')
            restorer = tf.train.Saver(tf.global_variables())
            restorer.restore(sess, '-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)]))
            print(Notify.INFO, 'Pre-trained model restored from %s' %
                  ('-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)])), Notify.ENDC)
            total_step = FLAGS.ckpt_step
        # run inference for each reference view
        sess.run(mvs_iterator.initializer)
        for step in range(len(mvs_list)):
            start_time = time.time()
            try:
                out_init_depth_map, out_prob_map, out_images, out_cams, out_index = sess.run(
                    [init_depth_map, prob_map, scaled_images, scaled_cams, image_index])
            except tf.errors.OutOfRangeError:
                print("all dense finished")  # ==> "End of dataset"
                break
            duration = time.time() - start_time
            print(Notify.INFO, 'depth inference %d finished. (%.3f sec/step)' % (step, duration),
                  Notify.ENDC)
            # squeeze output (drop batch/view singleton dims)
            out_init_depth_image = np.squeeze(out_init_depth_map)
            out_prob_map = np.squeeze(out_prob_map)
            out_ref_image = np.squeeze(out_images)
            out_ref_image = np.squeeze(out_ref_image[0, :, :, :])
            out_ref_cam = np.squeeze(out_cams)
            out_ref_cam = np.squeeze(out_ref_cam[0, :, :, :])
            out_index = np.squeeze(out_index)
            # paths, keyed by the 8-digit reference image index
            init_depth_map_path = output_folder + ('/%08d_init.pfm' % out_index)
            prob_map_path = output_folder + ('/%08d_prob.pfm' % out_index)
            out_ref_image_path = output_folder + ('/%08d.jpg' % out_index)
            out_ref_cam_path = output_folder + ('/%08d.txt' % out_index)
            # save output
            write_pfm(init_depth_map_path, out_init_depth_image)
            write_pfm(prob_map_path, out_prob_map)
            out_ref_image = cv2.cvtColor(out_ref_image, cv2.COLOR_RGB2BGR)
            # image_file = file_io.FileIO(out_ref_image_path, mode='w')
            cv2.imwrite(out_ref_image_path, out_ref_image) # modified by zhantao deng. using cv2 because imwrite
            # has been removed from scipy and imageio is inconvenient.
            write_cam(out_ref_cam_path, out_ref_cam)
            total_step += 1
def main(_): # pylint: disable=unused-argument
    """ program entrance

    Expands FLAGS.dense_folder into the per-view MVS input list, then runs
    the full inference pipeline over it.
    """
    # generate input path list from the dense folder layout
    mvs_list = gen_pipeline_mvs_list(FLAGS.dense_folder)
    # mvsnet inference over every reference view
    mvsnet_pipeline(mvs_list)
# let tf.app parse FLAGS and dispatch to main() when run as a script
if __name__ == '__main__':
    tf.app.run()
| 44.007194
| 158
| 0.586644
|
acfcc92bfdc5044082d8828ce3edb26c1acc5e29
| 24,028
|
py
|
Python
|
environment/lib/python3.8/site-packages/dask_ml/model_selection/_hyperband.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | null | null | null |
environment/lib/python3.8/site-packages/dask_ml/model_selection/_hyperband.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | 2
|
2021-05-11T16:00:55.000Z
|
2021-08-23T20:45:22.000Z
|
environment/lib/python3.8/site-packages/dask_ml/model_selection/_hyperband.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | null | null | null |
from __future__ import division
import logging
import math
from warnings import warn
import numpy as np
from sklearn.utils import check_random_state
from tornado import gen
from ._incremental import BaseIncrementalSearchCV
from ._successive_halving import SuccessiveHalvingSearchCV
logger = logging.getLogger(__name__)
def _get_hyperband_params(R, eta=3):
"""
Parameters
----------
R : int
The maximum number of iterations desired.
eta : int
How aggressive to be in the search
Returns
-------
brackets : Dict[int, Tuple[int, int]]
A dictionary of the form {bracket_id: (n_models, n_initial_iter)}
Notes
-----
The bracket index is a measure of how strong that n,r combination
adapts to prior input. i.e., a bracket ID of 0 means "doesn't adapt
at all" and bracket index of 5 means "adapts pretty strongly"
``R`` and ``eta`` are the terminology that the Hyperband paper uses [1]_.
References
----------
.. [1] "Hyperband: A novel bandit-based approach to hyperparameter
optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A.
Rostamizadeh, and A. Talwalkar. https://arxiv.org/abs/1603.06560
"""
s_max = math.floor(math.log(R, eta))
B = (s_max + 1) * R
brackets = list(reversed(range(int(s_max + 1))))
N = [int(math.ceil(B / R * eta ** s / (s + 1))) for s in brackets]
R = [int(R * eta ** -s) for s in brackets]
return {b: (n, r) for b, n, r in zip(brackets, N, R)}
class HyperbandSearchCV(BaseIncrementalSearchCV):
    """Find the best parameters for a particular model with an adaptive
    cross-validation algorithm.

    Hyperband will find close to the best possible parameters with
    the given computational budget [*]_ by spending more time training
    high-performing estimators [1]_. This means that Hyperband stops training
    estimators that perform poorly -- at it's core, Hyperband is an early
    stopping scheme for RandomizedSearchCV.

    Hyperband does not require a trade-off between "evaluate many parameters
    for a short time" and "train a few parameters for a long time"
    like RandomizedSearchCV.

    Hyperband requires one input which requires knowing how long
    to train the best performing estimator via ``max_iter``.
    The other implicit input (the Dask array chuck size) requires
    a rough estimate of how many parameters to sample. Specification details
    are in :ref:`Notes <hyperband-notes>`.

    .. [*] After :math:`N` ``partial_fit`` calls the estimator Hyperband
       produces will be close to the best possible estimator that :math:`N`
       ``partial_fit`` calls could ever produce with high probability (where
       "close" means "within log terms of the expected best possible score").

    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each hyperparameter
        combination. This is assumed to implement the scikit-learn estimator
        interface. Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed. The estimator must implement
        ``partial_fit``, ``set_params``, and work well with ``clone``.
    parameters : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    max_iter : int
        The maximum number of partial_fit calls to any one model. This should
        be the number of ``partial_fit`` calls required for the model to
        converge. See :ref:`Notes <hyperband-notes>` for details on
        setting this parameter.
    aggressiveness : int, default=3
        How aggressive to be in culling off the different estimators. Higher
        values imply higher confidence in scoring (or that
        the hyperparameters influence the ``estimator.score`` more
        than the data). Theory suggests ``aggressiveness=3`` is close to
        optimal. ``aggressiveness=4`` has higher confidence that is likely
        suitable for initial exploration.
    patience : int, default False
        If specified, training stops when the score does not increase by
        ``tol`` after ``patience`` calls to ``partial_fit``. Off by default.
        A ``patience`` value is automatically selected if ``patience=True`` to
        work well with the Hyperband model selection algorithm.
    tol : float, default 0.001
        The required level of improvement to consider stopping training on
        that model when ``patience`` is specified. Increasing ``tol`` will
        tend to reduce training time at the cost of (potentially) worse
        estimators.
    test_size : float
        Fraction of the dataset to hold out for computing test/validation
        scores. Defaults to the size of a single partition of
        the input training set.

        .. note::

           The testing dataset should fit in memory on a single machine.
           Adjust the ``test_size`` parameter as necessary to achieve this.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    scoring : string, callable, list/tuple, dict or None, default: None
        A single string (see :ref:`scoring_parameter`) or a callable
        (see :ref:`scoring`) to evaluate the predictions on the test set.
        If None, the estimator's default scorer (if available) is used.
    verbose : bool, float, int, optional, default: False
        If False (default), don't print logs (or pipe them to stdout). However,
        standard logging will still be used.
        If True, print logs and use standard logging.
        If float, print/log approximately ``verbose`` fraction of the time.
    prefix : str, optional, default=""
        While logging, add ``prefix`` to each message.

    Examples
    --------
    >>> import numpy as np
    >>> from dask_ml.model_selection import HyperbandSearchCV
    >>> from dask_ml.datasets import make_classification
    >>> from sklearn.linear_model import SGDClassifier
    >>>
    >>> X, y = make_classification(chunks=20)
    >>> est = SGDClassifier(tol=1e-3)
    >>> param_dist = {'alpha': np.logspace(-4, 0, num=1000),
    >>>               'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge'],
    >>>               'average': [True, False]}
    >>>
    >>> search = HyperbandSearchCV(est, param_dist)
    >>> search.fit(X, y, classes=np.unique(y))
    >>> search.best_params_
    {'loss': 'log', 'average': False, 'alpha': 0.0080502}

    Attributes
    ----------
    metadata and metadata_ : dict[str, Union(int, dict)]
        These dictionaries describe the computation performed, either
        before computation happens with ``metadata`` or after computation
        happens with ``metadata_``. These dictionaries both have keys

        * ``n_models``, an int representing how many models will be/is created.
        * ``partial_fit_calls``, an int representing how many times
          ``partial_fit`` will be/is called.
        * ``brackets``, a list of the brackets that Hyperband runs. Each
          bracket has different values for training time importance and
          hyperparameter importance. In addition to ``n_models`` and
          ``partial_fit_calls``, each element in this list has keys

          * ``bracket``, an int the bracket ID. Each bracket corresponds to
            a different levels of training time importance.
            For bracket 0, training time is important. For the highest
            bracket, training time is not important and models are killed
            aggressively.
          * ``SuccessiveHalvingSearchCV params``, a dictionary used to create
            the different brackets. It does not include the
            ``estimator`` or ``parameters`` parameters.
          * ``decisions``, the number of ``partial_fit`` calls Hyperband makes
            before making decisions.

        These dictionaries are the same if ``patience`` is not specified. If
        ``patience`` is specified, it's possible that less training is
        performed, and ``metadata_`` will reflect that (though ``metadata``
        won't).
    cv_results_ : Dict[str, np.ndarray]
        A dictionary that describes how well each model has performed.
        It contains information about every model regardless if it reached
        ``max_iter``. It has keys

        * ``mean_partial_fit_time``
        * ``mean_score_time``
        * ``std_partial_fit_time``
        * ``std_score_time``
        * ``test_score``
        * ``rank_test_score``
        * ``model_id``
        * ``partial_fit_calls``
        * ``params``
        * ``param_{key}``, where ``{key}`` is every key in ``params``.
        * ``bracket``

        The values in the ``test_score`` key correspond to the last score a model
        received on the hold out dataset. The key ``model_id`` corresponds with
        ``history_``. This dictionary can be imported into a Pandas DataFrame.

        In the ``model_id``, the bracket ID prefix corresponds to the bracket
        in ``metadata``. Bracket 0 doesn't adapt to previous training at all;
        higher values correspond to more adaptation.
    history_ : list of dicts
        Information about each model after each ``partial_fit`` call. Each dict
        the keys

        * ``partial_fit_time``
        * ``score_time``
        * ``score``
        * ``model_id``
        * ``params``
        * ``partial_fit_calls``
        * ``elapsed_wall_time``

        The key ``model_id`` corresponds to the ``model_id`` in ``cv_results_``.
        This list of dicts can be imported into Pandas.
    model_history_ : dict of lists of dict
        A dictionary of each models history. This is a reorganization of
        ``history_``: the same information is present but organized per model.

        This data has the structure ``{model_id: [h1, h2, h3, ...]}`` where
        ``h1``, ``h2`` and ``h3`` are elements of ``history_``
        and ``model_id`` is the model ID as in ``cv_results_``.
    best_estimator_ : BaseEstimator
        The model with the highest validation score as selected by
        the Hyperband model selection algorithm.
    best_score_ : float
        Score achieved by ``best_estimator_`` on the vaidation set after the
        final call to ``partial_fit``.
    best_index_ : int
        Index indicating which estimator in ``cv_results_`` corresponds to
        the highest score.
    best_params_ : dict
        Dictionary of best parameters found on the hold-out data.
    scorer_ :
        The function used to score models, which has a call signature of
        ``scorer_(estimator, X, y)``.

    Notes
    -----
    .. _hyperband-notes:

    To set ``max_iter`` and the chunk size for ``X`` and ``y``, it is required
    to estimate

    * the number of examples at least one model will see
      (``n_examples``). If 10 passes through the data are needed for
      the longest trained model, ``n_examples = 10 * len(X)``.
    * how many hyper-parameter combinations to sample (``n_params``)

    These can be rough guesses. To determine the chunk size and ``max_iter``,

    1. Let the chunks size be ``chunk_size = n_examples / n_params``
    2. Let ``max_iter = n_params``

    Then, every estimator sees no
    more than ``max_iter * chunk_size = n_examples`` examples.
    Hyperband will actually sample some more hyper-parameter combinations than
    ``n_examples`` (which is why rough guesses are adequate). For example,
    let's say

    * about 200 or 300 hyper-parameters need to be tested to effectively
      search the possible hyper-parameters
    * models need more than ``50 * len(X)`` examples but less than
      ``100 * len(X)`` examples.

    Let's decide to provide ``81 * len(X)`` examples and to sample 243
    parameters. Then each chunk will be 1/3rd the dataset and ``max_iter=243``.

    If you use ``HyperbandSearchCV``, please use the citation for [2]_

    .. code-block:: tex

        @InProceedings{sievert2019better,
            author = {Scott Sievert and Tom Augspurger and Matthew Rocklin},
            title = {{B}etter and faster hyperparameter optimization with {D}ask},
            booktitle = {{P}roceedings of the 18th {P}ython in {S}cience {C}onference},
            pages = {118 - 125},
            year = {2019},
            editor = {Chris Calloway and David Lippa and Dillon Niederhut and David Shupe},  # noqa
            doi = {10.25080/Majora-7ddc1dd1-011}
        }

    References
    ----------
    .. [1] "Hyperband: A novel bandit-based approach to hyperparameter
           optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A.
           Rostamizadeh, and A. Talwalkar. https://arxiv.org/abs/1603.06560
    .. [2] "Better and faster hyperparameter optimization with Dask", 2018 by
           S. Sievert, T. Augspurger, M. Rocklin.
           https://doi.org/10.25080/Majora-7ddc1dd1-011
    """
    def __init__(
        self,
        estimator,
        parameters,
        max_iter=81,
        aggressiveness=3,
        patience=False,
        tol=1e-3,
        test_size=None,
        random_state=None,
        scoring=None,
        verbose=False,
        prefix="",
    ):
        # `aggressiveness` is Hyperband-specific; everything else is handled
        # by the BaseIncrementalSearchCV machinery.
        self.aggressiveness = aggressiveness
        super(HyperbandSearchCV, self).__init__(
            estimator,
            parameters,
            max_iter=max_iter,
            patience=patience,
            tol=tol,
            test_size=test_size,
            random_state=random_state,
            scoring=scoring,
            verbose=verbose,
            prefix=prefix,
        )
    def _get_SHAs(self, brackets):
        """Create one SuccessiveHalvingSearchCV per Hyperband bracket.

        ``brackets`` maps bracket id to ``(n_models, n_initial_iter)`` as
        produced by ``_get_hyperband_params``.
        """
        patience = _get_patience(
            self.patience, self.max_iter, self.aggressiveness, self.tol
        )
        # This is the first time self.random_state is used after
        # HyperbandSearchCV.fit is called.
        seed_start = check_random_state(self.random_state).randint(2 ** 31)
        self._SHA_seed = seed_start
        # These brackets are ordered by adaptivity; bracket=0 is least adaptive
        SHAs = {}
        for b, (n, r) in brackets.items():
            sha = SuccessiveHalvingSearchCV(
                self.estimator,
                self.parameters,
                n_initial_parameters=n,
                aggressiveness=self.aggressiveness,
                max_iter=self.max_iter,
                n_initial_iter=r,
                patience=patience,
                tol=self.tol,
                test_size=self.test_size,
                # bracket 0 reuses the user's random_state unchanged; other
                # brackets get distinct derived seeds
                random_state=seed_start + b if b != 0 else self.random_state,
                scoring=self.scoring,
                verbose=self.verbose,
                prefix=f"{self.prefix}, bracket={b}",
            )
            SHAs[b] = sha
        return SHAs
    @gen.coroutine
    def _fit(self, X, y, **fit_params):
        """Run every bracket concurrently, then merge their results into the
        fitted attributes (``cv_results_``, ``history_``, ``best_*``, ...)."""
        X, y, scorer = self._validate_parameters(X, y)
        brackets = _get_hyperband_params(self.max_iter, eta=self.aggressiveness)
        SHAs = self._get_SHAs(brackets)
        # Which bracket to run first? Going to go with most adaptive;
        # that works best on one machine.
        # (though it doesn't matter a ton; _fit prioritizes high scores
        _brackets_ids = list(reversed(sorted(SHAs)))
        # _fit is run in parallel because it's also a tornado coroutine
        _SHAs = yield [SHAs[b]._fit(X, y, **fit_params) for b in _brackets_ids]
        SHAs = {b: SHA for b, SHA in zip(_brackets_ids, _SHAs)}
        # This for-loop rename estimator IDs and pulls out wall times
        key = "bracket={}-{}".format
        for b, SHA in SHAs.items():
            new_ids = {old: key(b, old) for old in SHA.cv_results_["model_id"]}
            SHA.cv_results_["model_id"] = np.array(
                [new_ids[old] for old in SHA.cv_results_["model_id"]]
            )
            SHA.model_history_ = {
                new_ids[old]: v for old, v in SHA.model_history_.items()
            }
            for hist in SHA.model_history_.values():
                for h in hist:
                    h["model_id"] = new_ids[h["model_id"]]
                    h["bracket"] = b
        # Tag every cv_results_ row with the bracket that produced it.
        for b, SHA in SHAs.items():
            n = len(SHA.cv_results_["model_id"])
            SHA.cv_results_["bracket"] = np.ones(n, dtype=int) * b
        cv_keys = {k for SHA in SHAs.values() for k in SHA.cv_results_.keys()}
        cv_results = {
            k: [v for b in _brackets_ids for v in SHAs[b].cv_results_[k]]
            for k in cv_keys
        }
        cv_results = {k: np.array(v) for k, v in cv_results.items()}
        # The winning estimator is the best-scoring bracket's best estimator.
        scores = {b: SHA.best_score_ for b, SHA in SHAs.items()}
        best_bracket = max(scores, key=scores.get)
        best_estimator = SHAs[best_bracket].best_estimator_
        estimator_history = {
            ident: hist
            for SHA in SHAs.values()
            for ident, hist in SHA.model_history_.items()
        }
        # Order history by time
        history = sum([SHA.history_ for b, SHA in SHAs.items()], [])
        idx = np.argsort([v["elapsed_wall_time"] for v in history])
        history = [history[i] for i in idx]
        best_model_id = SHAs[best_bracket].cv_results_["model_id"][
            SHAs[best_bracket].best_index_
        ]
        best_index = np.argwhere(np.array(cv_results["model_id"]) == best_model_id)
        best_index = best_index.flat[0]
        meta, _ = _get_meta(
            {b: SHA.history_ for b, SHA in SHAs.items()}, brackets.keys(), SHAs, key
        )
        self.metadata_ = {
            "n_models": sum(m["n_models"] for m in meta),
            "partial_fit_calls": sum(m["partial_fit_calls"] for m in meta),
            "brackets": meta,
        }
        self.best_index_ = int(best_index)
        self.best_estimator_ = best_estimator
        self.best_score_ = scores[best_bracket]
        self.best_params_ = cv_results["params"][best_index]
        self.scorer_ = scorer
        self.model_history_ = estimator_history
        self.history_ = history
        self.cv_results_ = cv_results
        self.multimetric_ = SHAs[best_bracket].multimetric_
        self._SuccessiveHalvings_ = SHAs
        raise gen.Return(self)
    @property
    def metadata(self):
        """Describe the computation Hyperband *would* perform, computed
        symbolically from ``max_iter`` and ``aggressiveness`` (no fitting)."""
        bracket_info = _hyperband_paper_alg(self.max_iter, eta=self.aggressiveness)
        num_models = sum(b["n_models"] for b in bracket_info)
        for bracket in bracket_info:
            bracket["decisions"] = sorted(list(bracket["decisions"]))
        num_partial_fit = sum(b["partial_fit_calls"] for b in bracket_info)
        bracket_info = list(reversed(sorted(bracket_info, key=lambda x: x["bracket"])))
        brackets = _get_hyperband_params(self.max_iter, eta=self.aggressiveness)
        SHAs = self._get_SHAs(brackets)
        for bracket in bracket_info:
            b = bracket["bracket"]
            bracket["SuccessiveHalvingSearchCV params"] = _get_SHA_params(SHAs[b])
        bracket_info = sorted(bracket_info, key=lambda x: x["bracket"])
        info = {
            "partial_fit_calls": num_partial_fit,
            "n_models": num_models,
            "brackets": bracket_info,
        }
        return info
def _get_meta(hists, brackets, SHAs, key):
    """Summarize each bracket's flat history into per-bracket metadata.

    Returns a ``(meta, history)`` pair: ``meta`` is a list of per-bracket
    summary dicts sorted by bracket id, and ``history`` maps each renamed
    model id (via ``key(bracket, model_id)``) to its list of history records.
    """
    summaries = []
    combined_history = {}
    for bracket in brackets:
        # Group this bracket's records by (renamed) model id, preserving
        # first-appearance order.
        per_model = {}
        for record in hists[bracket]:
            per_model.setdefault(key(bracket, record["model_id"]), []).append(record)
        combined_history.update(per_model)
        fit_calls = {
            ident: max(rec["partial_fit_calls"] for rec in records)
            for ident, records in per_model.items()
        }
        decisions = {
            rec["partial_fit_calls"]
            for records in per_model.values()
            for rec in records
        }
        if bracket != max(brackets):
            # Only the most adaptive bracket makes a decision after one call.
            decisions.discard(1)
        summaries.append(
            {
                "decisions": sorted(decisions),
                "n_models": len(per_model),
                "bracket": bracket,
                "partial_fit_calls": sum(fit_calls.values()),
                "SuccessiveHalvingSearchCV params": _get_SHA_params(SHAs[bracket]),
            }
        )
    return sorted(summaries, key=lambda s: s["bracket"]), combined_history
def _get_SHA_params(SHA):
"""
Parameters
----------
SHA : SuccessiveHalvingSearchCV
Returns
-------
params : dict
Dictionary to re-create a SuccessiveHalvingSearchCV without the
estimator or parameters
Example
-------
>>> from sklearn.linear_model import SGDClassifier
>>> model = SGDClassifier()
>>> params = {"alpha": np.logspace(-1, 1)}
>>> SHA = SuccessiveHalvingSearchCV(model, params, tol=0.1,
... patience=True, random_state=42)
>>> _get_SHA_params(SHA)
{'aggressiveness': 3,
'max_iter': 100,
'n_initial_iter': 9,
'n_initial_parameters': 10,
'patience': True,
'random_state': 42,
'scoring': None,
'test_size': None,
'tol': 0.1}
"""
return {
k: v
for k, v in SHA.get_params().items()
if "estimator_" not in k and k != "parameters" and k != "estimator"
}
def _hyperband_paper_alg(R, eta=3):
"""
Algorithm 1 from the Hyperband paper [1]_.
References
----------
1. "Hyperband: A novel bandit-based approach to hyperparameter
optimization", 2016 by L. Li, K. Jamieson, G. DeSalvo, A. Rostamizadeh,
and A. Talwalkar. https://arxiv.org/abs/1603.06560
"""
s_max = math.floor(math.log(R, eta))
B = (s_max + 1) * R
brackets = reversed(range(int(s_max + 1)))
hists = {}
for s in brackets:
n = int(math.ceil(B / R * eta ** s / (s + 1)))
r = R * eta ** -s
r = int(r)
T = set(range(n))
hist = {
"num_estimators": n,
"estimators": {n: 0 for n in range(n)},
"decisions": [],
}
for i in range(s + 1):
n_i = math.floor(n * eta ** -i)
r_i = np.round(r * eta ** i).astype(int)
L = {model: r_i for model in T}
hist["estimators"].update(L)
hist["decisions"] += [r_i]
to_keep = math.floor(n_i / eta)
T = {model for i, model in enumerate(T) if i < to_keep}
hists[s] = hist
info = [
{
"bracket": k,
"n_models": hist["num_estimators"],
"partial_fit_calls": sum(hist["estimators"].values()),
"decisions": {int(h) for h in hist["decisions"]},
}
for k, hist in hists.items()
]
return info
def _get_patience(patience, max_iter, aggressiveness, tol):
if not isinstance(patience, bool) and patience < max(max_iter // aggressiveness, 1):
msg = (
"The goal of `patience` is to stop training estimators that have "
"already converged *when few estimators remain*. "
"Hyperband is already an (almost optimal) adaptive scheme, "
"and patience should be large to be a minimal layer on top "
"of Hyperband. \n\n"
"To clear this warning, set \n\n"
" * patience=True\n"
" * patience >= {}\n"
" * tol=None or tol=np.nan\n\n"
"instead of patience={} "
)
if (tol is not None) and not np.isnan(tol):
warn(msg.format(max_iter // aggressiveness, patience))
elif isinstance(patience, bool) and patience:
return max(max_iter // aggressiveness, 1)
elif isinstance(patience, bool) and not patience:
return False
return int(patience)
| 37.958926
| 102
| 0.613243
|
acfcc967ab137664a189755cf911564205187b6a
| 5,618
|
py
|
Python
|
keybind/binder.py
|
idlesign/keybind
|
d439051bf3db019f466aa6396b31d61d9df9d254
|
[
"BSD-3-Clause"
] | 14
|
2018-12-15T08:40:47.000Z
|
2022-03-29T13:37:09.000Z
|
keybind/binder.py
|
idlesign/keybind
|
d439051bf3db019f466aa6396b31d61d9df9d254
|
[
"BSD-3-Clause"
] | 4
|
2019-07-04T21:26:19.000Z
|
2021-03-03T09:25:02.000Z
|
keybind/binder.py
|
idlesign/keybind
|
d439051bf3db019f466aa6396b31d61d9df9d254
|
[
"BSD-3-Clause"
] | 3
|
2019-04-25T18:53:59.000Z
|
2020-04-16T11:36:15.000Z
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import logging
import threading
LOGGER = logging.getLogger('keybinder')
def configure_logging(log_level=None):
    """Perform basic logging configuration.

    :param log_level: logging level, e.g. logging.DEBUG
        Default: logging.INFO

    """
    # The previous docstring documented a `show_logger_names` parameter that
    # this function has never accepted; it has been removed.
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level or logging.INFO)
class KeyBinder(object):
    """Binds keys to functions globally (using X11 key grabs).

    .. code-block:: python

        def do(): print('do')

        KeyBinder.activate({
            'Ctrl-K': do,
        })

    """
    def __init__(self, keymap=None, listen_events=None):
        """
        :param dict keymap: Key name to function mapping.

            Example:

            .. code-block:: python

                def do(): print('do')

                {
                    'Ctrl-K': do,
                    '1': None,  # Just intercept.
                }

        :param int listen_events: X Events or a combination of them.

            Examples:

                * Xlib.X.KeyPressMask
                * Xlib.X.KeyPressMask | Xlib.X.ButtonReleaseMask

        """
        # Xlib is imported here, at construction time, rather than at module
        # import time.
        from Xlib import X, XK
        from Xlib.display import Display
        self.x = X
        self.xk = XK
        self.disp = Display()
        # Root window of the default screen; all key grabs are made on it,
        # which is what makes the bindings global.
        self.screen = self.disp.screen().root
        self.events = listen_events or self.x.KeyPressMask
        self.keymap = keymap or {}
        # keycode -> (key spec, handler) for successfully registered keys.
        self.mapped = {}

    @classmethod
    def activate(cls, keymap=None, listen_events=None, run_thread=False):
        """Alternative constructor.

        Performs keys binding and runs a listener thread.

        With a keymap, registers the given keys; without one, grabs the whole
        keyboard for sniffing (see ``sniff``).

        :param dict keymap: Key name to function mapping.
        :param int listen_events: X Events or a combination of them.
        :param bool run_thread: Run a key listening loop in a thread.
        :rtype: KeyBinder
        """
        binder = cls(keymap=keymap, listen_events=listen_events)

        if keymap:
            binder.register_keys()
        else:
            binder.sniff()

        if run_thread:
            binder.run_thread()
        else:
            # NOTE: this blocks the calling thread forever.
            binder.listen()

        return binder

    def listen(self):
        """Run keys events listening loop (blocking)."""
        events = self.events
        screen = self.screen
        mapped = self.mapped

        while True:
            event = screen.display.next_event()
            # NOTE(review): ``event.type`` is an event *code*, while
            # ``events`` is an event *mask*; AND-ing them looks suspicious —
            # confirm against the Xlib protocol documentation.
            capture = event.type & events

            if not capture:
                continue

            keycode = event.detail
            # Unregistered keycodes fall through with no handler and are
            # only logged.
            key, handler = mapped.get(keycode, (keycode, None))

            if handler:
                handler()
            else:
                LOGGER.info('Intercepted key: %s', key)

    def run_thread(self):
        """Runs key events listening loop in a daemon thread."""
        grabber = threading.Thread(target=self.listen)
        grabber.daemon = True
        grabber.start()

    def register_key(self, key, modifier_default='NumLock'):
        """Registers a key to listen to.

        :param str|unicode|int key: Key name or code.
        :param str|unicode modifier_default: Use this modifier if none specified.
        :rtype: bool
        """
        x = self.x
        modifiers_map = {
            'Ctrl': x.ControlMask,  # 37 105
            'Shift': x.ShiftMask,  # 50 62
            'CapsLock': x.LockMask,  # 66
            'Alt': x.Mod1Mask,  # 64 108
            'NumLock': x.Mod2Mask,  # 77
            'Super': x.Mod4Mask,  # 133 134
        }
        has_error = []
        modifier_alias = None
        modifiers, keycode = self._parse_key(key)

        def on_error(err, event):
            # Collected instead of raised: grab_key reports failures
            # asynchronously via this callback.
            has_error.append((err, event))

        # NOTE(review): ``modifier_alias`` is computed here but never used
        # afterwards, so ``modifier_default`` currently has no effect —
        # confirm whether applying the default modifier was intended.
        modifier_alias = modifier_alias or modifier_default

        modifier_mask = 0
        for modifier in modifiers:
            modifier_mask |= modifiers_map[modifier]

        # Simulate X.AnyModifier as it leads to BadAccess, as if somebody has already grabbed it before us.
        # Register the combination with and without NumLock/CapsLock so the
        # binding works regardless of lock-key state.
        modifiers_all = [
            modifier_mask,
            modifier_mask | modifiers_map['NumLock'],
            modifier_mask | modifiers_map['CapsLock'],
            modifier_mask | modifiers_map['NumLock'] | modifiers_map['CapsLock'],
        ]
        for mod in modifiers_all:
            self.screen.grab_key(keycode, mod, True, x.GrabModeAsync, x.GrabModeAsync, on_error)

        success = not has_error

        if success:
            self.mapped[keycode] = (key, self.keymap[key])

        return success

    def register_keys(self):
        """Registers all keys from current keymap."""
        # screen.change_attributes(event_mask=capture_events)
        for key in self.keymap.keys():
            if not self.register_key(key):
                LOGGER.warning('Unable to register handler for: %s', key)

    def sniff(self):
        """Grab all events. Useful for keycode sniffing."""
        x = self.x
        self.screen.grab_keyboard(self.events, x.GrabModeAsync, x.GrabModeAsync, x.CurrentTime)

    def _parse_key(self, key):
        # Returns (modifier names, X keycode) for an int keycode or a
        # 'Mod-Mod-Key' shortcut string.
        if isinstance(key, int):
            return [], key

        elif isinstance(key, str):
            *modifiers, key_only = key.split('-')
            keycode = self.disp.keysym_to_keycode(self.xk.string_to_keysym(key_only))
            LOGGER.debug('Key translated: %s -> %s', key, keycode)
            return modifiers, keycode

        else:
            raise TypeError("Given key must be a key code (int), or a shortcut (str), e. g. 'Ctrl-K.")
| 26.375587
| 107
| 0.568708
|
acfcc9dc25396820d8e51ccba891f343ea3ba467
| 1,205
|
py
|
Python
|
src/ralph/reports/urls.py
|
DoNnMyTh/ralph
|
97b91639fa68965ad3fd9d0d2652a6545a2a5b72
|
[
"Apache-2.0"
] | 1,668
|
2015-01-01T12:51:20.000Z
|
2022-03-29T09:05:35.000Z
|
src/ralph/reports/urls.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 2,314
|
2015-01-02T13:26:26.000Z
|
2022-03-29T04:06:03.000Z
|
src/ralph/reports/urls.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 534
|
2015-01-05T12:40:28.000Z
|
2022-03-29T21:10:12.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from ralph.reports import views
# Route table for the reports app. Every pattern ends in "/?$" so each view
# is reachable both with and without a trailing slash.
urlpatterns = [
    url(
        r'^category_model_report/?$',
        views.CategoryModelReport.as_view(),
        name='category_model_report'
    ),
    url(
        r'^category_model__status_report/?$',
        views.CategoryModelStatusReport.as_view(),
        name='category_model__status_report'
    ),
    url(
        r'^manufactured_category_model_report/?$',
        views.ManufacturerCategoryModelReport.as_view(),
        name='manufactured_category_model_report'
    ),
    url(
        r'^status_model_report/?$',
        views.StatusModelReport.as_view(),
        name='status_model_report'
    ),
    url(
        r'^asset_relations/?$',
        views.AssetRelationsReport.as_view(),
        name='asset-relations'
    ),
    url(
        r'^licence_relations/?$',
        views.LicenceRelationsReport.as_view(),
        name='licence-relations'
    ),
    url(
        r'^failures_report/?$',
        views.FailureReport.as_view(),
        name='failures-report'
    ),
    url(
        r'^supports_report/?$',
        views.AssetSupportsReport.as_view(),
        name='assets-supports'
    ),
]
| 25.104167
| 56
| 0.60332
|
acfccb3dbdccbd118f55a7132d9ae2ff7cfe322e
| 3,188
|
py
|
Python
|
tensorboard/examples/plugins/example_basic/tensorboard_plugin_example/plugin.py
|
davidsoergel/tensorboard
|
2f6d8dc93ca83484f8f6473ab008ea43202b3a46
|
[
"Apache-2.0"
] | 2
|
2019-11-23T18:36:20.000Z
|
2019-12-07T20:58:02.000Z
|
tensorboard/examples/plugins/example_basic/tensorboard_plugin_example/plugin.py
|
davidsoergel/tensorboard
|
2f6d8dc93ca83484f8f6473ab008ea43202b3a46
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/examples/plugins/example_basic/tensorboard_plugin_example/plugin.py
|
davidsoergel/tensorboard
|
2f6d8dc93ca83484f8f6473ab008ea43202b3a46
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:04:28.000Z
|
2019-10-10T06:04:28.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A sample plugin to demonstrate dynamic loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
import six
from tensorboard.plugins import base_plugin
from tensorboard.util import tensor_util
import werkzeug
from werkzeug import wrappers
from tensorboard_plugin_example import metadata
class ExamplePlugin(base_plugin.TBPlugin):
    """Minimal TensorBoard plugin demonstrating dynamic loading.

    Serves the frontend module (``/index.js``), the run/tag index
    (``/tags``) and the recorded greeting tensors (``/greetings``).
    """

    plugin_name = metadata.PLUGIN_NAME

    def __init__(self, context):
        """`context` is a base_plugin.TBContext; only its multiplexer is kept."""
        self._multiplexer = context.multiplexer

    def is_active(self):
        """Active iff any run has recorded data under this plugin's name."""
        return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))

    def get_plugin_apps(self):
        """Map URL routes to WSGI request handlers."""
        return {
            "/index.js": self._serve_js,
            "/tags": self._serve_tags,
            "/greetings": self._serve_greetings,
        }

    def frontend_metadata(self):
        return base_plugin.FrontendMetadata(es_module_path="/index.js")

    @wrappers.Request.application
    def _serve_js(self, request):
        """Serve the plugin's ES module frontend bundle."""
        del request  # unused
        filepath = os.path.join(os.path.dirname(__file__), "static", "index.js")
        with open(filepath) as infile:
            contents = infile.read()
        return werkzeug.Response(contents, content_type="application/javascript")

    @wrappers.Request.application
    def _serve_tags(self, request):
        """Return ``{run: {tag: {"description": ...}}}`` as JSON."""
        del request  # unused
        mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
        # Start from all known runs so runs without this plugin's data still
        # appear (with an empty tag dict).
        result = {run: {} for run in self._multiplexer.Runs()}
        for (run, tag_to_content) in six.iteritems(mapping):
            for tag in tag_to_content:
                summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
                result[run][tag] = {
                    u"description": summary_metadata.summary_description,
                }
        contents = json.dumps(result, sort_keys=True)
        return werkzeug.Response(contents, content_type="application/json")

    @wrappers.Request.application
    def _serve_greetings(self, request):
        """Return the decoded greeting strings for ?run=...&tag=... as JSON."""
        run = request.args.get("run")
        tag = request.args.get("tag")
        if run is None or tag is None:
            raise werkzeug.exceptions.BadRequest("Must specify run and tag")
        try:
            # np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
            # ndarray.item() is the supported equivalent and returns the
            # same Python bytes scalar here.
            data = [
                tensor_util.make_ndarray(event.tensor_proto).item().decode("utf-8")
                for event in self._multiplexer.Tensors(run, tag)
            ]
        except KeyError:
            raise werkzeug.exceptions.BadRequest("Invalid run or tag")
        contents = json.dumps(data, sort_keys=True)
        return werkzeug.Response(contents, content_type="application/json")
| 35.032967
| 80
| 0.7133
|
acfccb5fadbdc3e2a1fb127d2c15bc4752f96e7c
| 1,383
|
py
|
Python
|
server/video_capture_server.py
|
nuk-icslab/nukxScan
|
fb11c90d44f6b9bcb57d2254a34ea74539397bb2
|
[
"MIT"
] | null | null | null |
server/video_capture_server.py
|
nuk-icslab/nukxScan
|
fb11c90d44f6b9bcb57d2254a34ea74539397bb2
|
[
"MIT"
] | null | null | null |
server/video_capture_server.py
|
nuk-icslab/nukxScan
|
fb11c90d44f6b9bcb57d2254a34ea74539397bb2
|
[
"MIT"
] | 1
|
2022-01-03T07:57:50.000Z
|
2022-01-03T07:57:50.000Z
|
#!/bin/python
import cv2
import socket
import numpy as np
import hashlib
import binascii
# Listen address for the capture server.
SERVER = {
    'HOST': '0.0.0.0',  # bind on all interfaces
    'PORT': 3000
}
def recvFrame(sock):
    """Receive one length-prefixed frame from *sock* and return its payload.

    Wire format: a 4-byte little-endian length header immediately followed
    by that many payload bytes.

    Fixes over the previous version:
      * the header is only parsed once at least 4 bytes have arrived
        (recv may legally return fewer bytes than requested);
      * the loop terminates on ``len(data) >= data_len`` instead of strict
        equality, so a chunk boundary that overshoots cannot spin forever;
      * a peer closing the connection mid-frame raises ConnectionError
        instead of looping forever on empty reads.

    :raises ConnectionError: if the connection closes before a full frame.
    """
    BUFF_SIZE = 4096  # 4 KiB per recv call
    data = b''
    data_len = -1  # unknown until the 4-byte header has been read
    while data_len < 0 or len(data) < data_len:
        part = sock.recv(BUFF_SIZE)
        if not part:
            # recv() returning b'' means the peer closed the connection.
            raise ConnectionError('connection closed before full frame arrived')
        data += part
        if data_len < 0 and len(data) >= 4:
            data_len = int.from_bytes(data[:4], byteorder='little')
            data = data[4:]
    # Trim any bytes beyond the declared frame length.
    return data[:data_len]
# Module-level script body: accept one connection per captured frame,
# read the frame, show it, and acknowledge with its MD5 digest.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((SERVER['HOST'], SERVER['PORT']))
server.listen(10)

while True:
    conn, addr = server.accept()
    print('Received new capture')
    img_bytes = recvFrame(conn)
    print('Length:', len(img_bytes), 'bytes')
    # MD5 is used only as an integrity/acknowledgement checksum here,
    # not for anything security-sensitive.
    hash_gen = hashlib.md5()
    hash_gen.update(img_bytes)
    hash_val = hash_gen.hexdigest()
    print('MD5:', hash_val)
    # Decode the received (compressed) image bytes into a BGR frame.
    img_bytes = np.frombuffer(img_bytes, dtype=np.uint8)
    img_bytes = img_bytes.reshape(-1,1)
    img = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)
    cv2.imshow('Server', img)
    cv2.waitKey(1) # Must write this after imopen
    serverMessage = 'Capture received MD5: '+hash_val
    conn.sendall(serverMessage.encode())
    conn.close()
# NOTE(review): unreachable — the `while True` loop above never exits.
cv2.destroyAllWindows()
| 24.263158
| 68
| 0.636298
|
acfccb6e7b62cdab266e7901344a64460702b59b
| 3,996
|
py
|
Python
|
src/trainer.py
|
JunjieHu/ReCo-RL
|
4406f6eec2d6bee4aa12c8b22494f2d167c570c1
|
[
"MIT"
] | 22
|
2020-02-25T02:07:15.000Z
|
2022-02-20T20:15:32.000Z
|
src/trainer.py
|
JunjieHu/ReCo-RL
|
4406f6eec2d6bee4aa12c8b22494f2d167c570c1
|
[
"MIT"
] | 7
|
2020-05-18T09:13:47.000Z
|
2021-09-26T14:21:19.000Z
|
src/trainer.py
|
JunjieHu/ReCo-RL
|
4406f6eec2d6bee4aa12c8b22494f2d167c570c1
|
[
"MIT"
] | 9
|
2020-02-25T21:30:31.000Z
|
2021-04-22T11:36:49.000Z
|
from __future__ import print_function
import math
import torch
import numpy as np
class Trainer(object):
    """Wraps a model, a loss criterion and an optimizer, and exposes
    per-batch training/validation steps for the MLE, REINFORCE, MIXER and
    REWARD objectives."""

    def __init__(self, args, model, criterion):
        self.args = args
        self.model = model
        self.criterion = criterion
        # initialize optimizer and learning rate
        # Only parameters with requires_grad=True are optimized.
        if self.args.optim == 'sgd':
            self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
        else:
            # Any optim value other than 'sgd' falls through to Adam.
            self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9, 0.999), eps=1e-8, amsgrad=True)
        # initialize loger
        self._num_updates = 0  # count of optimizer steps taken so far
        if args.cuda:
            self.model = self.model.cuda()
            self.criterion = self.criterion.cuda()

    def save_checkpoint(self, filename):
        # Not implemented; checkpointing is presumably handled elsewhere —
        # TODO confirm.
        pass

    def load_checkpoint(self, filename):
        # Not implemented; see save_checkpoint.
        pass

    def train_step(self, sample, objective='MLE'):
        """Run one forward/backward/update step on ``sample`` under the
        chosen objective and return ``(loss, logging_outputs)``."""
        self.model.train()
        self.optimizer.zero_grad()
        # forward pass
        if objective == 'MLE':
            loss, log_outputs = self._forward(sample)
        elif objective == 'REINFORCE':
            loss, log_outputs = self._reinforce(sample)
        elif objective == 'MIXER':
            # MIXER interpolates the MLE and REINFORCE losses by rl_weight.
            mle_loss, log_outputs = self._forward(sample)
            rl_loss, _ = self._reinforce(sample)
            loss = (1 - self.args.rl_weight) * mle_loss + self.args.rl_weight * rl_loss
            log_outputs['rl_loss'] = rl_loss
        elif objective.startswith('REWARD'):
            loss, log_outputs = self._reward(sample, torch.nn.BCELoss())
        else:
            print('Specify objective: MLE or REINFORCE')
            exit(0)
        # backward pass
        grad_norm = self._backward(loss)
        return loss, log_outputs

    def _forward(self, sample):
        """Compute the MLE (cross-entropy) loss for one batch.

        ``sample`` is expected to provide 'src_seq', 'src_lengths',
        'trg_seq', 'target' and 'num_trg_seq' keys — TODO confirm against
        the data loader.
        """
        # get the model's prediction
        lprobs = self.model(sample['src_seq'], sample['src_lengths'], sample['trg_seq'])
        target = sample['target']
        # get the loss
        loss = self.criterion(lprobs.contiguous().view(-1, lprobs.size(-1)), target.contiguous().view(-1))
        # Normalize by the number of target sequences in the batch.
        loss = loss / sample['num_trg_seq']
        logging_outputs = {'loss': loss, 'nsample': sample['target'].size(0)}
        return loss, logging_outputs

    def _backward(self, loss):
        """Backpropagate ``loss``, optionally clip gradients, and step the
        optimizer. Returns the gradient norm."""
        loss.backward()
        if self.args.clip_norm > 0:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_norm)
        else:
            # No clipping: just report the global gradient norm.
            grad_norm = math.sqrt(sum(p.grad.data.norm()**2 for p in self.model.parameters()))
        self.optimizer.step()
        self._num_updates += 1
        return grad_norm

    def valid_step(self, sample, objective='MLE'):
        """Evaluate one batch without gradient tracking; mirrors the
        objective dispatch in train_step.

        NOTE(review): an unrecognized objective leaves ``loss`` undefined
        here (NameError), and _reinforce/_reward switch the model back to
        train mode — confirm whether that is intended during validation.
        """
        self.model.eval()
        with torch.no_grad():
            if objective == 'MLE':
                loss, log_outputs = self._forward(sample)
            elif objective == 'REINFORCE':
                loss, log_outputs = self._reinforce(sample)
            elif objective == 'MIXER':
                mle_loss, log_outputs = self._forward(sample)
                rl_loss, _ = self._reinforce(sample)
                loss = (1 - self.args.rl_weight) * mle_loss + self.args.rl_weight * rl_loss
                log_outputs['rl_loss'] = rl_loss
            elif objective.startswith('REWARD'):
                loss, log_outputs = self._reward(sample, torch.nn.BCELoss())
            # loss, log_outputs = self._forward(sample, eval=True)
        return loss, log_outputs

    def _reinforce(self, sample):
        """Compute the REINFORCE policy-gradient loss for one batch."""
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.model.reinforce(sample, self.args.sample_size, self.args.decode_max_length, self.args.rl_reward)
        logging_outputs = {'loss': loss, 'nsample': sample['target'].size(0)}
        return loss, logging_outputs

    def _reward(self, sample, criteria):
        """Delegate reward-model training to the model itself."""
        self.model.train()
        self.optimizer.zero_grad()
        return self.model.train_reward(sample, criteria)
| 37.345794
| 156
| 0.605105
|
acfccb8daecccd9fa8a44af8870dfa9b4e049a51
| 1,962
|
py
|
Python
|
Python 3 - 9 - Testes automatizados/tests/test_leilao.py
|
dudu1626/Cursos-Alura-de-Python
|
3ebe63d732c4e40984ad5423c7158d5f59a1fd0e
|
[
"MIT"
] | null | null | null |
Python 3 - 9 - Testes automatizados/tests/test_leilao.py
|
dudu1626/Cursos-Alura-de-Python
|
3ebe63d732c4e40984ad5423c7158d5f59a1fd0e
|
[
"MIT"
] | null | null | null |
Python 3 - 9 - Testes automatizados/tests/test_leilao.py
|
dudu1626/Cursos-Alura-de-Python
|
3ebe63d732c4e40984ad5423c7158d5f59a1fd0e
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from src.leilao.dominio import Usuario, Lance, Leilao
# Created automatically with Ctrl+Shift+T with the cursor on the Avaliador class
class TestLeilao(TestCase):
    """Unit tests for the Leilao (auction) domain class."""

    def setUp(self):  # build one shared scenario for every test to run against
        self.dudu = Usuario('Eduardo', 300.0)
        self.fer = Usuario('Fernando', 300.0)
        self.amor = Usuario('Aline', 300.0)

        # Three bids (lances), in strictly increasing value order.
        self.lance_amor = Lance(self.amor, 100.0)
        self.lance_fer = Lance(self.fer, 150.0)
        self.lance_dudu = Lance(self.dudu, 200.0)

        self.leilao = Leilao('celular')

    def test_deve_retornar_o_maior_e_o_menor_valor_de_um_lance_quando_adicionados_em_ordem_crescente(self):
        # Bids proposed in increasing order: highest/lowest must track them.
        self.leilao.propoe(self.lance_amor)
        self.leilao.propoe(self.lance_fer)
        self.leilao.propoe(self.lance_dudu)

        valor_esperado_maior = 200.0
        valor_esperado_menor = 100.0

        self.assertEqual(valor_esperado_menor, self.leilao.menor_lance)
        self.assertEqual(valor_esperado_maior, self.leilao.maior_lance)

    def test_deve_retornar_o_mesmo_valor_para_maior_e_menor_lance_quando_leilao_tiver_apenas_um_lance(self):
        # With a single bid, highest and lowest are the same value.
        self.leilao.propoe(self.lance_dudu)

        valor_esperado_maior = 200
        valor_esperado_menor = 200

        self.assertEqual(valor_esperado_menor, self.leilao.menor_lance)
        self.assertEqual(valor_esperado_maior, self.leilao.maior_lance)

    def test_nao_deve_permitir_propor_um_lance_em_ordem_decrescente(self):
        # Proposing bids in decreasing order must raise ValueError.
        with self.assertRaises(ValueError):
            self.leilao.propoe(self.lance_dudu)
            self.leilao.propoe(self.lance_fer)
            self.leilao.propoe(self.lance_amor)

    def test_nao_deve_permitir_propor_lance_caso_o_usuario_seja_o_mesmo(self):
        # Two consecutive bids from the same user must raise ValueError.
        lance_dudu_250 = Lance(self.dudu, 250.0)

        with self.assertRaises(ValueError):
            self.leilao.propoe(self.lance_dudu)
            self.leilao.propoe(lance_dudu_250)
| 38.470588
| 108
| 0.722222
|
acfccd452e0f0a707c132f04e46795d0f4b49370
| 10,875
|
py
|
Python
|
src/problem4.py
|
bobbyliu6/24-Exam3-201920
|
633a5f58ea7b8287c7299b7ce0ef41f07317d306
|
[
"MIT"
] | null | null | null |
src/problem4.py
|
bobbyliu6/24-Exam3-201920
|
633a5f58ea7b8287c7299b7ce0ef41f07317d306
|
[
"MIT"
] | null | null | null |
src/problem4.py
|
bobbyliu6/24-Exam3-201920
|
633a5f58ea7b8287c7299b7ce0ef41f07317d306
|
[
"MIT"
] | null | null | null |
"""
Exam 3, problem 4.
Authors: Vibha Alangar, Aaron Wilkin, David Mutchler, Dave Fisher,
Matt Boutell, Amanda Stouder, their colleagues and
Weizhou Liu. January 2019.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import time
import testing_helper
def main():
    """ Run the TEST functions in this module. """
    run_test_problem4()
###############################################################################
# DONE: 2. READ the doc-string for the is_prime function defined below.
# It is the same as you have seen before.
# After you UNDERSTAND the doc-string (JUST the doc-string, NOT the code),
# ASKING QUESTIONS AS NEEDED, change the above _TODO_ to DONE.
###############################################################################
def is_prime(n):
    """
    What comes in: An integer n.
    What goes out:
      -- Returns True if the given integer is prime,
         False if the given integer is NOT prime.
         Treats integers less than 2 as NOT prime.
    Side effects: None.
    Examples:
      -- is_prime(11) returns True
      -- is_prime(12) returns False
      -- is_prime(2) returns True
      -- is_prime(1) returns False
    Note: The algorithm used here is simple and clear but slow.
    """
    if n < 2:
        return False
    # Trial division: n is prime iff no candidate up to n // 2 divides it.
    return all(n % candidate != 0 for candidate in range(2, (n // 2) + 1))
    # ------------------------------------------------------------------
    # Students:
    #   Do NOT touch the above is_prime function - it has no TO DO.
    #   Do NOT copy code from this function.
    #
    #   Instead, ** CALL ** this function as needed in the problems below.
    # ------------------------------------------------------------------
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def run_test_problem4():
    """ Tests the problem5 function. """
    ####################################################################
    # THESE TESTS ARE ALREADY DONE.  DO NOT CHANGE THEM.
    ####################################################################
    print()
    print('--------------------------------------------------')
    print('Testing the problem5 function:')
    print('--------------------------------------------------')

    format_string = ' problem5( {}, {} )'
    test_results = [0, 0]  # Number of tests passed, failed.

    # The two fixture inputs shared by the twelve test cases below.
    seq_a = [(3, 25),
             (33, 50, 20, 55, 10),
             (6, 13, 70, 33, 37),
             (7, 11, 109, 61),
             (),
             (5, 5, 3, 150)
             ]
    seq_b = [(2, 5, 30),
             (8, 40),
             (13,),
             (400, 23, 17),
             ]

    # (seq_seq, n, expected) triples — tests 1 through 12, in order.
    cases = [
        (seq_a, 5, 13),
        (seq_a, 50, 109),
        (seq_a, 120, -1),
        (seq_a, 17, 37),
        (seq_a, 2, 3),
        (seq_a, 3, 13),
        (seq_b, 1, 2),
        (seq_b, 2, 5),
        (seq_b, 7, 13),
        (seq_b, 14, 23),
        (seq_b, 100, -1),
        ([], 3, -1),
    ]

    for seq_seq, n, expected in cases:
        print_expected_result_of_test([seq_seq, n], expected, test_results,
                                      format_string)
        actual = problem4(seq_seq, n)  # Run the code to test
        print_actual_result_of_test(expected, actual, test_results)

    # SUMMARY of the test results:
    print_summary_of_test_results(test_results)
def problem4(seq_of_seq, n):
    """
    What comes in:
      -- A sequence of sequences: seq_of_seq
           where the sub-sequences contain only positive integers
      -- A non-negative integer: n
    What goes out: Returns the first number in the sub-sequences
      that is both prime and greater than n.
      Returns -1 if there are no prime numbers greater than n.
    Side effects: None.
    Examples:
      Let seq_of_seq = [(3, 25),
                        (33, 50, 20, 55, 10),
                        (6, 13, 70, 33, 37),
                        (7, 11, 109, 61),
                        (),
                        (5, 5, 3, 150)
                        ]
      Then if n = 5, returns 13.
      If n = 50, returns 109.
      If n = 120, returns -1.
      If n = 17, returns 37.
      If n = 2, returns 3.
      If n = 3, returns 13.
    Type hints:
      :type seq_of_seq: [[int]]
      :type n: int
      :rtype: int
    """
    # Previously the docstring sat *below* the return statement, where it
    # was not attached to the function; it has been moved to its proper
    # position.  Iterate elements directly (no range(len(...)) indexing);
    # the cheap `> n` test runs before the slow primality test.
    for subsequence in seq_of_seq:
        for number in subsequence:
            if number > n and is_prime(number):
                return number
    return -1
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
                                  test_results, format_string):
    # Thin wrapper: delegates to testing_helper so output formatting
    # (including red error coloring) lives in one place.
    testing_helper.print_expected_result_of_test(arguments, expected,
                                                 test_results,
                                                 format_string)


def print_actual_result_of_test(expected, actual, test_results):
    # Thin wrapper over testing_helper; updates test_results in place.
    testing_helper.print_actual_result_of_test(expected, actual,
                                               test_results)


def print_summary_of_test_results(test_results):
    # Thin wrapper over testing_helper.
    testing_helper.print_summary_of_test_results(test_results)
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True  # Change to False to revert to OLD style coloring

testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
    # Deliberately shadows the builtin print with a color-capable version.
    # noinspection PyShadowingBuiltins
    print = testing_helper.print_colored
else:
    # noinspection PyShadowingBuiltins
    print = testing_helper.print_uncolored

# -----------------------------------------------------------------------------
# Calls  main  to start the ball rolling.
# The   try .. except   prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
    main()
except Exception:
    print('ERROR - While running this test,', color='red')
    print('your code raised the following exception:', color='red')
    print()
    time.sleep(1)  # give ordinary output time to flush before the traceback
    raise
| 32.854985
| 79
| 0.490943
|
acfccdb13ab96a1d2a78a237467c4510eaf22d28
| 907
|
py
|
Python
|
rusentrel/rusentrel_ds_cv/mi_att/att_hidden_z_yang.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | null | null | null |
rusentrel/rusentrel_ds_cv/mi_att/att_hidden_z_yang.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2020-12-16T18:21:11.000Z
|
2020-12-30T10:08:27.000Z
|
rusentrel/rusentrel_ds_cv/mi_att/att_hidden_z_yang.py
|
nicolay-r/attitude-extraction-with-attention-and-ds
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
[
"MIT"
] | 1
|
2021-03-29T20:58:26.000Z
|
2021-03-29T20:58:26.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../')
from arekit.contrib.networks.multi.configurations.att_self import AttSelfOverSentencesConfig
from arekit.contrib.networks.multi.architectures.att_self import AttSelfOverSentences
from rusentrel.mi_names import AttSelfOverInstancesModelNames
from rusentrel.classic.mi.att_hidden_z_yang import run_testing_att_hidden_zyang_bilstm
from rusentrel.classic_cv.common import CV_COUNT, \
classic_cv_common_callback_modification_func, \
CV_NAME_PREFIX
if __name__ == "__main__":
run_testing_att_hidden_zyang_bilstm(
name_prefix=CV_NAME_PREFIX,
cv_count=CV_COUNT,
model_names_classtype=AttSelfOverInstancesModelNames,
network_classtype=AttSelfOverSentences,
config_classtype=AttSelfOverSentencesConfig,
custom_callback_func=classic_cv_common_callback_modification_func)
| 36.28
| 92
| 0.805954
|
acfccdb6d24a7e1ba490705dd147f21dbf921d31
| 2,656
|
py
|
Python
|
deploy/pdserving/det_local_server.py
|
gvvynplaine/PaddleOCR
|
ec903eb5a0aeed54067739e2e6c3dfa0cdc112c9
|
[
"Apache-2.0"
] | 2
|
2020-09-18T04:28:50.000Z
|
2020-09-18T04:30:09.000Z
|
deploy/pdserving/det_local_server.py
|
pengfudan/paddleocr
|
7404050bdf4d4555065d2db1ac30cdb6d892d6f8
|
[
"Apache-2.0"
] | 3
|
2020-03-05T03:45:46.000Z
|
2020-03-05T04:26:26.000Z
|
deploy/pdserving/det_local_server.py
|
pengfudan/paddleocr
|
7404050bdf4d4555065d2db1ac30cdb6d892d6f8
|
[
"Apache-2.0"
] | 1
|
2020-07-22T10:31:31.000Z
|
2020-07-22T10:31:31.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
import cv2
import sys
import numpy as np
import os
from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes
from paddle_serving_server_gpu.web_service import WebService
import time
import re
import base64
class OCRService(WebService):
    """PaddleServing web service wrapping a DB text-detection model.

    ``preprocess`` decodes a base64 JPEG/PNG into a normalized CHW tensor;
    ``postprocess`` turns the raw network output back into text boxes in
    the original image's coordinates.
    """

    def init_det(self):
        """Build the detection pre- and post-processing pipelines."""
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
                (2, 0, 1))
        ])
        # Drop boxes smaller than 10x10 pixels.
        self.filter_func = FilterBoxes(10, 10)
        self.post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })

    def preprocess(self, feed=[], fetch=[]):
        """Decode the base64 image in feed[0] and return the model input.

        NOTE: the mutable default arguments mirror the WebService base-class
        signature and are never mutated here.
        """
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        # np.fromstring is deprecated (removed in newer NumPy);
        # np.frombuffer is the supported equivalent for raw bytes.
        data = np.frombuffer(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        # Remember original and resized dimensions for postprocess scaling.
        self.ori_h, self.ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, self.new_h, self.new_w = det_img.shape
        return {"image": det_img[np.newaxis, :].copy()}, ["concat_1.tmp_0"]

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        """Convert the raw detection map into boxes in original coordinates."""
        det_out = fetch_map["concat_1.tmp_0"]
        ratio_list = [
            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
        ]
        dt_boxes_list = self.post_func(det_out, [ratio_list])
        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
        return {"dt_boxes": dt_boxes.tolist()}
# Module-level bootstrap: build the detection service, load the model,
# bind GPU 0, and start serving on port 9292.
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_det_model")
ocr_service.set_gpus("0")
ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
ocr_service.init_det()
ocr_service.run_debugger_service()
ocr_service.run_web_service()
| 36.888889
| 79
| 0.68637
|
acfccdc39be08944b1d7ea686b0db3515119678e
| 2,667
|
py
|
Python
|
src/vdbdump.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 4
|
2021-10-17T11:17:59.000Z
|
2022-02-28T16:58:40.000Z
|
src/vdbdump.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 17
|
2021-10-05T21:44:06.000Z
|
2022-03-31T16:58:40.000Z
|
src/vdbdump.py
|
moibenko/enstore
|
6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9
|
[
"Intel",
"Unlicense"
] | 8
|
2021-09-02T18:55:49.000Z
|
2022-03-09T21:05:28.000Z
|
#!/usr/bin/env python
import db
import sys
import time
import string
import pprint
# time2timestamp(t) -- convert time to "YYYY-MM-DD HH:MM:SS"
def time2timestamp(t):
    """Render epoch seconds *t* as a local-time "YYYY-MM-DD HH:MM:SS" string."""
    broken_down = time.localtime(t)
    return time.strftime("%Y-%m-%d %H:%M:%S", broken_down)
# timestamp2time(ts) -- convert "YYYY-MM-DD HH:MM:SS" to time
def timestamp2time(s):
    """Parse a local-time "YYYY-MM-DD HH:MM:SS" string into epoch seconds."""
    broken_down = time.strptime(s, "%Y-%m-%d %H:%M:%S")
    return time.mktime(broken_down)
def formatedv(vol):
    """Render one volume record (a dict) as a single tab-separated line.

    Optional fields ('sum_mounts', 'si_time', 'comment') fall back to
    defaults when absent.  The Python-2-only dict.has_key() calls and the
    string-module function were replaced with dict.get()/'in' and
    str.split(), which behave identically and also work on Python 3.
    """
    blocksize = vol['blocksize']
    capacity_bytes = vol['capacity_bytes']
    declared = time2timestamp(vol['declared'])
    eod_cookie = vol['eod_cookie']
    external_label = vol['external_label']
    first_access = time2timestamp(vol['first_access'])
    last_access = time2timestamp(vol['last_access'])
    library = vol['library']
    media_type = vol['media_type']
    non_del_files = vol['non_del_files']
    remaining_bytes = vol['remaining_bytes']
    # 'sum_mounts' is optional; default to 0 mounts.
    sum_mounts = vol.get('sum_mounts', 0)
    sum_rd_access = vol['sum_rd_access']
    sum_rd_err = vol['sum_rd_err']
    sum_wr_access = vol['sum_wr_access']
    sum_wr_err = vol['sum_wr_err']
    system_inhibit_0 = vol['system_inhibit'][0]
    system_inhibit_1 = vol['system_inhibit'][1]
    # 'si_time' is optional; fall back to the declaration time.
    if 'si_time' in vol:
        si_time_0 = time2timestamp(vol['si_time'][0])
        si_time_1 = time2timestamp(vol['si_time'][1])
    else:
        si_time_0 = declared
        si_time_1 = declared
    user_inhibit_0 = vol['user_inhibit'][0]
    user_inhibit_1 = vol['user_inhibit'][1]
    # volume_family is "storage_group.file_family[.wrapper]".
    t = vol['volume_family'].split('.')
    storage_group = t[0]
    file_family = t[1]
    if len(t) > 2:
        wrapper = t[2]
    else:
        wrapper = 'none'
    comment = vol.get('comment', '')
    res = "%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"%(
        blocksize, capacity_bytes, declared, eod_cookie,
        external_label, first_access, last_access, library,
        media_type, non_del_files, remaining_bytes, sum_mounts,
        sum_rd_access, sum_rd_err, sum_wr_access, sum_wr_err,
        system_inhibit_0, system_inhibit_1,
        si_time_0, si_time_1,
        user_inhibit_0, user_inhibit_1,
        storage_group, file_family, wrapper, comment)
    return res
if __name__ == '__main__':
    # Dump every record of the 'volume' table to a tab-separated text file.
    # Output path is argv[1] when given, else 'vol.dmp'.  Python 2 script
    # (print statements); db is the project's Berkeley-DB-style wrapper.
    vol = db.DbTable('volume', '.', '/tmp', [], 0)
    c = vol.newCursor()
    k, v = c.first()
    count = 0
    if len(sys.argv) > 1:
        outf = open(sys.argv[1], 'w')
    else:
        outf = open('vol.dmp', 'w')
    last_time = time.time()
    # Iterate the cursor until the key comes back falsy (end of table).
    while k:
        l = formatedv(v)
        outf.write(l+'\n')
        k, v = c.next()
        count = count + 1
        # Progress report: throughput over each 1000-record window.
        if count % 1000 == 0:
            time_now = time.time()
            print "%12d %14.2f records/sec"%(count,
                1000.0/(time_now - last_time))
            last_time = time_now
#        if count > 10:
#            break
    outf.close()
    c.close()
    vol.close()
| 27.494845
| 113
| 0.68054
|
acfccde0024f39961407b72068541d9b4027c54c
| 3,119
|
py
|
Python
|
sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
|
dannymartinm/beam
|
7bf822a541939ff6874d60d4ef18957bc05128b8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-02-25T21:58:19.000Z
|
2022-02-25T21:58:19.000Z
|
sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
|
dannymartinm/beam
|
7bf822a541939ff6874d60d4ef18957bc05128b8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-06T16:01:15.000Z
|
2022-01-06T16:01:15.000Z
|
sdks/python/apache_beam/examples/snippets/transforms/aggregation/combineglobally_test.py
|
dannymartinm/beam
|
7bf822a541939ff6874d60d4ef18957bc05128b8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 17
|
2021-12-15T19:31:54.000Z
|
2022-01-31T18:54:23.000Z
|
# coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import combineglobally
def check_common_items(actual):
  """Assert the common_items snippet produced the expected set output."""
  snippet = '''[START common_items]
{'🍅', '🥕'}
[END common_items]'''
  expected = snippet.splitlines()[1:-1]
  assert_matches_stdout(actual, expected)
def check_common_items_with_exceptions(actual):
  """Assert the common_items_with_exceptions snippet produced {'🍅'}."""
  snippet = '''[START common_items_with_exceptions]
{'🍅'}
[END common_items_with_exceptions]'''
  expected = snippet.splitlines()[1:-1]
  assert_matches_stdout(actual, expected)
def check_custom_common_items(actual):
  """Assert the custom_common_items snippet produced the expected set."""
  snippet = '''[START custom_common_items]
{'🍅', '🍇', '🌽'}
[END custom_common_items]'''
  expected = snippet.splitlines()[1:-1]
  assert_matches_stdout(actual, expected)
def check_percentages(actual):
  """Assert the percentages snippet produced the expected dict output."""
  snippet = '''[START percentages]
{'🥕': 0.3, '🍅': 0.6, '🍆': 0.1}
[END percentages]'''
  expected = snippet.splitlines()[1:-1]
  assert_matches_stdout(actual, expected)
@mock.patch('apache_beam.Pipeline', TestPipeline)
# pylint: disable=line-too-long
@mock.patch(
    'apache_beam.examples.snippets.transforms.aggregation.combineglobally.print',
    str)
# pylint: enable=line-too-long
class CombineGloballyTest(unittest.TestCase):
  """One test per CombineGlobally snippet.

  The class-level patches substitute a TestPipeline for apache_beam.Pipeline
  and neutralize the snippets' print() calls, so each snippet's output is
  verified through its check_* callback rather than stdout.
  """
  def test_combineglobally_function(self):
    combineglobally.combineglobally_function(check_common_items)
  def test_combineglobally_lambda(self):
    combineglobally.combineglobally_lambda(check_common_items)
  def test_combineglobally_multiple_arguments(self):
    combineglobally.combineglobally_multiple_arguments(
        check_common_items_with_exceptions)
  def test_combineglobally_side_inputs_singleton(self):
    combineglobally.combineglobally_side_inputs_singleton(
        check_common_items_with_exceptions)
  # TODO: enable side inputs tests after [BEAM-8400] is fixed.
  # https://github.com/apache/beam/issues/19851
  # def test_combineglobally_side_inputs_iter(self):
  #   combineglobally.combineglobally_side_inputs_iter(
  #       check_common_items_with_exceptions)
  # def test_combineglobally_side_inputs_dict(self):
  #   combineglobally.combineglobally_side_inputs_dict(
  #       check_custom_common_items)
  def test_combineglobally_combinefn(self):
    combineglobally.combineglobally_combinefn(check_percentages)
# Standard unittest entry point for running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 32.489583
| 81
| 0.77621
|
acfccdff6c07b3fcd2e75a6baaa4653d42189912
| 155
|
py
|
Python
|
src/macro/untitled1.py
|
shreyash-sharma/automater
|
aebec0492ce03f7d7d2f8b1a902bfb059803c9be
|
[
"Apache-2.0"
] | 1
|
2021-05-14T06:12:07.000Z
|
2021-05-14T06:12:07.000Z
|
src/macro/untitled1.py
|
shreyash-sharma/automater
|
aebec0492ce03f7d7d2f8b1a902bfb059803c9be
|
[
"Apache-2.0"
] | null | null | null |
src/macro/untitled1.py
|
shreyash-sharma/automater
|
aebec0492ce03f7d7d2f8b1a902bfb059803c9be
|
[
"Apache-2.0"
] | null | null | null |
import pyautogui
# locateCenterOnScreen matches the PNG template against the current screen;
# a non-PNG template will not work, and no match yields None.
start = pyautogui.locateCenterOnScreen('prett.png')
print(start)
# NOTE(review): if the template was not found, start is None here — confirm
# that pyautogui.moveTo(None) is the intended behavior in that case.
pyautogui.moveTo(start)
| 38.75
| 99
| 0.787097
|
acfccf49946afcddb0fc5a1305f405bd5685595d
| 1,480
|
py
|
Python
|
pretty_display.py
|
hsean/python_web_scrapper
|
39f8bda3b589a9126b12177e70896728c2407384
|
[
"MIT"
] | null | null | null |
pretty_display.py
|
hsean/python_web_scrapper
|
39f8bda3b589a9126b12177e70896728c2407384
|
[
"MIT"
] | null | null | null |
pretty_display.py
|
hsean/python_web_scrapper
|
39f8bda3b589a9126b12177e70896728c2407384
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
###############################################################################
# Author: Sean Hendrickson
# File: pretty_display.py
# Desc: This file displays a list of titles and prices in two columns and
#       displays the total sum at the end of the list
###############################################################################
# store text file
lines = []
titles = []
prices = []
totalCost = 0
max_title_length = 0
max_price_length = 0

# read data from a text file: price lines start with '$', blank lines are
# skipped, everything else is a title
with open('output.txt', 'r') as f:
    lines = f.readlines()

count = 0
for line in lines:
    if line.startswith('$'):
        simplifiedPrice = line.replace('$', '').replace('\n', '')
        prices.append(simplifiedPrice)
        totalCost += float(simplifiedPrice)  # sum prices for later use
    elif line == "\n":
        pass
    else:
        title = line.strip()
        titles.append(title)
        count = count + 1
        # Bug fix: measure the stripped title, not the raw line — the raw
        # line still carries its trailing newline, which inflated the
        # title-column width by one character.
        if len(title) > max_title_length:  # find largest string
            max_title_length = len(title)

# find maximum price length (width of the formatted total)
max_price_length = len(str(round(totalCost, 2)))

# display each item, then the right-aligned total
for x in range(count):
    print("{:{x}} ${:>{y}}".format(titles[x], prices[x].strip(), x=max_title_length, y=max_price_length))
print("{:>{x}} ${:{y}.2f}".format("Total Cost:", round(totalCost, 2), x=max_title_length, y=max_price_length))
| 32.888889
| 117
| 0.538514
|
acfcd17f187c19ecc2e78baa7599cad920280fe9
| 989
|
py
|
Python
|
userbot/utils/__init__.py
|
masbentoooredoo/WeebProject
|
d34c14b42801915518a87831f952e5ade95183e2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/utils/__init__.py
|
masbentoooredoo/WeebProject
|
d34c14b42801915518a87831f952e5ade95183e2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/utils/__init__.py
|
masbentoooredoo/WeebProject
|
d34c14b42801915518a87831f952e5ade95183e2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 10
|
2020-12-13T14:32:47.000Z
|
2021-04-11T06:45:35.000Z
|
# Copyright (C) 2020 Adek Maulana
#
# SPDX-License-Identifier: GPL-3.0-or-later
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .chrome import chrome, options
from .google_images_download import googleimagesdownload
from .progress import progress
from .tools import (
human_to_bytes,
humanbytes,
md5,
post_to_telegraph,
run_cmd,
time_formatter,
)
| 35.321429
| 72
| 0.73913
|
acfcd1a2def76a0befdca6f8dfca422a50db9c6b
| 3,247
|
py
|
Python
|
bin/batch_process.py
|
atomicguy/vvr_tools
|
76989d3d432bbb2a93249978f1b894e6e98cd8f4
|
[
"MIT"
] | 1
|
2018-10-18T09:33:05.000Z
|
2018-10-18T09:33:05.000Z
|
bin/batch_process.py
|
atomicguy/vvr_tools
|
76989d3d432bbb2a93249978f1b894e6e98cd8f4
|
[
"MIT"
] | null | null | null |
bin/batch_process.py
|
atomicguy/vvr_tools
|
76989d3d432bbb2a93249978f1b894e6e98cd8f4
|
[
"MIT"
] | null | null | null |
from __future__ import division, absolute_import
import os
import glob
import json
import progressbar
import subprocess
from argparse import ArgumentParser
from src.card import StereoCard
from src.pairs import StereoPairGC
def find_filepaths(path, extension):
return glob.glob('%s/*.%s' % (path, extension))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--img_dir', type=str, help='directory of images', required=True)
parser.add_argument('--out', type=str, help='output directory', required=True)
parser.add_argument('--eval', type=str, help='evaluate results', default=False)
parser.add_argument('--truth', type=str, help='path to truth data', default='data/validation_cardinfo')
args = parser.parse_args()
card_info_path = os.path.join(args.out, 'info.json')
mip_info_path = os.path.join(args.out, 'pairs.json')
results_path = os.path.join(args.out, 'results')
if not os.path.exists(args.out):
os.makedirs(args.out)
if not os.path.exists(results_path):
os.makedirs(results_path)
card_info_list = []
mip_info_list = []
images = find_filepaths(args.img_dir, 'jpg')
info = {'split_channel': 'cbcr',
'binary': 'otsu',
'filter_size': 5,
'input_imgs': args.img_dir,
'method_details':
{'gc_type': 'rect',
'iter_count': 5,
'k_scale': 0.01,
'scale': 0.5,
'probable_background': [0.0, 0.0, 0.0, 0.0],
'probable_foreground': [0.0, 0.0, 0.0, 0.0],
'rect_scale': [0.05, 0.01, 0.05, 0.01],
'sure_foreground': [0.0, 0.0, 0.0, 0.0]
},
'mip_method': 'grabcut',
'out_dir': results_path}
for img in progressbar.progressbar(sorted(images)):
name = os.path.splitext(os.path.basename(img))[0]
info['path'] = img
card = StereoCard(info)
img_data = card.img
bbox = card.bbox
card_info = {'name': name, 'bbox': bbox}
card_info_list.append(card_info)
method_details = info['method_details']
method_details['card_bb'] = bbox
method_details['path'] = info['path']
mip = StereoPairGC(method_details)
mip_bb_card = mip.mip_bbox()
mip_bb = {'x0': mip_bb_card[0],
'y0': mip_bb_card[1],
'x1': mip_bb_card[2],
'y1': mip_bb_card[3]}
mip_info = {'name': name, 'bbox': mip_bb}
mip_info_list.append(mip_info)
img_mip = mip.img.crop((mip_bb['x0'], mip_bb['y0'], mip_bb['x1'], mip_bb['y1']))
img_mip.save(os.path.join(results_path, '{}.jpg'.format(name)))
with open(card_info_path, 'w') as f:
json.dump(card_info_list, f, indent=2)
with open(mip_info_path, 'w') as f:
json.dump(mip_info_list, f, indent=2)
if json.loads(args.eval.lower()):
eval_cmd = ['python3', 'bin/evaluate.py',
'--card_info', card_info_path,
'--pair_info', mip_info_path,
'--truth', args.truth,
'--out', args.out]
subprocess.call(eval_cmd)
| 32.47
| 107
| 0.578072
|
acfcd3b371da66b9d6bbba28ff77045e3a2c76b3
| 12,880
|
py
|
Python
|
train.py
|
quanzhanjiajia/yolov4-flask
|
644aa91ad776c9e306b49aec0aaa33fc34d5dd05
|
[
"MIT"
] | null | null | null |
train.py
|
quanzhanjiajia/yolov4-flask
|
644aa91ad776c9e306b49aec0aaa33fc34d5dd05
|
[
"MIT"
] | null | null | null |
train.py
|
quanzhanjiajia/yolov4-flask
|
644aa91ad776c9e306b49aec0aaa33fc34d5dd05
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, ReduceLROnPlateau, EarlyStopping
from nets.yolo4 import yolo_body
from nets.loss import yolo_loss
from utils.utils import get_random_data, get_random_data_with_Mosaic, rand, WarmUpCosineDecayScheduler, ModelCheckpoint
import os
#---------------------------------------------------#
#   Load class names and anchor boxes from disk
#---------------------------------------------------#
def get_classes(classes_path):
    """Read one class name per line from *classes_path* (whitespace-stripped)."""
    with open(classes_path) as f:
        return [line.strip() for line in f.readlines()]
def get_anchors(anchors_path):
    """Read one comma-separated line of anchor sizes; return an (N, 2) array."""
    with open(anchors_path) as f:
        first_line = f.readline()
    values = [float(tok) for tok in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
#---------------------------------------------------#
#   Training data generator
#---------------------------------------------------#
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, mosaic=False):
    '''data generator for fit_generator

    Endless generator yielding ([image_batch, *y_true], dummy_zeros).
    Shuffles annotation_lines in place each time the index wraps to 0.
    With mosaic=True, batch items alternate between 4-image mosaic samples
    and single-image samples (the `flag` toggle).
    '''
    n = len(annotation_lines)
    i = 0
    flag = True
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i==0:
                np.random.shuffle(annotation_lines)
            if mosaic:
                # NOTE(review): the mosaic branch reads lines i..i+3 but only
                # advances i by 1, so consecutive mosaic samples overlap —
                # presumably intentional augmentation; confirm.
                if flag and (i+4) < n:
                    image, box = get_random_data_with_Mosaic(annotation_lines[i:i+4], input_shape)
                    i = (i+1) % n
                else:
                    image, box = get_random_data(annotation_lines[i], input_shape)
                    i = (i+1) % n
                flag = bool(1-flag)
            else:
                image, box = get_random_data(annotation_lines[i], input_shape)
                i = (i+1) % n
            image_data.append(image)
            box_data.append(box)
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        # Convert raw boxes to per-layer training targets.
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        # Keras requires a target array; the real loss is computed inside the model.
        yield [image_data, *y_true], np.zeros(batch_size)
#---------------------------------------------------#
#   Build y_true training targets from ground-truth boxes
#---------------------------------------------------#
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
    # Number of feature layers (three for the full model)
    num_layers = len(anchors)//3
    # Anchor assignment per layer:
    # mask 6,7,8 -> anchors 142,110  192,243  459,401
    # mask 3,4,5 -> anchors 36,75    76,55    72,146
    # mask 0,1,2 -> anchors 12,16    19,36    40,28
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32') # 416,416
    # Box centers (m,n,2) and widths/heights from corner coordinates
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    # Normalize centers/sizes to the input resolution
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
    # m images in the batch
    m = true_boxes.shape[0]
    # Grid shapes: 13x13, 26x26, 52x52 for a 416x416 input
    grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
    # y_true shapes: (m,13,13,3,5+classes) (m,26,26,3,...) (m,52,52,3,...)
    y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
        dtype='float32') for l in range(num_layers)]
    # [1,9,2] so anchors broadcast against per-image boxes below
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    # Only boxes with positive width are valid
    valid_mask = boxes_wh[..., 0]>0
    for b in range(m):
        # Process each image in turn
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh)==0: continue
        # [n,1,2]
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes
        # IoU between every ground-truth box and every anchor
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)
        # Shape (n): index of the best-matching anchor for each box
        best_anchor = np.argmax(iou, axis=-1)
        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    # np.floor rounds down to the owning grid-cell index
                    i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
                    # Slot of this box for image b in feature layer l
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b,t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5+c] = 1
    return y_true
# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
#----------------------------------------------------#
#   Reference video for mAP / PR-curve evaluation:
#   https://www.bilibili.com/video/BV1zE411u7Vw
#----------------------------------------------------#
if __name__ == "__main__":
    # Annotation file (one image + boxes per line)
    annotation_path = '2007_train.txt'
    # Class-name and anchor files
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    #------------------------------------------------------#
    #   Pretrained weights: see README for the download link.
    #   Dimension-mismatch warnings are expected when training on your own
    #   dataset — the prediction heads differ, so those layers are skipped.
    #------------------------------------------------------#
    weights_path = 'model_data/yolo4_weight.h5'
    # Load classes and anchors
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    # Number of classes and anchors
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # Input resolution: 416x416 for small GPUs, 608x608 if memory allows
    input_shape = (416,416)
    mosaic = True
    Cosine_scheduler = False
    label_smoothing = 0
    # Model input placeholder (variable spatial size)
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    # Build the YOLOv4 body
    print('Create YOLOv4 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    # Load pretrained weights, skipping mismatched (head) layers
    print('Load weights {}.'.format(weights_path))
    model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
    # y_true shapes: 13,13,3,5+classes / 26,26,... / 52,52,...
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]
    # Wrap the loss as a Lambda layer: inputs are the body outputs plus the
    # y_true placeholders, output is the scalar yolo_loss
    loss_input = [*model_body.output, *y_true]
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5, 'label_smoothing': label_smoothing})(loss_input)
    model = Model([model_body.input, *y_true], model_loss)
    model.summary()
    # Directory for checkpoints and TensorBoard logs
    log_dir = os.path.join("logs")
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    # Training callbacks.  Paths are built with os.path.join — the previous
    # string concatenation (log_dir + 'file.h5') dropped the path separator
    # and wrote files like "logstrained_weights_stage_1.h5" next to, not
    # inside, the log directory.
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(os.path.join(log_dir, "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5"), save_weights_only=True, save_best_only=False, period=1)
    early_stopping = EarlyStopping(min_delta=0, patience=10, verbose=1)
    # 10% of the annotations for validation, 90% for training
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
    #------------------------------------------------------#
    #   The backbone features are generic: freezing them speeds up training
    #   and protects the pretrained weights early on.
    #   Init_epoch:   starting epoch
    #   Freeze_epoch: epochs trained with a frozen backbone
    #   Epoch:        total training epochs
    #   Reduce Batch_size if you run out of GPU memory.
    #------------------------------------------------------#
    freeze_layers = 302
    for i in range(freeze_layers): model_body.layers[i].trainable = False
    print('Freeze the first {} layers of total {} layers.'.format(freeze_layers, len(model_body.layers)))
    # Stage 1: train only the non-backbone layers
    if True:
        Init_epoch = 0
        Freeze_epoch = 50
        # Batch size: samples fed per optimization step
        batch_size = 2
        # Peak learning rate
        learning_rate_base = 1e-3
        if Cosine_scheduler:
            # Warm-up epochs
            warmup_epoch = int((Freeze_epoch-Init_epoch)*0.2)
            # Total optimizer steps
            total_steps = int((Freeze_epoch-Init_epoch) * num_train / batch_size)
            # Warm-up steps
            warmup_steps = int(warmup_epoch * num_train / batch_size)
            # Cosine learning-rate schedule with warm-up
            reduce_lr = WarmUpCosineDecayScheduler(learning_rate_base=learning_rate_base,
                                                        total_steps=total_steps,
                                                        warmup_learning_rate=1e-4,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=num_train,
                                                        min_learn_rate=1e-6
                                                        )
            model.compile(optimizer=Adam(), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        else:
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)
            model.compile(optimizer=Adam(learning_rate_base), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit(data_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, mosaic=mosaic),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, mosaic=False),
                validation_steps=max(1, num_val//batch_size),
                epochs=Freeze_epoch,
                initial_epoch=Init_epoch,
                max_queue_size=1,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping],
                )
        model.save_weights(os.path.join(log_dir, 'trained_weights_stage_1.h5'))
    for i in range(freeze_layers): model_body.layers[i].trainable = True
    # Stage 2: fine-tune with the backbone unfrozen
    if True:
        Freeze_epoch = 50
        Epoch = 100
        # Batch size: samples fed per optimization step
        batch_size = 2
        # Peak learning rate (lower for fine-tuning)
        learning_rate_base = 1e-4
        if Cosine_scheduler:
            # Warm-up epochs
            warmup_epoch = int((Epoch-Freeze_epoch)*0.2)
            # Total optimizer steps
            total_steps = int((Epoch-Freeze_epoch) * num_train / batch_size)
            # Warm-up steps
            warmup_steps = int(warmup_epoch * num_train / batch_size)
            # Cosine learning-rate schedule with warm-up
            reduce_lr = WarmUpCosineDecayScheduler(learning_rate_base=learning_rate_base,
                                                        total_steps=total_steps,
                                                        warmup_learning_rate=1e-5,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=num_train//2,
                                                        min_learn_rate=1e-6
                                                        )
            model.compile(optimizer=Adam(), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        else:
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)
            model.compile(optimizer=Adam(learning_rate_base), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, mosaic=mosaic),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, mosaic=False),
                validation_steps=max(1, num_val//batch_size),
                epochs=Epoch,
                initial_epoch=Freeze_epoch,
                max_queue_size=1,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(os.path.join(log_dir, 'last1.h5'))
| 41.682848
| 155
| 0.563975
|
acfcd3d93d2af7d238335cfb9c35406a8e49d4ee
| 1,263
|
py
|
Python
|
src/robot/running/usererrorhandler.py
|
yahman72/robotframework
|
9f82d9a2bf088073859eb23a33d275c6a8c0b975
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2015-03-11T14:59:20.000Z
|
2015-03-11T14:59:20.000Z
|
src/robot/running/usererrorhandler.py
|
yahman72/robotframework
|
9f82d9a2bf088073859eb23a33d275c6a8c0b975
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/running/usererrorhandler.py
|
yahman72/robotframework
|
9f82d9a2bf088073859eb23a33d275c6a8c0b975
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class UserErrorHandler:
    """Created if creating handlers fail -- running raises DataError.

    Instead of raising DataError at processing time (which would prevent
    all tests in the affected test case file from executing), this stand-in
    handler is created; the DataError is raised only if it is ever run.
    """
    type = 'error'

    def __init__(self, name, error):
        self.name = name
        self.longname = name
        self.doc = ''
        self.shortdoc = ''
        self.error = error
        self.timeout = ''

    def init_keyword(self, varz):
        # Nothing to initialize for a placeholder handler.
        pass

    def run(self, *args):
        # Deferred failure: surface the original creation error on execution.
        raise DataError(self.error)
| 33.236842
| 77
| 0.705463
|
acfcd407595d516404479fdfc5895e79ddfdccc9
| 175
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_title-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_title-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_title-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
import pyvista
# Example: add a large courier title to a plot with a grey background.
pl = pyvista.Plotter()
pl.background_color = 'grey'
# add_title returns the created actor; font_size is in points.
actor = pl.add_title('Plot Title', font='courier', color='k',
                     font_size=40)
pl.show()
| 25
| 62
| 0.628571
|
acfcd49bfb2e12972435d8f893e645d973081769
| 5,641
|
py
|
Python
|
buildpack/start.py
|
rrkh/cf-mendix-buildpack
|
ce60432790e501e4237cbeca62b1a33c4849a77c
|
[
"Apache-2.0"
] | 1
|
2021-06-17T17:48:21.000Z
|
2021-06-17T17:48:21.000Z
|
buildpack/start.py
|
todd-a-jacobs/cf-mendix-buildpack
|
7c3fe7cff21567efc9764960d24752730c675eb2
|
[
"Apache-2.0"
] | null | null | null |
buildpack/start.py
|
todd-a-jacobs/cf-mendix-buildpack
|
7c3fe7cff21567efc9764960d24752730c675eb2
|
[
"Apache-2.0"
] | 1
|
2021-07-21T11:36:46.000Z
|
2021-07-21T11:36:46.000Z
|
#!/usr/bin/env python3
import atexit
import logging
import os
import signal
import sys
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
from buildpack import (
appdynamics,
databroker,
datadog,
dynatrace,
java,
metering,
mx_java_agent,
newrelic,
nginx,
runtime,
telegraf,
util,
)
class Maintenance(BaseHTTPRequestHandler):
    """Catch-all HTTP handler served while the container is in debug mode.

    Every verb returns 503 with an explanatory body and an
    ``X-Mendix-Cloud-Mode: maintenance`` header.
    """

    MESSAGE = "App is in maintenance mode. To turn off unset DEBUG_CONTAINER variable"

    def _handle_all(self):
        # Log each hit so the maintenance state is visible in the app log.
        logging.warning(self.MESSAGE)
        self.send_response(503)
        self.send_header("X-Mendix-Cloud-Mode", "maintenance")
        self.end_headers()
        self.wfile.write(self.MESSAGE.encode("utf-8"))

    # Every verb behaves identically; alias them all to the shared handler.
    do_GET = _handle_all
    do_POST = _handle_all
    do_PUT = _handle_all
    do_HEAD = _handle_all
# Exit handler to kill process group
@atexit.register
def _kill_process_group():
    """atexit hook: forward SIGTERM to the whole process group.

    Ensures child processes started by this buildpack are terminated when
    the main process exits.
    """
    logging.debug("Terminating process group...")

    def _kill_process_group_with_signal(signum):
        # Resolve the group id before the try block: previously it was
        # looked up inside the try, so the except branch would hit a
        # NameError on `process_group` if os.getpgrp() itself raised.
        process_group = os.getpgrp()
        try:
            os.killpg(process_group, signum)
            logging.debug(
                "Successfully sent [{}] to process group [{}]".format(
                    signum.name, process_group
                ),
            )
        except OSError as error:
            # Best-effort: log and carry on; we are already exiting.
            logging.debug(
                "Failed to send [{}] to process group [{}]: {}".format(
                    signum.name, process_group, error
                )
            )

    _kill_process_group_with_signal(signal.SIGTERM)
# Handler for child process signals
# Required to kill zombie processes
def _sigchild_handler(_signo, _stack_frame):
    # Reap one exited child without blocking (WNOHANG) so terminated
    # subprocesses do not linger as zombies.
    os.waitpid(-1, os.WNOHANG)
# Handler for system termination signal (SIGTERM)
# This is required for Cloud Foundry: https://docs.cloudfoundry.org/devguide/deploy-apps/app-lifecycle.html#shutdown
def _sigterm_handler(_signo, _stack_frame):
    """Translate SIGTERM into a normal interpreter exit."""
    # Call sys.exit() so that all atexit handlers are explicitly called
    sys.exit()
def _register_signal_handlers():
    """Install the SIGCHLD (zombie reaping) and SIGTERM (clean exit) handlers."""
    signal.signal(signal.SIGCHLD, _sigchild_handler)
    signal.signal(signal.SIGTERM, _sigterm_handler)
# When DEBUG_CONTAINER is set, never boot the app: serve a permanent 503
# maintenance page instead (useful for debugging a crashing container).
# serve_forever() blocks, so the __main__ startup below never runs.
if os.environ.get("DEBUG_CONTAINER", "false").lower() == "true":
    logging.warning(Maintenance.MESSAGE)
    port = int(os.environ.get("PORT", 8080))
    httpd = HTTPServer(("", port), Maintenance)
    httpd.serve_forever()
if __name__ == "__main__":
    # Boot sequence: configure the runtime and sidecars, start them, then
    # block until the runtime terminates.
    m2ee = None
    nginx_process = None
    databroker_processes = databroker.Databroker()
    _register_signal_handlers()
    logging.basicConfig(
        level=util.get_buildpack_loglevel(),
        stream=sys.stdout,
        format="%(levelname)s: %(message)s",
    )
    logging.info(
        "Mendix Cloud Foundry Buildpack %s [%s] starting...",
        util.get_buildpack_version(),
        util.get_current_buildpack_commit(),
    )
    try:
        if os.getenv("CF_INSTANCE_INDEX") is None:
            logging.warning(
                "CF_INSTANCE_INDEX environment variable not found, assuming cluster leader responsibility..."
            )
        # Set environment variables that the runtime needs for initial setup
        if databroker.is_enabled():
            os.environ[
                "MXRUNTIME_{}".format(databroker.RUNTIME_DATABROKER_FLAG)
            ] = "true"
        # Initialize the runtime
        m2ee = runtime.setup(util.get_vcap_data())
        # Get versions
        runtime_version = runtime.get_version(os.path.abspath("."))
        java_version = runtime.get_java_version(runtime_version)["version"]
        model_version = runtime.get_model_version(os.path.abspath("."))
        # Update runtime configuration based on component configuration
        java.update_config(
            m2ee.config._conf["m2ee"], util.get_vcap_data(), java_version
        )
        newrelic.update_config(m2ee, util.get_vcap_data()["application_name"])
        appdynamics.update_config(
            m2ee, util.get_vcap_data()["application_name"]
        )
        dynatrace.update_config(m2ee, util.get_vcap_data()["application_name"])
        mx_java_agent.update_config(m2ee)
        telegraf.update_config(m2ee, util.get_vcap_data()["application_name"])
        # Databroker contributes extra JMX config that Datadog must know about
        (
            databroker_jmx_instance_cfg,
            databroker_jmx_config_files,
        ) = databroker_processes.get_datadog_config(
            datadog._get_user_checks_dir()
        )
        datadog.update_config(
            m2ee,
            model_version=model_version,
            runtime_version=runtime_version,
            extra_jmx_instance_config=databroker_jmx_instance_cfg,
            jmx_config_files=databroker_jmx_config_files,
        )
        nginx.configure(m2ee)
        # Start components and runtime
        telegraf.run()
        datadog.run(model_version, runtime_version)
        metering.run()
        nginx.run()
        runtime.run(m2ee)
        # Wait for the runtime to be ready before starting Databroker
        if databroker.is_enabled():
            runtime.await_database_ready(m2ee)
            databroker_processes.run(runtime.database.get_config())
    except RuntimeError as re:
        # Only the runtime throws RuntimeErrors (no pun intended)
        # Don't use the stack trace for these
        logging.error("Starting application failed: %s", re)
        sys.exit(1)
    except Exception:
        # Unexpected failure: log the full traceback before exiting non-zero.
        ex = traceback.format_exc()
        logging.error("Starting application failed. %s", ex)
        sys.exit(1)
    # Wait loop for runtime termination
    runtime.await_termination(m2ee)
| 30.327957
| 116
| 0.648644
|
acfcd8487ce3fe39518bdf64623d087fa367c6bd
| 1,896
|
py
|
Python
|
boot.py
|
allenc4/Vehicle-GPS-Tracking
|
9adf6e8987f52f0a3808f8ac0717390a27b4475c
|
[
"MIT"
] | null | null | null |
boot.py
|
allenc4/Vehicle-GPS-Tracking
|
9adf6e8987f52f0a3808f8ac0717390a27b4475c
|
[
"MIT"
] | null | null | null |
boot.py
|
allenc4/Vehicle-GPS-Tracking
|
9adf6e8987f52f0a3808f8ac0717390a27b4475c
|
[
"MIT"
] | null | null | null |
# boot file for the gpy. Runs once on startup before executing main.py.
# Functionality here initializes WiFi network with known network if found.
# Otherwise, sets wifi in Access Point mode
# author: callen
#
import os
import machine
from config import ConfigNetwork
# Mirror the REPL over UART0 so a serial console keeps working.
uart = machine.UART(0, baudrate=115200)
os.dupterm(uart)
# Do not initialize any wireless settings
useWifi = True
# Skip WiFi setup after a soft reset -- the radio keeps its previous state.
if useWifi and machine.reset_cause() != machine.SOFT_RESET:
    from network import WLAN
    wl = WLAN()
    wl.mode(WLAN.STA)
    # Fallback AP credentials used when no known network is reachable.
    def_ssid = 'chris-gpy'
    def_auth = (WLAN.WPA2, 'micropython')
    print("Scanning for known wifi networks")
    available_networks = wl.scan()
    # Intersect the visible SSIDs with the configured known networks.
    networks = frozenset([e.ssid for e in available_networks])
    known_network_names = frozenset([key for key in ConfigNetwork.KNOWN_NETWORKS])
    network_to_use = list(networks & known_network_names)
    try:
        # IndexError here (no known network visible) drops to the AP fallback.
        network_to_use = network_to_use[0]
        network_props = ConfigNetwork.KNOWN_NETWORKS[network_to_use]
        pwd = network_props['pwd']
        sec = [e.sec for e in available_networks if e.ssid == network_to_use][0]
        # Optional static interface config (ip/mask/gateway/dns) per network.
        if 'config' in network_props:
            wl.ifconfig(config=network_props['config'])
        wl.connect(network_to_use, (sec, pwd), timeout=10000)
        while not wl.isconnected():
            machine.idle() # save power while waiting for connection to succeed
        print("Connected to " + network_to_use + " with IP address: " + wl.ifconfig()[0])
    except Exception as e:
        # Any failure (no match, bad credentials, timeout) -> open an AP.
        print("Failed to connect to any known network... Exception: {}".format(e))
        print("Going into AP mode")
        print("Setting with default ssid: {0} and default auth {1}".format(def_ssid, def_auth))
        wl.init(mode=WLAN.AP, ssid=def_ssid, auth=def_auth, channel=6, antenna=WLAN.INT_ANT, hidden=False)
#TODO For now going to tests module. Remove this
machine.main('tests.py')
| 36.461538
| 106
| 0.696203
|
acfcd898ce6717a50b38b184a7e5dff69bfa7b38
| 1,484
|
py
|
Python
|
models/discussion.py
|
Officeyutong/HelloJudge2
|
e88c0a1198f8531ee0c1121dccfedb51f3f0aa0e
|
[
"MIT"
] | 21
|
2020-02-24T07:56:55.000Z
|
2022-01-22T11:32:28.000Z
|
models/discussion.py
|
Officeyutong/HelloJudge2
|
e88c0a1198f8531ee0c1121dccfedb51f3f0aa0e
|
[
"MIT"
] | 1
|
2021-02-07T13:08:47.000Z
|
2021-02-09T04:50:49.000Z
|
models/discussion.py
|
Officeyutong/HelloJudge2
|
e88c0a1198f8531ee0c1121dccfedb51f3f0aa0e
|
[
"MIT"
] | 3
|
2020-06-04T07:56:20.000Z
|
2021-06-21T04:04:15.000Z
|
from main import db
from sqlalchemy.dialects import mysql
from sqlalchemy import Column, Integer, Text, String, DateTime, ForeignKey
from sqlalchemy.sql.expression import text
class Discussion(db.Model):
    """A discussion thread, scoped to a board via its dotted ``path``."""
    __tablename__ = "discussion"
    # Discussion ID
    id = Column(Integer, primary_key=True)
    # Board identifier. Each board is a dotted string, for example:
    #   discussion.problem.233    -- all discussions for problem 233
    #   discussion.problem        -- discussions across all problems
    #   discussion.problem.global -- problem-wide global discussions
    #   discussion.global         -- site-wide discussions
    path = Column(String(128), index=True, nullable=False)
    # Thread title
    title = Column(String(100), index=True, nullable=False)
    # Thread body text
    content = Column(Text, nullable=False)
    # Author's user ID; deleting the user cascades to the thread
    uid = Column(Integer, ForeignKey(
        "user.id", ondelete="CASCADE"), nullable=False)
    # Pinned flag (1 = pinned to the top of the board, 0 = normal)
    top = Column(mysql.TINYINT(display_width=1),
                 default=0, nullable=False, index=True)
    # Creation time
    time = Column(DateTime, nullable=False)
    # Private flag: a private thread is visible only to its author and to
    # users holding the discussion.manage permission
    private = Column(mysql.TINYINT(display_width=1),
                     nullable=True, default=None)

    def as_dict(self):
        """Return the row's column values, skipping SQLAlchemy internals (_-prefixed)."""
        ret = dict(filter(lambda x: not x[0].startswith(
            "_"), self.__dict__.items()))
        return ret

    @staticmethod
    def by_id(id):
        """Return the Discussion with this id, or None when absent."""
        return db.session.query(Discussion).filter(Discussion.id == id).one_or_none()

    @staticmethod
    def has(id):
        """Return True when a Discussion with this id exists."""
        return db.session.query(Discussion.id).filter(Discussion.id == id).count() != 0
| 30.916667
| 87
| 0.662399
|
acfcd8a8a34cc7a88e9466d33557620bed0eb0f4
| 403
|
py
|
Python
|
bmstu_project/bmstu_project/wsgi.py
|
TrungLuong1194/django-student-management
|
5fabcad86eb1067c48c3b5f25e6a6a9e37571a6a
|
[
"MIT"
] | 1
|
2020-07-17T14:10:50.000Z
|
2020-07-17T14:10:50.000Z
|
bmstu_project/bmstu_project/wsgi.py
|
TrungLuong1194/django-student-management
|
5fabcad86eb1067c48c3b5f25e6a6a9e37571a6a
|
[
"MIT"
] | null | null | null |
bmstu_project/bmstu_project/wsgi.py
|
TrungLuong1194/django-student-management
|
5fabcad86eb1067c48c3b5f25e6a6a9e37571a6a
|
[
"MIT"
] | null | null | null |
"""
WSGI config for bmstu_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bmstu_project.settings')
application = get_wsgi_application()
| 23.705882
| 78
| 0.791563
|
acfcd98e366106d07ae1ecb9f418c20417f35bc2
| 8,006
|
py
|
Python
|
RNN/rnnModel.py
|
pedroig/Parkinsons-Disease-Digital-Biomarker
|
4c8bf1b5c4337ebc1940a7dfd2dbcce0c974e1fd
|
[
"MIT"
] | 12
|
2018-01-09T01:18:41.000Z
|
2021-04-22T19:15:38.000Z
|
RNN/rnnModel.py
|
pedroig/Parkinsons-Disease-Digital-Biomarker
|
4c8bf1b5c4337ebc1940a7dfd2dbcce0c974e1fd
|
[
"MIT"
] | null | null | null |
RNN/rnnModel.py
|
pedroig/Parkinsons-Disease-Digital-Biomarker
|
4c8bf1b5c4337ebc1940a7dfd2dbcce0c974e1fd
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import rnn_utils as ru
import time
from datetime import datetime

# Start from a clean TF1 graph so repeated runs don't accumulate nodes.
tf.reset_default_graph()

# Hard-coded parameters
timeSeries = ['outbound', 'rest']  # , 'return']
n_steps = 4000
n_inputs = 6  # Rotation Rate (XYZ) and Acceleration (XYZ)
n_neurons = 20
n_outputs = 2
n_layers = 1
learning_rate = 0.001
wavelet = ''  # Empty string for no wavelet
level = 4
dataFractionTrain = 1
dataFractionVal = 1
validateOnOldAgeGroup = True
useDemographics = False
# Training parameters
n_epochs = 30
batch_size = 1000
# Log directory for tensorboard
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
folderName = "run-{}_epochs-{}_learningRate-{}_batchSize-{}".format(now, n_epochs, learning_rate, batch_size)
# BUG FIX: `wavelet is not ''` compared object identity and only "worked"
# through CPython string interning (it raises a SyntaxWarning on modern
# Pythons); value inequality is the correct check.
if wavelet != '':
    folderName += "_{}{}".format(wavelet, level)
logdir = "{}/{}/".format(root_logdir, folderName)
# Placeholder Tensors
y = tf.placeholder(tf.int32, [None], name="y")  # class labels
gender = tf.placeholder(tf.float32, [None, 1], name="gender")
age = tf.placeholder(tf.float32, [None, 1], name="age")
# One input placeholder and one sequence-length placeholder per time series.
X = {}
seq_length = {}
for timeSeriesName in timeSeries:
    with tf.name_scope(timeSeriesName + "_placeholders") as scope:
        X[timeSeriesName] = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
        seq_length[timeSeriesName] = tf.placeholder(tf.int32, [None])
# Model
outputs = {}
states = {}
top_layer_h_state = {}
lstm_cells = {}
multi_cell = {}
finalRNNlayers = []
# Build an independent (multi-layer) LSTM per time series and keep only the
# top layer's final hidden state from each.
for timeSeriesName in timeSeries:
    with tf.variable_scope(timeSeriesName) as scope:
        lstm_cells[timeSeriesName] = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
                                      for layer in range(n_layers)]
        multi_cell[timeSeriesName] = tf.contrib.rnn.MultiRNNCell(lstm_cells[timeSeriesName])
        outputs[timeSeriesName], states[timeSeriesName] = tf.nn.dynamic_rnn(
            multi_cell[timeSeriesName], X[timeSeriesName], dtype=tf.float32,
            sequence_length=seq_length[timeSeriesName])
        # states[...][-1] is the top layer's (c, h) LSTM state tuple; [1] is h.
        top_layer_h_state[timeSeriesName] = states[timeSeriesName][-1][1]
        finalRNNlayers.append(top_layer_h_state[timeSeriesName])
# Concatenate the per-series final states (optionally with demographics)
# to feed the dense output layer.
concat3_top_layer_h_states = tf.concat(finalRNNlayers, axis=1, name="3Stages_concat")
if useDemographics:
    finalLayerInput = tf.concat([concat3_top_layer_h_states, age, gender], axis=1, name="finalLayerInput")
else:
    finalLayerInput = tf.concat([concat3_top_layer_h_states], axis=1, name="finalLayerInput")
logits = tf.layers.dense(finalLayerInput, n_outputs, name="logits")
with tf.name_scope("Cost_function") as scope:
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("Train") as scope:
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # Positive-class probability from the logit difference (binary case).
    positiveClass_probability = tf.sigmoid(logits[:, 1] - logits[:, 0])
    auc, auc_update_op = tf.metrics.auc(labels=y, predictions=positiveClass_probability, num_thresholds=10000)
    precision, precision_update_op = tf.metrics.precision_at_thresholds(labels=y, thresholds=[0.5],
                                                                        predictions=positiveClass_probability)
    recall, recall_update_op = tf.metrics.recall_at_thresholds(labels=y, thresholds=[0.5],
                                                               predictions=positiveClass_probability)
with tf.name_scope("init_and_save"):
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    saver = tf.train.Saver()
with tf.name_scope("tensorboard"):
    loss_summary = tf.summary.scalar('Loss', loss)
    auc_train_summary = tf.summary.scalar('AUC_Training', auc)
    auc_val_summary = tf.summary.scalar('AUC_Validation', auc)
    precision_train_summary = tf.summary.scalar('Precision_Training', precision[0])
    precision_val_summary = tf.summary.scalar('Precision_Validation', precision[0])
    recall_train_summary = tf.summary.scalar('Recall_Training', recall[0])
    recall_val_summary = tf.summary.scalar('Recall_Validation', recall[0])
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
# Reading tables
featuresTableTrain = ru.readPreprocessTable('train')
featuresTableVal = ru.readPreprocessTable('val')
if validateOnOldAgeGroup:
    # Restrict validation to the older age group (age > 56).
    featuresTableVal = featuresTableVal[featuresTableVal.age > 56]
# Setting size of dataset
featuresTableTrain = featuresTableTrain.sample(frac=dataFractionTrain)
featuresTableVal = featuresTableVal.sample(frac=dataFractionVal)
featuresTableTrain.reset_index(inplace=True)
featuresTableVal.reset_index(inplace=True)
# Reading time series for validation set (built once, reused every epoch).
X_val, y_val, seq_length_val = ru.generateSetFromTable(featuresTableVal, n_steps, n_inputs, wavelet, level)
feed_dict_val = {
    y: y_val,
    age: np.asarray(featuresTableVal["age"]).reshape((-1, 1)),
    gender: np.asarray(featuresTableVal["Male"]).reshape((-1, 1))
}
for timeSeriesName in timeSeries:
    feed_dict_val[X[timeSeriesName]] = X_val[timeSeriesName]
    feed_dict_val[seq_length[timeSeriesName]] = seq_length_val[timeSeriesName]
n_batches = len(featuresTableTrain) // batch_size
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # reset the local variables used for metrics
        sess.run(tf.local_variables_initializer())
        epoch_start_time = time.time()
        for batch_index in range(n_batches):
            # Building Batch
            featuresTableBatch = featuresTableTrain[featuresTableTrain.index // batch_size == batch_index]
            X_batch, y_batch, seq_length_batch = ru.generateSetFromTable(featuresTableBatch, n_steps, n_inputs, wavelet, level)
            feed_dict_batch = {
                y: y_batch,
                age: np.asarray(featuresTableBatch["age"]).reshape((-1, 1)),
                gender: np.asarray(featuresTableBatch["Male"]).reshape((-1, 1))
            }
            for timeSeriesName in timeSeries:
                feed_dict_batch[X[timeSeriesName]] = X_batch[timeSeriesName]
                feed_dict_batch[seq_length[timeSeriesName]] = seq_length_batch[timeSeriesName]
            # Training operation and metrics updates
            sess.run([training_op, auc_update_op, precision_update_op, recall_update_op], feed_dict=feed_dict_batch)
            # Tensorboard summary (every 4th batch to limit log size)
            if batch_index % 4 == 0:
                summary_str = loss_summary.eval(feed_dict=feed_dict_batch)
                step = epoch * n_batches + batch_index
                file_writer.add_summary(summary_str, step)
        # Metrics
        print("Epoch: {}, Execution time: {} seconds".format(epoch, time.time() - epoch_start_time))
        # Metrics on training data
        print("\tTraining")
        file_writer.add_summary(auc_train_summary.eval(), epoch)
        file_writer.add_summary(precision_train_summary.eval(), epoch)
        file_writer.add_summary(recall_train_summary.eval(), epoch)
        print("\t\tROC AUC:", auc.eval())
        print("\t\tPrecision:", precision.eval()[0])
        print("\t\tRecall:", recall.eval()[0])
        # Validation set metrics for current epoch (streaming metrics are
        # reset first so they reflect only the validation pass).
        print("\tValidation")
        sess.run(tf.local_variables_initializer())
        precision_val, auc_val, recall_val = sess.run([precision_update_op, auc_update_op, recall_update_op], feed_dict=feed_dict_val)
        file_writer.add_summary(auc_val_summary.eval(), epoch)
        file_writer.add_summary(precision_val_summary.eval(), epoch)
        file_writer.add_summary(recall_val_summary.eval(), epoch)
        print("\t\tROC AUC:", auc_val)
        print("\t\tPrecision:", precision_val[0])
        print("\t\tRecall:", recall_val[0])
        # Checkpoint every epoch from epoch 14 onwards.
        if epoch >= 14:
            save_path = saver.save(sess, "./checkpoints/{}/model.ckpt".format(folderName))
    save_path = saver.save(sess, "./checkpoints/{}/model.ckpt".format(folderName))
file_writer.close()
| 41.481865
| 134
| 0.701599
|
acfcda31dc993b0ea1b3eff18cda49ddf0059ca3
| 1,615
|
py
|
Python
|
apps/sso/oauth2/decorators.py
|
g10f/sso
|
ba6eb712add388c69d4880f5620a2e4ce42d3fee
|
[
"BSD-3-Clause"
] | 3
|
2021-05-16T17:06:57.000Z
|
2021-05-28T17:14:05.000Z
|
apps/sso/oauth2/decorators.py
|
g10f/sso
|
ba6eb712add388c69d4880f5620a2e4ce42d3fee
|
[
"BSD-3-Clause"
] | null | null | null |
apps/sso/oauth2/decorators.py
|
g10f/sso
|
ba6eb712add388c69d4880f5620a2e4ce42d3fee
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import wraps
from django.core.exceptions import PermissionDenied
import logging
logger = logging.getLogger(__name__)
def request_passes_test(test_func):
    """
    Decorator factory for views that checks that the request object passes
    the given test.

    ``test_func`` is a callable receiving the request and returning a truthy
    value when the request may proceed; otherwise PermissionDenied is raised.
    """
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            # Guard clause: reject first, then delegate to the real view.
            if not test_func(request):
                raise PermissionDenied
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def scopes_required(scopes):
    """
    Decorator for views that requires the request's token to carry every
    scope in ``scopes``; otherwise PermissionDenied is raised.
    """
    def check_scopes(request):
        if scopes:
            needed = set(scopes)
            granted = request.scopes
            if granted and needed.issubset(granted):
                return True
        raise PermissionDenied('required scopes not matching')
    return request_passes_test(check_scopes)
def client_required(client_uuids, raise_exception=False):
    """
    Decorator for views that requires the requesting OAuth2 client to be one
    of ``client_uuids``; otherwise PermissionDenied is raised.
    """
    def check_client(request):
        if client_uuids:
            client = request.client
            if client and client.uuid.hex in client_uuids:
                return True
        raise PermissionDenied('client_id not allowed')
    return request_passes_test(check_client)
| 30.471698
| 103
| 0.66192
|
acfcdb6276d360c3313fdfdbb91c2a9d5a8ada83
| 1,995
|
py
|
Python
|
tensorflow/inception_v2/run.py
|
asutic/ncappzoo
|
cfac66c16e25105eedb258c895caead83db361de
|
[
"MIT"
] | 1
|
2021-07-31T07:59:58.000Z
|
2021-07-31T07:59:58.000Z
|
tensorflow/inception_v2/run.py
|
asutic/ncappzoo
|
cfac66c16e25105eedb258c895caead83db361de
|
[
"MIT"
] | null | null | null |
tensorflow/inception_v2/run.py
|
asutic/ncappzoo
|
cfac66c16e25105eedb258c895caead83db361de
|
[
"MIT"
] | 1
|
2018-12-05T10:48:43.000Z
|
2018-12-05T10:48:43.000Z
|
#! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
#
# Classify a single image with an Inception-v2 graph on an Intel Movidius
# Neural Compute Stick (NCS) and print the top-5 predictions.
from mvnc import mvncapi as mvnc
import sys
import numpy
import cv2
path_to_networks = './'
path_to_images = '../../data/images/'
graph_filename = 'graph'
image_filename = path_to_images + 'nps_electric_guitar.png'
#mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
# Use the first attached NCS device; bail out when none is plugged in.
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
    print('No devices found')
    quit()
device = mvnc.Device(devices[0])
device.OpenDevice()
#Load graph
with open(path_to_networks + graph_filename, mode='rb') as f:
    graphfile = f.read()
#Load preprocessing data
mean = 128
std = 1/128
#Load categories (skip the leading 'classes' header line)
categories = []
with open(path_to_networks + 'categories.txt', 'r') as f:
    for line in f:
        cat = line.split('\n')[0]
        if cat != 'classes':
            categories.append(cat)
    f.close()  # redundant inside `with`; kept as-is
print('Number of categories:', len(categories))
#Load image size
with open(path_to_networks + 'inputsize.txt', 'r') as f:
    reqsize = int(f.readline().split('\n')[0])
graph = device.AllocateGraph(graphfile)
# Preprocess: center-crop to a square, resize to the network's input size,
# convert OpenCV BGR to RGB, then normalize each channel.
img = cv2.imread(image_filename).astype(numpy.float32)
dx,dy,dz= img.shape
delta=float(abs(dy-dx))
if dx > dy: #crop the x dimension
    img=img[int(0.5*delta):dx-int(0.5*delta),0:dy]
else:
    img=img[0:dx,int(0.5*delta):dy-int(0.5*delta)]
img = cv2.resize(img, (reqsize, reqsize))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
for i in range(3):
    img[:,:,i] = (img[:,:,i] - mean) * std
print('Start download to NCS...')
# The NCS expects float16 input tensors.
graph.LoadTensor(img.astype(numpy.float16), 'user object')
output, userobj = graph.GetResult()
# Indices of the five highest-scoring classes, best first.
top_inds = output.argsort()[::-1][:5]
print(''.join(['*' for i in range(79)]))
print('inception-v2 on NCS')
print(''.join(['*' for i in range(79)]))
for i in range(5):
    print(top_inds[i], categories[top_inds[i]], output[top_inds[i]])
print(''.join(['*' for i in range(79)]))
graph.DeallocateGraph()
device.CloseDevice()
print('Finished')
| 24.62963
| 68
| 0.674185
|
acfcde5ce1e093d516547f3a391236ed11ad530e
| 5,816
|
py
|
Python
|
django_archive/management/commands/archive.py
|
Adnn/django-archive
|
2ea3256c560a35b79eaa4cf2808da8687179a164
|
[
"MIT"
] | 5
|
2017-06-21T13:15:22.000Z
|
2020-12-18T09:21:03.000Z
|
django_archive/management/commands/archive.py
|
Adnn/django-archive
|
2ea3256c560a35b79eaa4cf2808da8687179a164
|
[
"MIT"
] | null | null | null |
django_archive/management/commands/archive.py
|
Adnn/django-archive
|
2ea3256c560a35b79eaa4cf2808da8687179a164
|
[
"MIT"
] | 1
|
2020-11-09T18:40:00.000Z
|
2020-11-09T18:40:00.000Z
|
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from json import dump
from os import path
from tarfile import TarInfo, TarFile
from django.apps.registry import apps
from django.conf import settings
from django.core.files.base import File
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import models
from django.utils.encoding import smart_bytes
from .utils import *
from ... import __version__
class MixedIO(BytesIO):
    """
    A BytesIO that accepts and encodes Unicode data.

    Exists so that identical ``write()`` calls work with both bytes and
    Unicode payloads under Python 2 and Python 3 alike.
    """

    def rewind(self):
        """
        Seek back to the start of the buffer and return how much was written.
        """
        length = self.tell()
        self.seek(0)
        return length

    def write(self, data):
        """
        Write *data*, converting Unicode input to bytes first.
        """
        BytesIO.write(self, smart_bytes(data))
def walk_storage_files(storage, directory=""):
    """
    Recursively yield every file in *storage*, opened in binary mode.

    Files of the current directory are yielded before subdirectories are
    descended into. Each yielded file's ``name`` is forced to its path
    relative to the storage root: some storages (at least FileSystemStorage)
    do not pass the 'name' argument to the File constructor on open, which
    would leave ``File.name`` as an absolute path.
    """
    directories, files = storage.listdir(directory)
    for filename in files:
        # The pointless `format(...)` wrapper (a bare str() no-op, since
        # path.join already returns str) has been removed.
        relative_path = path.join(directory, filename)
        storage_file = storage.open(relative_path, "rb")
        storage_file.name = relative_path
        yield storage_file
    for subdirectory in directories:
        for storage_file in walk_storage_files(storage, path.join(directory, subdirectory)):
            yield storage_file
class Command(BaseCommand):
    """
    Create an archive of database tables and uploaded media, potentially compressed.
    """
    help = "Create an archive of database tables and uploaded media, potentially compressed."

    def handle(self, *args, **kwargs):
        """
        Process the command.

        Validates ARCHIVE_DIRECTORY, then writes the database dump, media
        files and metadata into a single tar archive.
        """
        self.attr = AttributeRepository()
        if not path.isdir(self.attr.get('ARCHIVE_DIRECTORY')):
            self.stderr.write("Setting 'ARCHIVE_DIRECTORY' set to the non-existent directory '{}'."
                              .format(self.attr.get('ARCHIVE_DIRECTORY')))
            exit(1)
        with self._create_archive() as tar:
            self._dump_db(tar)
            self._dump_files(tar)
            self._dump_meta(tar)
            self.stdout.write("Backup completed to archive '{}'.".format(tar.name))

    def _create_archive(self):
        """
        Create the archive and return the TarFile.

        ARCHIVE_FILENAME is expanded through strftime; ARCHIVE_FORMAT selects
        the compression suffix (empty string = uncompressed tar).
        """
        filename = self.attr.get('ARCHIVE_FILENAME')
        fmt = self.attr.get('ARCHIVE_FORMAT')
        absolute_path = path.join(
            self.attr.get('ARCHIVE_DIRECTORY'),
            '%s.tar%s' % (datetime.today().strftime(filename), '.'+fmt if fmt else '')
        )
        return TarFile.open(absolute_path, 'w:%s' % fmt)

    def _dump_db(self, tar):
        """
        Dump the rows in each model to the archive.
        """
        # Dump the tables to a MixedIO, then add it as a tar member.
        data = MixedIO()
        call_command('dumpdata', all=True, format='json', indent=self.attr.get('ARCHIVE_DB_INDENT'),
                     exclude=self.attr.get('ARCHIVE_EXCLUDE'), stdout=data)
        info = TarInfo(DB_DUMP)
        info.size = data.rewind()
        tar.addfile(info, data)

    def _dump_files(self, tar):
        """
        Dump media files according to the ARCHIVE_MEDIA_POLICY setting.
        Unknown (truthy) policy values produce a warning and skip media.
        """
        if self.attr.get('ARCHIVE_MEDIA_POLICY') == 'all_files':
            self._dump_all_files(tar)
        elif self.attr.get('ARCHIVE_MEDIA_POLICY') == 'filefield_targets':
            self._dump_referenced_files(tar)
        elif self.attr.get('ARCHIVE_MEDIA_POLICY'):
            self.stderr.write("Warning: ARCHIVE_MEDIA_POLICY value '{}' is not supported. Media files not archived."
                              .format(self.attr.get('ARCHIVE_MEDIA_POLICY')))

    def _dump_all_files(self, tar):
        """
        Dump all media files found by the media storage class.
        """
        media_storage = get_mediastorage()
        for file in walk_storage_files(media_storage):
            self._add_file(tar, file)
            file.close()

    def _dump_referenced_files(self, tar):
        """
        Dump all media files that are reference by a FileField.
        """
        # Loop through all models and find FileFields
        for model in apps.get_models():
            # Get the name of all file fields in the model
            field_names = []
            for field in model._meta.fields:
                if isinstance(field, models.FileField):
                    field_names.append(field.name)
            # If any were found, loop through each row
            if len(field_names):
                for row in model.objects.all():
                    for field_name in field_names:
                        field = getattr(row, field_name)
                        if field:
                            self._add_file(tar, field)
                            field.close()

    def _dump_meta(self, tar):
        """
        Dump metadata to the archive.
        """
        data = MixedIO()
        meta_dict = OrderedDict((
            ('version', __version__),
            ('db_file', DB_DUMP),
            ('media_folder', MEDIA_DIR),
            ('settings', self.attr.settings_dict()),
        ))
        dump(meta_dict, data, indent=2)
        info = TarInfo(META_DUMP)
        info.size = data.rewind()
        tar.addfile(info, data)

    def _add_file(self, tar, file):
        # Add a single storage file to the archive under the MEDIA_DIR prefix.
        info = TarInfo(path.join(MEDIA_DIR, file.name))
        info.size = file.size
        tar.addfile(info, file)
| 33.813953
| 116
| 0.604883
|
acfcde7fc6522ff42b1d50c2c5d1ae84d1959542
| 23,507
|
py
|
Python
|
.vscode-server/data/User/History/-27e0d3eb/nbc2.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | null | null | null |
.vscode-server/data/User/History/-27e0d3eb/nbc2.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | 1
|
2022-02-16T12:12:43.000Z
|
2022-02-16T12:15:03.000Z
|
.vscode-server/data/User/History/-27e0d3eb/nbc2.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | null | null | null |
# from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
import json
from bson import json_util
#import pandas as pd
app = Flask(__name__)
# NOTE(review): hard-coded secret key and DB URI — these should come from
# environment/configuration in production, not source.
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
# Destination folders for uploaded profile pictures and article PDFs.
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
# User account: nick is the primary key.
class Usuario(db.Model):
    nick = db.Column(db.String(20), primary_key=True)
    Nombre_de_usuario = db.Column(db.String(50))
    password = db.Column(db.String(50))
    e_mail = db.Column(db.String(50), unique=True, nullable=False)
    descripcion = db.Column(db.String(1000))
    link = db.Column(db.String(200))
    foto_de_perfil = db.Column(db.String(400))
# Follow relation: Usuario_Nicka follows Usuario_Nickb.
class Sigue(db.Model):
    #id = db.Column(db.Integer, primary_key=True )
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
# Direct message between two users; timestamp set/updated server-side.
class Chat(db.Model):
    #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
    timestamp = db.Column(db.TIMESTAMP, nullable=False,
                          server_default=db.func.now(),
                          onupdate=db.func.now())
    mensaje = db.Column(db.String(1000))
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
# Base post record; Propia/Recomendacion extend it by sharing its id.
class Publicacion(db.Model):
    id = db.Column(Integer,primary_key=True)
    #id = db.Sequence('id', start=1, increment=1)
    descripcion = db.Column(db.String(1000))
    #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
    timestamp = db.Column(db.TIMESTAMP, nullable=False,
                          server_default=db.func.now(),
                          onupdate=db.func.now())
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
# Own article post: carries an uploaded PDF.
class Propia(db.Model):
    pdf = db.Column(db.String(400))
    id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
# Recommendation post: links to an external work.
class Recomendacion(db.Model):
    link = db.Column(db.String(200),nullable=False)
    titulo = db.Column(db.String(200),nullable=False)
    autor = db.Column(db.String(200),nullable=False)
    id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
# Topic catalogue.
class Tematica(db.Model):
    tema = db.Column(db.String(50), primary_key=True )
# Per-user notification.
class Notificaciones(db.Model):
    id = db.Column(db.Integer, primary_key=True )
    fecha = db.Column(db.Date)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
# User's preferred topics (many-to-many user<->topic).
class Prefiere(db.Model):
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
# Topics a post is tagged with (many-to-many post<->topic).
class Trata_pub_del_tema(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
# Like on a post.
class Gusta(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
# Comment on a post.
class Comenta(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    comentario = db.Column(db.String(1000))
# Bookmark: a user saved a post.
class Guarda(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
# Links a notification to the post it is about.
class Trata(db.Model):
    id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
# Records which user's action generated activity on a post.
class Genera(db.Model):
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class PublicacionRecomandacion:
    """Plain view-model for a recommendation post: post fields, author info
    and interaction counters, as rendered by the frontend."""

    _FIELDS = ("id", "tipo", "titulo", "autor", "descripcion", "link",
               "usuario", "foto_de_perfil", "nlikes", "ncomentarios",
               "nguardados", "likemio", "guardadomio")

    def __init__(self, id, tipo, titulo, autor, descripcion, link, usuario,
                 foto_de_perfil, nlikes, ncomentarios, nguardados, likemio,
                 guardadomio):
        values = (id, tipo, titulo, autor, descripcion, link, usuario,
                  foto_de_perfil, nlikes, ncomentarios, nguardados, likemio,
                  guardadomio)
        # Assign each constructor argument to the attribute of the same name.
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
def token_required(f):
    """Decorator: require a valid JWT in the 'token' request header.

    On success calls the view with the authenticated nick prepended to its
    positional arguments; otherwise returns a 403 JSON error.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # BUG FIX: request.headers['token'] raised a KeyError (-> HTTP 400)
        # when the header was absent, making the "token missing" branch
        # below unreachable; .get() returns None instead.
        token = request.headers.get('token')
        if not token:
            return jsonify({'error': 'Token no existe'}), 403
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            # The original also ran a Usuario lookup here, but immediately
            # discarded it — removed as dead code.
            current_user = data['nick']
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return jsonify({'error': 'Token no valido'}), 403
        return f(current_user, *args, **kwargs)
    return decorated
def token_required_id(f):
    """Decorator: require a valid JWT carrying both 'nick' and 'id' claims.

    On success calls the view with (nick, id) prepended to its positional
    arguments; otherwise returns a 403 JSON error.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # BUG FIX: request.headers['token'] raised a KeyError (-> HTTP 400)
        # when the header was absent, making the "token missing" branch
        # below unreachable; .get() returns None instead.
        token = request.headers.get('token')
        if not token:
            return jsonify({'error': 'Token no existe'}), 403
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            # Dead Usuario/Publicacion queries (results were discarded) and
            # the debug print of the raw token have been removed.
            current_user = data['nick']
            _id = data['id']
        except Exception:
            # Narrowed from a bare `except:`.
            return jsonify({'error': 'Token no valido'}), 403
        return f(current_user, _id, *args, **kwargs)
    return decorated
@app.route('/unprotected')
def unprotected():
    """Open probe endpoint: reachable without any token."""
    payload = {'message': 'Puede entrar tol mundo'}
    return jsonify(payload)
@app.route('/protected')
def protected(current_user):
    """Token-gated probe endpoint; current_user is injected by token_required."""
    print(current_user)
    payload = {'message': 'Puedes entrar si puedes'}
    return jsonify(payload)
protected = app.view_functions['protected'] = token_required(protected)
# Ruta para el login
@app.route('/register', methods=['POST'])
def add_data():
    """Create a new account, rejecting duplicate e-mail or nick, and return a JWT."""
    data = request.get_json()
    # Duplicate checks mirror the original order: e-mail wins over nick.
    correo_existente = Usuario.query.filter_by(e_mail=data['e_mail']).first()
    nick_existente = Usuario.query.filter_by(nick=data['nick']).first()
    if correo_existente:
        return jsonify({'error': 'Existe correo'})
    if nick_existente:
        return jsonify({'error': 'Existe nick'})
    #if (check_email(e_mail) == True and check_password(data['password']) == True ):
    nuevo_usuario = Usuario(nick=data['nick'],
                            password=generate_password_hash(data['password']),
                            e_mail=data['e_mail'],
                            foto_de_perfil="platon.jpg")
    db.session.add(nuevo_usuario)
    db.session.commit()
    claims = {'nick': data['nick'],
              'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}
    token = jwt.encode(claims, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    """Authenticate by nick or e-mail and return a long-lived JWT.

    The token always carries the user's canonical nick in its 'nick' claim.
    """
    data = request.get_json()
    # A '@' distinguishes an e-mail login from a nick login.
    if '@' in data['nickOcorreo']:
        user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
    else:
        user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
    if not user:
        return jsonify({'error': 'No existe ese usuario'})
    if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'})
    # BUG FIX: the token used to embed the raw nick-or-email, so logging in
    # by e-mail put the e-mail address into the 'nick' claim and broke every
    # nick-keyed query performed by token_required consumers. Always encode
    # the canonical nick instead.
    token = jwt.encode({'nick': user.nick, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
    """Return the authenticated user's profile data, counters and topics."""
    # Core profile columns for the current user.
    s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
    result = db.session.execute(s)
    # Following / follower / post counters.
    seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
    seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
    nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
    # Preferred topics, flattened into a plain list.
    tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
    temas = db.session.execute(tema)
    vector = []
    for row in temas:
        vector += row
    # Build the response from the (single) matching profile row.
    for row in result:
        fila = {
            "nick": current_user,
            "nombre_de_usuario":row[0],
            "descripcion":row[1],
            "link":row[2],
            "foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
            "nsiguiendo": seguidos,
            "nseguidores": seguidores,
            "nposts": nposts,
            "tematicas": vector
            #"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
        }
    # NOTE(review): if no profile row matched, `fila` is unbound here and
    # this raises NameError — confirm upstream guarantees the user exists.
    return fila
@app.route('/display/<filename>')
def foto(filename):
    """Serve a profile picture via a permanent redirect into static storage."""
    target = url_for('static', filename='fotosPerfil/' + filename)
    return redirect(target, code=301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
    """Update the authenticated user's profile fields and topic preferences."""
    data = request.get_json()
    user = Usuario.query.filter_by(nick=current_user).first()
    user.Nombre_de_usuario = data['nombre_de_usuario']
    user.descripcion = data['descripcion']
    user.link = data['link']
    for temas in data['tematicas']:
        # BUG FIX: the existence check must be scoped to the current user.
        # Filtering on the topic alone meant that once ANY user had picked a
        # topic, no other user could ever add it to their own preferences.
        existente = Prefiere.query.filter_by(Usuario_Nicka=current_user, tema=temas).first()
        if not existente:
            db.session.add(Prefiere(Usuario_Nicka=current_user, tema=temas))
    # Debug prints of the incoming payload have been removed.
    db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
    """Replace the authenticated user's profile picture with the uploaded
    'nueva_foto' file, then return a fresh 30-minute JWT."""
    user = Usuario.query.filter_by(nick=current_user).first()
    # BUG FIX: request.files['nueva_foto'] raises a 400 BadRequest when the
    # field is missing, so the original "is not None" guard could never take
    # the false branch. MultiDict.get() returns None instead.
    file = request.files.get('nueva_foto')
    if file is not None:
        # Sanitize the client-supplied name before writing to disk.
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
        user.foto_de_perfil = filename
        db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
    """Create a publication for the authenticated user, link its topics, and
    create the type-specific record (article or recommendation)."""
    data = request.get_json()
    # Commit first so the auto-generated publication id is available.
    publicacion = Publicacion(descripcion=data['descripcion'], Usuario_Nicka=current_user)
    db.session.add(publicacion)
    db.session.commit()
    tematicas = data['tematicas']
    for temas in tematicas:
        # Only link topics that already exist in the Tematica table.
        temita = Tematica.query.filter_by(tema=temas).first()
        if temita:
            nuevo = Trata_pub_del_tema(id=publicacion.id, tema=temita.tema)
            db.session.add(nuevo)
            db.session.commit()
    if (data['tipo'] == "1"):  # article
        # NOTE(review): returns only the new id, with no refreshed token —
        # presumably the client then uploads the PDF via /subirPdf using this
        # id (see the commented-out call below); confirm the client contract.
        return jsonify({'id': publicacion.id})
        # guardarPDF(request.files['pdf'], publicacion.id)
    elif (data['tipo'] == "2"):  # recommendation
        recomendacion = Recomendacion(link=data['link'], titulo=data['titulo'], autor=data['autor'], id=publicacion.id)
        db.session.add(recomendacion)
        db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/subirPdf', methods=['POST'])
@token_required
def guardarPDF(current_user):
    """Create a 'Propia' (own article) record for the publication id given in
    the 'id' request header and attach the uploaded 'pdf' file to it."""
    _id = request.headers['id']
    propia = Propia(id=_id)
    db.session.add(propia)
    db.session.commit()
    propia = Propia.query.filter_by(id=_id).first()
    # BUG FIX: request.files['pdf'] aborts with a 400 when the field is
    # absent, so the original else-branch ("pdf nulo") was unreachable.
    # MultiDict.get() returns None and lets the guard actually work.
    file = request.files.get('pdf')
    if file is not None:
        # Sanitize the client-supplied name before writing to disk.
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
        propia.pdf = filename
        # `propia` is already tracked by the session; commit persists the
        # filename without a redundant add().
        db.session.commit()
    else:
        print("pdf nulo")
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
# @app.route('/misArticulos', methods=['GET'])
# @token_required
# def misArticulos(current_user):
# data= request.get_json()
# x = select([Usuario.Nombre_de_usuario, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
# resultb = db.session.execute(x)
# Nombre_de_usuario = ""
# foto_de_perfil= 'http://51.255.50.207:5000/display/'
# for b in resultb:
# Nombre_de_usuario=b.Nombre_de_usuario
# foto_de_perfil += b.foto_de_perfil
# id = select([Publicacion.id]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
# descripcion = select( [Publicacion.descripcion]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
# timestamp = select([Publicacion.timestamp]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
# results = db.session.execute(id)
# resultss = db.session.execute(descripcion)
# resultsss = db.session.execute(timestamp)
# #ver si ese ID existe en recomendacion sino es un post propio
# vector0 = []
# vector1 = []
# vector2 = []
# Gustas = []
# Comentarios= []
# Guardados= []
# for r in results:
# #print(str(r))
# vector0 += r
# Gustas += str(db.session.query(Gusta).filter(Gusta.id == 'r' ).count())
# Comentarios += str(db.session.query(Comenta).filter(Comenta.id == 'r' ).count())
# Guardados += str(db.session.query(Guarda).filter(Guarda.id == 'r').count())
# for r in resultss:
# vector1 += r
# for r in resultsss:
# vector2 += r
# vector3 = []
# vector4 = []
# vector5 = []
# for r in vector0:
# link = select([Recomendacion.link]).where((Recomendacion.id == r))
# titulo = select([Recomendacion.titulo]).where((Recomendacion.id == r))
# autor = select([Recomendacion.autor]).where((Recomendacion.id == r))
# resulta = db.session.execute(link)
# resultaa = db.session.execute(titulo)
# resultaaa = db.session.execute(autor)
# for a in resulta:
# vector3 +=a
# for a in resultaa:
# vector4 +=a
# for a in resultaaa:
# vector5 +=a
# #def __init__(self,id, tipo, titulo,autor,descripcion,link,usuario,foto_de_perfil,nlikes,ncomentarios,nguardados,likemio,guardadomio):
# finalDictionary = {}
# i=0
# x=0
# for x in range(len(vector0)):
# print("EL ID ES: ", vector0[x])
# existe = db.session.query(Recomendacion).filter(Recomendacion.id == vector0[x] ).count()
# print("EXISTE ES: ", existe)
# if bool(existe):
# print(i , "bool: " , existe, " e x= ", vector0[x])
# GustaMio = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == vector0[x] ).count()
# GuardadoMio = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == vector0[x]).count()
# #post = PublicacionRecomandacion(vector0[i],2,vector4[i],vector5[i],vector1[i],vector3[i], Nombre_de_usuario, foto_de_perfil,Gustas[i],Comentarios[i],Guardados[i],bool(GustaMio),bool(GuardadoMio))
# #vectorFinal +=post
# #dictionary = {'Apple': 3, 'Grapes': 1}
# #array = {'key' : vector0[x], 'value' : post}
# #finalDictionary[vector0[x]] = [ str(vector0[i]) , str(vector4[i]) ] ## ESTO VA
# finalDictionary[vector0[x]] = { 'tipo' : 2 ,'titulo' : str(vector4[i]), 'autor' : str(vector5[i]),'descripcion' : str(vector1[i]),'link' : str(vector3[i]),'usuario' : Nombre_de_usuario,'foto_de_perfil' : foto_de_perfil,'nlikes' : str(Gustas[i]),'nlikemio' : bool(GustaMio),'ncomentarios' : str(Comentarios[i]),'nguardados' : str(Guardados[i]),'guardadomio' : bool(GuardadoMio) }
# #print(result)
# #vectorFinal.append(post)
# i = i + 1
# #return json.dumps(vectorFinal)
# #finalDictionary[vector0[x]] = [str(vector0[i]) +'value' : str(vector4[i])}]
# return json.dumps(finalDictionary, indent = i)
@app.route('/display2/<filename>')
def pdf(filename):
    """Permanently redirect (301) to the static PDF asset."""
    target = url_for('static', filename='pdf/' + filename)
    return redirect(target, code=301)
@app.route('/misRecomendaciones', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
    """Return the authenticated user's recommendation posts as a JSON object
    keyed by publication id, each value carrying title/author/link plus like,
    comment and save counters."""
    data = request.get_json()
    # Author display name and profile-picture URL (the current user).
    x = select([Usuario.Nombre_de_usuario, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
    resultb = db.session.execute(x)
    Nombre_de_usuario = ""
    foto_de_perfil = 'http://51.255.50.207:5000/display/'
    for b in resultb:
        Nombre_de_usuario = b.Nombre_de_usuario
        foto_de_perfil += b.foto_de_perfil
    # All of the user's publications, newest first.
    id = select([Publicacion.id]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    descripcion = select([Publicacion.descripcion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    timestamp = select([Publicacion.timestamp]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(id)
    resultss = db.session.execute(descripcion)
    resultsss = db.session.execute(timestamp)
    vector0 = []  # publication ids (all posts, parallel lists below)
    vector1 = []  # descriptions, one per post
    vector2 = []  # timestamps, one per post (currently unused)
    Gustas = []       # like counts, one per post
    Comentarios = []  # comment counts, one per post
    Guardados = []    # save counts, one per post
    for r in results:
        vector0 += r
        post_id = vector0[-1]
        # BUG FIX: the original filtered on the string literal 'r' (always
        # counting zero matches of the wrong thing) and used `list += str(n)`,
        # which splits multi-digit counts into single characters. Filter on
        # the real id and append the integer count.
        Gustas.append(db.session.query(Gusta).filter(Gusta.id == post_id).count())
        Comentarios.append(db.session.query(Comenta).filter(Comenta.id == post_id).count())
        Guardados.append(db.session.query(Guarda).filter(Guarda.id == post_id).count())
    for r in resultss:
        vector1 += r
    for r in resultsss:
        vector2 += r
    vector3 = []  # links   (recommendation posts only)
    vector4 = []  # titles  (recommendation posts only)
    vector5 = []  # authors (recommendation posts only)
    for r in vector0:
        link = select([Recomendacion.link]).where((Recomendacion.id == r))
        titulo = select([Recomendacion.titulo]).where((Recomendacion.id == r))
        autor = select([Recomendacion.autor]).where((Recomendacion.id == r))
        for a in db.session.execute(link):
            vector3 += a
        for a in db.session.execute(titulo):
            vector4 += a
        for a in db.session.execute(autor):
            vector5 += a
    finalDictionary = {}
    i = 0  # index into the recommendation-only lists (vector3/4/5)
    for x in range(len(vector0)):
        # A publication is a recommendation iff a Recomendacion row exists.
        existe = db.session.query(Recomendacion).filter(Recomendacion.id == vector0[x]).count()
        if bool(existe):
            GustaMio = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == vector0[x]).count()
            GuardadoMio = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == vector0[x]).count()
            # BUG FIX: per-post data (description, counters) must be indexed
            # by x, while recommendation-only data (title/author/link) is
            # indexed by i; the original used i for both, misaligning output
            # whenever non-recommendation posts were interleaved. The *mio
            # flags are now real booleans, matching the commented reference
            # implementation above.
            finalDictionary[vector0[x]] = {
                'tipo': 2,
                'titulo': str(vector4[i]),
                'autor': str(vector5[i]),
                'descripcion': str(vector1[x]),
                'link': str(vector3[i]),
                'usuario': Nombre_de_usuario,
                'foto_de_perfil': foto_de_perfil,
                'nlikes': str(Gustas[x]),
                'nlikemio': bool(GustaMio),
                'ncomentarios': str(Comentarios[x]),
                'nguardados': str(Guardados[x]),
                'guardadomio': bool(GuardadoMio),
            }
            i = i + 1
    return json.dumps(finalDictionary, indent=i)
#return json.dumps(vectorFinal, default=json_util.default)
#json_docs.append(json_doc)
# @app.route('/misRecomendaciones', methods=['GET'])
# @token_required
# def getPostsRecomendados(current_user):
# finalDictionary = {}
# i=0
# for i in range(0,10):
# finalDictionary[i] = { 'tipo' : "xd" ,'titulo' : "su", 'autor' : "su",'descripcion' : "su",'link' : "su",'usuario' : "su",'foto_de_perfil' : "su",'nlikes' : 0,'nlikemio' : "false",'ncomentarios' : 3,'nguardados' : 2,'guardadomio' : 0 }
# return json.dumps(finalDictionary, indent = i)
def check_email(email):
    """Return True when *email* matches the accepted address shape:
    lowercase/digit local part (optionally split once by '.' or '_'),
    an '@', a domain word, and a 2-3 character TLD."""
    # Raw string so the backslashes reach the regex engine untouched
    # (the original non-raw literal worked only by accident).
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    return re.search(regex, email) is not None
# Passwords must be 8-32 characters long and contain at least one digit,
# one lowercase letter, one uppercase letter and one special character.
def check_password(password):
    """Return True when *password* satisfies the complexity policy."""
    # BUG FIX: the original character class contained an unescaped "[]",
    # which terminated the class early and turned the remainder into a
    # literal string the lookahead could never match — every password was
    # rejected. The brackets and '-' are now escaped, and the pattern is a
    # raw string.
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|]).{8,32}$'
    return re.search(regex, password) is not None
# Development entry point: run Flask's built-in server.
# NOTE(review): debug=True enables the interactive debugger and must not be
# used in production.
if __name__ == '__main__':
    app.run(debug=True)
| 37.431529
| 396
| 0.646318
|
acfcdf275cd9dff5ea372abc6c4c061fc0f8d165
| 459
|
py
|
Python
|
mediaman/core/clients/single/abstract.py
|
MattCCS/MediaMan
|
388c0d16da437b0ede4f0903a01e41dc8e927ae6
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-05-06T19:51:08.000Z
|
2019-05-06T19:51:08.000Z
|
mediaman/core/clients/single/abstract.py
|
MattCCS/MediaMan
|
388c0d16da437b0ede4f0903a01e41dc8e927ae6
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-02-08T20:22:34.000Z
|
2021-02-08T20:22:34.000Z
|
mediaman/core/clients/single/abstract.py
|
MattCCS/MediaMan
|
388c0d16da437b0ede4f0903a01e41dc8e927ae6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
from mediaman.core.clients.abstract import abstract
class AbstractSingleClient(abstract.AbstractClient):
    """Client backed by exactly one index, to which all calls delegate."""

    def __init__(self, index):
        # The single underlying index this client wraps.
        self.index = index
        # self.force_init()

    def force_init(self):
        """Eagerly initialize the underlying index."""
        self.index.force_init()

    def name(self):
        """Return the underlying index's name."""
        return self.index.name()

    def nickname(self):
        """Return the underlying index's nickname."""
        return self.index.nickname()

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self.index)
| 20.863636
| 63
| 0.653595
|
acfcdf437d57f0db72de41dbb7e933b1ffeb3572
| 13,463
|
py
|
Python
|
django/core/management/templates.py
|
findoslice/django
|
bb99832b0a8635cf9a29dfcb66088d8c593b8796
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/core/management/templates.py
|
findoslice/django
|
bb99832b0a8635cf9a29dfcb66088d8c593b8796
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/core/management/templates.py
|
findoslice/django
|
bb99832b0a8635cf9a29dfcb66088d8c593b8796
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import cgi
import mimetypes
import os
import posixpath
import shutil
import stat
import tempfile
from importlib import import_module
from os import path
from urllib.request import urlretrieve
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive
from django.utils.version import get_docs_version
class TemplateCommand(BaseCommand):
    """
    Copy either a Django application layout template or a Django project
    layout template into the specified directory.

    :param style: A color style object (see django.core.management.color).
    :param app_or_project: The string 'app' or 'project'.
    :param name: The name of the application or project.
    :param directory: The directory to which the template should be copied.
    :param options: The additional variables passed to project or app templates
    """
    requires_system_checks = False
    # The supported URL schemes
    url_schemes = ['http', 'https', 'ftp']
    # Rewrite the following suffixes when determining the target filename.
    rewrite_template_suffixes = (
        # Allow shipping invalid .py files without byte-compilation.
        ('.py-tpl', '.py'),
    )

    def add_arguments(self, parser):
        """Register the command-line arguments shared by startapp/startproject."""
        parser.add_argument('name', help='Name of the application or project.')
        parser.add_argument('directory', nargs='?', help='Optional destination directory')
        parser.add_argument('--template', help='The path or URL to load the template from.')
        parser.add_argument(
            '--extension', '-e', dest='extensions',
            action='append', default=['py'],
            help='The file extension(s) to render (default: "py"). '
                 'Separate multiple extensions with commas, or use '
                 '-e multiple times.'
        )
        parser.add_argument(
            '--name', '-n', dest='files',
            action='append', default=[],
            help='The file name(s) to render. Separate multiple file names '
                 'with commas, or use -n multiple times.'
        )

    def handle(self, app_or_project, name, target=None, **options):
        """Render the chosen template tree into the target directory,
        rendering matching files through the template engine and copying
        the rest verbatim."""
        self.app_or_project = app_or_project
        self.paths_to_remove = []
        self.verbosity = options['verbosity']

        self.validate_name(name, app_or_project)

        # if some directory is given, make sure it's nicely expanded
        if target is None:
            top_dir = path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except FileExistsError:
                raise CommandError("'%s' already exists" % top_dir)
            except OSError as e:
                raise CommandError(e)
        else:
            top_dir = os.path.abspath(path.expanduser(target))
            if not os.path.exists(top_dir):
                raise CommandError("Destination directory '%s' does not "
                                   "exist, please create it first." % top_dir)

        extensions = tuple(handle_extensions(options['extensions']))
        extra_files = []
        for file in options['files']:
            extra_files.extend(map(lambda x: x.strip(), file.split(',')))
        if self.verbosity >= 2:
            self.stdout.write("Rendering %s template files with "
                              "extensions: %s\n" %
                              (app_or_project, ', '.join(extensions)))
            self.stdout.write("Rendering %s template files with "
                              "filenames: %s\n" %
                              (app_or_project, ', '.join(extra_files)))

        # Context keys are parameterized by app/project so the same template
        # machinery serves both commands.
        base_name = '%s_name' % app_or_project
        base_subdir = '%s_template' % app_or_project
        base_directory = '%s_directory' % app_or_project
        camel_case_name = 'camel_case_%s_name' % app_or_project
        camel_case_value = ''.join(x for x in name.title() if x != '_')

        context = Context({
            **options,
            base_name: name,
            base_directory: top_dir,
            camel_case_name: camel_case_value,
            'docs_version': get_docs_version(),
            'django_version': django.__version__,
        }, autoescape=False)

        # Setup a stub settings environment for template rendering
        if not settings.configured:
            settings.configure()
            django.setup()

        template_dir = self.handle_template(options['template'],
                                            base_subdir)
        prefix_length = len(template_dir) + 1

        for root, dirs, files in os.walk(template_dir):

            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = path.join(top_dir, relative_dir)
                if not path.exists(target_dir):
                    os.mkdir(target_dir)

            # Prune hidden and bytecode-cache directories in place so
            # os.walk never descends into them.
            for dirname in dirs[:]:
                if dirname.startswith('.') or dirname == '__pycache__':
                    dirs.remove(dirname)

            for filename in files:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = path.join(root, filename)
                new_path = path.join(top_dir, relative_dir,
                                     filename.replace(base_name, name))
                for old_suffix, new_suffix in self.rewrite_template_suffixes:
                    if new_path.endswith(old_suffix):
                        new_path = new_path[:-len(old_suffix)] + new_suffix
                        break  # Only rewrite once

                if path.exists(new_path):
                    raise CommandError("%s already exists, overlaying a "
                                       "project or app into an existing "
                                       "directory won't replace conflicting "
                                       "files" % new_path)

                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                if new_path.endswith(extensions) or filename in extra_files:
                    with open(old_path, encoding='utf-8') as template_file:
                        content = template_file.read()
                    template = Engine().from_string(content)
                    content = template.render(context)
                    with open(new_path, 'w', encoding='utf-8') as new_file:
                        new_file.write(content)
                else:
                    shutil.copyfile(old_path, new_path)

                if self.verbosity >= 2:
                    self.stdout.write("Creating %s\n" % new_path)
                try:
                    shutil.copymode(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path, self.style.NOTICE)

        # Remove any downloaded/extracted temporaries registered along the way.
        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.\n")
            for path_to_remove in self.paths_to_remove:
                if path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove)

    def handle_template(self, template, subdir):
        """
        Determine where the app or project templates are.
        Use django.__path__[0] as the default because the Django install
        directory isn't known.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                return self.extract(absolute_path)

        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))

    def validate_name(self, name, app_or_project):
        """Raise CommandError unless *name* is a valid, non-conflicting
        Python identifier."""
        a_or_an = 'an' if app_or_project == 'app' else 'a'
        if name is None:
            raise CommandError('you must provide {an} {app} name'.format(
                an=a_or_an,
                app=app_or_project,
            ))
        # Check it's a valid directory name.
        if not name.isidentifier():
            raise CommandError(
                "'{name}' is not a valid {app} name. Please make sure the "
                "name is a valid identifier.".format(
                    name=name,
                    app=app_or_project,
                )
            )
        # Check it cannot be imported.
        try:
            import_module(name)
        except ImportError:
            pass
        else:
            raise CommandError(
                "'{name}' conflicts with the name of an existing Python "
                "module and cannot be used as {an} {app} name. Please try "
                "another name.".format(
                    name=name,
                    an=a_or_an,
                    app=app_or_project,
                )
            )

    def download(self, url):
        """
        Download the given URL and return the file name.
        """
        def cleanup_url(url):
            # Split off the last path segment as the filename; keep a pretty
            # URL (trailing slash preserved) for verbose output.
            tmp = url.rstrip('/')
            filename = tmp.split('/')[-1]
            if url.endswith('/'):
                display_url = tmp + '/'
            else:
                display_url = url
            return filename, display_url

        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)

        if self.verbosity >= 2:
            self.stdout.write("Downloading %s\n" % display_url)
        try:
            the_path, info = urlretrieve(url, path.join(tempdir, filename))
        except IOError as e:
            raise CommandError("couldn't download URL %s to %s: %s" %
                               (url, filename, e))

        used_name = the_path.split('/')[-1]

        # Trying to get better name from response headers
        content_disposition = info.get('content-disposition')
        if content_disposition:
            _, params = cgi.parse_header(content_disposition)
            guessed_filename = params.get('filename') or used_name
        else:
            guessed_filename = used_name

        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = info.get('content-type')
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext

        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path

        # Giving up
        return the_path

    def splitext(self, the_path):
        """
        Like os.path.splitext, but takes off .tar, too
        """
        base, ext = posixpath.splitext(the_path)
        if base.lower().endswith('.tar'):
            ext = base[-4:] + ext
            base = base[:-4]
        return base, ext

    def extract(self, filename):
        """
        Extract the given file to a temporarily and return
        the path of the directory with the extracted content.
        """
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
        self.paths_to_remove.append(tempdir)
        if self.verbosity >= 2:
            self.stdout.write("Extracting %s\n" % filename)
        try:
            archive.extract(filename, tempdir)
            return tempdir
        except (archive.ArchiveException, IOError) as e:
            raise CommandError("couldn't extract file %s to %s: %s" %
                               (filename, tempdir, e))

    def is_url(self, template):
        """Return True if the name looks like a URL."""
        if ':' not in template:
            return False
        scheme = template.split(':', 1)[0].lower()
        return scheme in self.url_schemes

    def make_writeable(self, filename):
        """
        Make sure that the file is writeable.
        Useful if our source is read-only.
        """
        if not os.access(filename, os.W_OK):
            st = os.stat(filename)
            new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
            os.chmod(filename, new_permissions)
| 40.18806
| 92
| 0.564287
|
acfcdf5fb651d3176f0201b4299a718ca7ee5ade
| 31,658
|
py
|
Python
|
fastapi/applications.py
|
johntheprime/fastapisoucecodes
|
8f22a865864d495f8b12820994ebdd82a76b262e
|
[
"MIT"
] | 2
|
2021-09-27T13:54:01.000Z
|
2021-11-10T10:18:20.000Z
|
fastapi/applications.py
|
johntheprime/fastapisoucecodes
|
8f22a865864d495f8b12820994ebdd82a76b262e
|
[
"MIT"
] | null | null | null |
fastapi/applications.py
|
johntheprime/fastapisoucecodes
|
8f22a865864d495f8b12820994ebdd82a76b262e
|
[
"MIT"
] | null | null | null |
# Overview: by line count this is the second-largest file in the framework, after routing.py.
# TODO: to be continued
from typing import Any, Callable, Coroutine, Dict, List, Optional, Sequence, Type, Union
from fastapi import routing
from fastapi.concurrency import AsyncExitStack
from fastapi.datastructures import Default, DefaultPlaceholder
from fastapi.encoders import DictIntStrAny, SetIntStr
from fastapi.exception_handlers import (
http_exception_handler,
request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from fastapi.logger import logger
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.openapi.utils import get_openapi
from fastapi.params import Depends
from fastapi.types import DecoratedCallable
from starlette.applications import Starlette
from starlette.datastructures import State
from starlette.exceptions import HTTPException
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import HTMLResponse, JSONResponse, Response
from starlette.routing import BaseRoute
from starlette.types import ASGIApp, Receive, Scope, Send
class FastAPI(Starlette):
    def __init__(
        self,
        *,
        debug: bool = False,
        routes: Optional[List[BaseRoute]] = None,
        title: str = "FastAPI",
        description: str = "",
        version: str = "0.1.0",
        openapi_url: Optional[str] = "/openapi.json",
        openapi_tags: Optional[List[Dict[str, Any]]] = None,
        servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        default_response_class: Type[Response] = Default(JSONResponse),
        docs_url: Optional[str] = "/docs",
        redoc_url: Optional[str] = "/redoc",
        swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
        swagger_ui_init_oauth: Optional[Dict[str, Any]] = None,
        middleware: Optional[Sequence[Middleware]] = None,
        exception_handlers: Optional[
            Dict[
                Union[int, Type[Exception]],
                Callable[[Request, Any], Coroutine[Any, Any, Response]],
            ]
        ] = None,
        on_startup: Optional[Sequence[Callable[[], Any]]] = None,
        on_shutdown: Optional[Sequence[Callable[[], Any]]] = None,
        openapi_prefix: str = "",
        root_path: str = "",
        root_path_in_servers: bool = True,
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        callbacks: Optional[List[BaseRoute]] = None,
        deprecated: Optional[bool] = None,
        include_in_schema: bool = True,
        **extra: Any,
    ) -> None:
        """Build the application: root router, default exception handlers,
        middleware stack, OpenAPI metadata, then register the docs routes
        via setup()."""
        self._debug: bool = debug
        self.state: State = State()
        # The root router receives all route registrations and the shared
        # per-route defaults (response class, dependencies, callbacks, ...).
        self.router: routing.APIRouter = routing.APIRouter(
            routes=routes,
            dependency_overrides_provider=self,
            on_startup=on_startup,
            on_shutdown=on_shutdown,
            default_response_class=default_response_class,
            dependencies=dependencies,
            callbacks=callbacks,
            deprecated=deprecated,
            include_in_schema=include_in_schema,
            responses=responses,
        )
        # User handlers are layered on top of the framework defaults for
        # HTTPException and request-validation errors.
        self.exception_handlers: Dict[
            Union[int, Type[Exception]],
            Callable[[Request, Any], Coroutine[Any, Any, Response]],
        ] = (
            {} if exception_handlers is None else dict(exception_handlers)
        )
        self.exception_handlers.setdefault(HTTPException, http_exception_handler)
        self.exception_handlers.setdefault(
            RequestValidationError, request_validation_exception_handler
        )
        self.user_middleware: List[Middleware] = (
            [] if middleware is None else list(middleware)
        )
        self.middleware_stack: ASGIApp = self.build_middleware_stack()
        self.title = title
        self.description = description
        self.version = version
        self.servers = servers or []
        self.openapi_url = openapi_url
        self.openapi_tags = openapi_tags
        # TODO: remove when discarding the openapi_prefix parameter
        if openapi_prefix:
            logger.warning(
                '"openapi_prefix" has been deprecated in favor of "root_path", which '
                "follows more closely the ASGI standard, is simpler, and more "
                "automatic. Check the docs at "
                "https://fastapi.tiangolo.com/advanced/sub-applications/"
            )
        self.root_path = root_path or openapi_prefix
        self.root_path_in_servers = root_path_in_servers
        self.docs_url = docs_url
        self.redoc_url = redoc_url
        self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url
        self.swagger_ui_init_oauth = swagger_ui_init_oauth
        self.extra = extra
        self.dependency_overrides: Dict[Callable[..., Any], Callable[..., Any]] = {}
        self.openapi_version = "3.0.2"
        if self.openapi_url:
            assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'"
            assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'"
        # Lazily built and cached by self.openapi().
        self.openapi_schema: Optional[Dict[str, Any]] = None
        self.setup()
    def openapi(self) -> Dict[str, Any]:
        """Return the OpenAPI schema, generating it from the registered
        routes on first call and caching it afterwards."""
        if not self.openapi_schema:
            self.openapi_schema = get_openapi(
                title=self.title,
                version=self.version,
                openapi_version=self.openapi_version,
                description=self.description,
                routes=self.routes,
                tags=self.openapi_tags,
                servers=self.servers,
            )
        return self.openapi_schema
    def setup(self) -> None:
        """Register the built-in documentation routes (OpenAPI JSON,
        Swagger UI, its OAuth2 redirect page, and ReDoc), honoring the
        configured URLs; a None URL disables that route."""
        if self.openapi_url:
            urls = (server_data.get("url") for server_data in self.servers)
            server_urls = {url for url in urls if url}

            async def openapi(req: Request) -> JSONResponse:
                # Prepend the request's root_path as a server entry once, so
                # clients behind a proxy resolve relative URLs correctly.
                root_path = req.scope.get("root_path", "").rstrip("/")
                if root_path not in server_urls:
                    if root_path and self.root_path_in_servers:
                        self.servers.insert(0, {"url": root_path})
                        server_urls.add(root_path)
                return JSONResponse(self.openapi())

            self.add_route(self.openapi_url, openapi, include_in_schema=False)
        if self.openapi_url and self.docs_url:

            async def swagger_ui_html(req: Request) -> HTMLResponse:
                root_path = req.scope.get("root_path", "").rstrip("/")
                openapi_url = root_path + self.openapi_url
                oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url
                if oauth2_redirect_url:
                    oauth2_redirect_url = root_path + oauth2_redirect_url
                return get_swagger_ui_html(
                    openapi_url=openapi_url,
                    title=self.title + " - Swagger UI",
                    oauth2_redirect_url=oauth2_redirect_url,
                    init_oauth=self.swagger_ui_init_oauth,
                )

            self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False)
            if self.swagger_ui_oauth2_redirect_url:

                async def swagger_ui_redirect(req: Request) -> HTMLResponse:
                    return get_swagger_ui_oauth2_redirect_html()

                self.add_route(
                    self.swagger_ui_oauth2_redirect_url,
                    swagger_ui_redirect,
                    include_in_schema=False,
                )
        if self.openapi_url and self.redoc_url:

            async def redoc_html(req: Request) -> HTMLResponse:
                root_path = req.scope.get("root_path", "").rstrip("/")
                openapi_url = root_path + self.openapi_url
                return get_redoc_html(
                    openapi_url=openapi_url, title=self.title + " - ReDoc"
                )

            self.add_route(self.redoc_url, redoc_html, include_in_schema=False)
    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """ASGI entry point: inject the configured root_path and an
        AsyncExitStack (for dependency cleanup) into the scope, then
        delegate to Starlette."""
        if self.root_path:
            scope["root_path"] = self.root_path
        if AsyncExitStack:
            async with AsyncExitStack() as stack:
                scope["fastapi_astack"] = stack
                await super().__call__(scope, receive, send)
        else:
            await super().__call__(scope, receive, send)  # pragma: no cover
    def add_api_route(
        self,
        path: str,
        endpoint: Callable[..., Coroutine[Any, Any, Response]],
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        methods: Optional[List[str]] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Union[Type[Response], DefaultPlaceholder] = Default(
            JSONResponse
        ),
        name: Optional[str] = None,
    ) -> None:
        """Register *endpoint* at *path* on the root router, forwarding all
        OpenAPI/serialization options unchanged (see APIRouter.add_api_route)."""
        self.router.add_api_route(
            path,
            endpoint=endpoint,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            methods=methods,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
        )
    def api_route(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        methods: Optional[List[str]] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Decorator form of add_api_route: registers the decorated function
        at *path* and returns it unchanged."""
        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.router.add_api_route(
                path,
                func,
                response_model=response_model,
                status_code=status_code,
                tags=tags,
                dependencies=dependencies,
                summary=summary,
                description=description,
                response_description=response_description,
                responses=responses,
                deprecated=deprecated,
                methods=methods,
                operation_id=operation_id,
                response_model_include=response_model_include,
                response_model_exclude=response_model_exclude,
                response_model_by_alias=response_model_by_alias,
                response_model_exclude_unset=response_model_exclude_unset,
                response_model_exclude_defaults=response_model_exclude_defaults,
                response_model_exclude_none=response_model_exclude_none,
                include_in_schema=include_in_schema,
                response_class=response_class,
                name=name,
            )
            return func

        return decorator
    def add_api_websocket_route(
        self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None
    ) -> None:
        """Register *endpoint* as a WebSocket handler for *path* on the app router."""
        self.router.add_api_websocket_route(path, endpoint, name=name)
    def websocket(
        self, path: str, name: Optional[str] = None
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Decorator form of :meth:`add_api_websocket_route`."""

        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.add_api_websocket_route(path, func, name=name)
            # Hand back the original function so the decorated name is unchanged.
            return func

        return decorator
    def include_router(
        self,
        router: routing.APIRouter,
        *,
        prefix: str = "",
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        include_in_schema: bool = True,
        default_response_class: Type[Response] = Default(JSONResponse),
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> None:
        """Mount every route of *router* onto this application's router.

        All options are forwarded unchanged to
        ``self.router.include_router``; ``prefix`` is prepended to the
        included routes' paths, and the remaining arguments act as defaults
        applied to each mounted route.
        """
        self.router.include_router(
            router,
            prefix=prefix,
            tags=tags,
            dependencies=dependencies,
            responses=responses,
            deprecated=deprecated,
            include_in_schema=include_in_schema,
            default_response_class=default_response_class,
            callbacks=callbacks,
        )
    def get(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a GET endpoint; all options are forwarded to ``self.router.get``."""
        return self.router.get(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
    def put(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a PUT endpoint; all options are forwarded to ``self.router.put``."""
        return self.router.put(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
    def post(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a POST endpoint; all options are forwarded to ``self.router.post``."""
        return self.router.post(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
def delete(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.delete(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
operation_id=operation_id,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
    def options(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare an OPTIONS endpoint; all options are forwarded to ``self.router.options``."""
        return self.router.options(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
    def head(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a HEAD endpoint; all options are forwarded to ``self.router.head``."""
        return self.router.head(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
    def patch(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a PATCH endpoint; all options are forwarded to ``self.router.patch``."""
        return self.router.patch(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
    def trace(
        self,
        path: str,
        *,
        response_model: Optional[Type[Any]] = None,
        status_code: int = 200,
        tags: Optional[List[str]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        callbacks: Optional[List[BaseRoute]] = None,
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Declare a TRACE endpoint; all options are forwarded to ``self.router.trace``."""
        return self.router.trace(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
        )
| 42.608345
| 88
| 0.637438
|
acfcdf7f703be873f5f13222778414082b25761b
| 13,886
|
py
|
Python
|
asa_tgcn_main.py
|
cuhksz-nlp/ASA-TGCN
|
4743fb5a941114a14530aa499243dbe955583ea9
|
[
"MIT"
] | 14
|
2021-05-29T13:50:46.000Z
|
2022-03-31T09:52:37.000Z
|
asa_tgcn_main.py
|
cuhksz-nlp/ASA-TGCN
|
4743fb5a941114a14530aa499243dbe955583ea9
|
[
"MIT"
] | 5
|
2021-06-07T01:19:10.000Z
|
2022-02-20T04:11:46.000Z
|
asa_tgcn_main.py
|
cuhksz-nlp/ASA-TGCN
|
4743fb5a941114a14530aa499243dbe955583ea9
|
[
"MIT"
] | 9
|
2021-07-01T13:23:06.000Z
|
2022-02-24T03:04:52.000Z
|
import logging
import argparse
import math
import os
import sys
from time import strftime, localtime
import random
import numpy as np
from pytorch_transformers import BertModel, BertConfig
from data_utils import Tokenizer4Bert, ABSADataset
from asa_tgcn_model import AsaTgcn
from sklearn import metrics
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Instructor:
    """Drives training and evaluation of the ASA-TGCN aspect-sentiment model.

    Builds the tokenizer, model, and the train/val/test datasets from the
    parsed command-line options, then exposes :meth:`train` to run the full
    training loop.
    """

    def __init__(self, opt):
        self.opt = opt
        logger.info(opt)
        # Label vocabularies: dependency types and sentiment polarities.
        deptype2id = ABSADataset.load_deptype_map(opt)
        polarity2id = ABSADataset.get_polarity2id()
        logger.info(deptype2id)
        logger.info(polarity2id)
        self.deptype2id = deptype2id
        self.polarity2id = polarity2id
        self.tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.bert_model)
        config = BertConfig.from_json_file(os.path.join(opt.model_path, CONFIG_NAME))
        config.num_labels = opt.polarities_dim
        config.num_types = len(self.deptype2id)
        logger.info(config)
        self.model = AsaTgcn.from_pretrained(opt.bert_model, config=config)
        self.model.to(opt.device)
        self.trainset = ABSADataset(opt.train_file, self.tokenizer, self.opt, deptype2id=deptype2id)
        self.testset = ABSADataset(opt.test_file, self.tokenizer, self.opt, deptype2id=deptype2id)
        # Validation set: explicit file > random split of train > fall back to test.
        if os.path.exists(opt.val_file):
            self.valset = ABSADataset(opt.val_file, self.tokenizer, self.opt, deptype2id=deptype2id)
        elif opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset
        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))

    def _print_args(self):
        """Log parameter counts and every training argument."""
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        logger.info('n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        logger.info('> training arguments:')
        for arg in vars(self.opt):
            logger.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def _reset_params(self):
        """Re-initialise all non-BERT trainable parameters (Xavier for matrices,
        uniform for vectors); the pre-trained BERT weights are left untouched."""
        for child in self.model.children():
            if type(child) != BertModel:  # skip bert params
                for p in child.parameters():
                    if p.requires_grad:
                        if len(p.shape) > 1:
                            torch.nn.init.xavier_uniform_(p)
                        else:
                            stdv = 1. / math.sqrt(p.shape[0])
                            torch.nn.init.uniform_(p, a=-stdv, b=stdv)

    def save_model(self, save_path, model, args):
        """Save model weights, config (with label maps embedded) and args."""
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_path, WEIGHTS_NAME)
        output_config_file = os.path.join(save_path, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        config = model_to_save.config
        # Embed the label vocabularies so evaluation can rebuild them from config.
        config.__dict__["deptype2id"] = self.deptype2id
        config.__dict__["polarity2id"] = self.polarity2id
        with open(output_config_file, "w", encoding='utf-8') as writer:
            writer.write(config.to_json_string())
        output_args_file = os.path.join(save_path, 'training_args.bin')
        torch.save(args, output_args_file)

    def _train(self, criterion, optimizer, train_data_loader, val_data_loader, test_data_loader):
        """Full training loop: per-epoch train, validate, checkpoint on best
        val accuracy, evaluate on test, and write ``eval_results.txt``."""
        max_val_acc = 0
        global_step = 0
        path = None  # NOTE(review): never reassigned; the method always returns None
        # BUG FIX: test_acc/test_f1 were previously bound only inside the epoch
        # loop, so num_epoch == 0 raised NameError when writing results below.
        test_acc, test_f1 = 0.0, 0.0
        results = {"bert_model": self.opt.bert_model, "batch_size": self.opt.batch_size,
                   "learning_rate": self.opt.learning_rate, "seed": self.opt.seed}
        for epoch in range(self.opt.num_epoch):
            logger.info('>' * 100)
            logger.info('epoch: {}'.format(epoch))
            n_correct, n_total, loss_total = 0, 0, 0
            self.model.train()
            for i_batch, t_sample_batched in enumerate(train_data_loader):
                global_step += 1
                optimizer.zero_grad()
                outputs = self.model(t_sample_batched["input_ids"].to(self.opt.device),
                                     t_sample_batched["segment_ids"].to(self.opt.device),
                                     t_sample_batched["valid_ids"].to(self.opt.device),
                                     t_sample_batched["mem_valid_ids"].to(self.opt.device),
                                     t_sample_batched["dep_adj_matrix"].to(self.opt.device),
                                     t_sample_batched["dep_value_matrix"].to(self.opt.device))
                targets = t_sample_batched['polarity'].to(self.opt.device)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                # Running statistics, logged every `log_step` optimizer steps.
                n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
                n_total += len(outputs)
                loss_total += loss.item() * len(outputs)
                if global_step % self.opt.log_step == 0:
                    train_acc = n_correct / n_total
                    train_loss = loss_total / n_total
                    logger.info('epoch: {}, loss: {:.4f}, acc: {:.4f}'.format(epoch, train_loss, train_acc))
            val_acc, val_f1 = Instructor._evaluate_acc_f1(self.model, val_data_loader, device=self.opt.device)
            logger.info('>epoch: {}, val_acc: {:.4f}, val_f1: {:.4f}'.format(epoch, val_acc, val_f1))
            results["{}_val_acc".format(epoch)] = val_acc
            results["{}_val_f1".format(epoch)] = val_f1
            # Checkpoint only when validation accuracy improves.
            if val_acc > max_val_acc:
                max_val_acc = val_acc
                saving_path = os.path.join(self.opt.outdir, "epoch_{}".format(epoch))
                if not os.path.exists(saving_path):
                    os.makedirs(saving_path)
                self.save_model(saving_path, self.model, self.opt)
            self.model.eval()
            saving_path = os.path.join(self.opt.outdir, "epoch_{}_eval.txt".format(epoch))
            test_acc, test_f1 = self._evaluate_acc_f1(self.model, test_data_loader, device=self.opt.device, saving_path=saving_path)
            logger.info('>> epoch: {}, test_acc: {:.4f}, test_f1: {:.4f}'.format(epoch, test_acc, test_f1))
        # Note: test_acc/test_f1 are from the LAST epoch, not the best-val epoch.
        results["max_val_acc"] = max_val_acc
        results["test_acc"] = test_acc
        results["test_f1"] = test_f1
        output_eval_file = os.path.join(self.opt.outdir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for k, v in results.items():
                writer.write("{}={}\n".format(k, v))
        return path

    @staticmethod
    def _evaluate_acc_f1(model, data_loader, device, saving_path=None):
        """Evaluate `model` on `data_loader`; return (accuracy, macro-F1).

        When `saving_path` is given, every example's gold label, prediction,
        raw text and aspect are dumped there, tab-separated.
        """
        n_correct, n_total = 0, 0
        t_targets_all, t_outputs_all = None, None
        model.eval()
        saving_path_f = open(saving_path, 'w') if saving_path is not None else None
        try:
            with torch.no_grad():
                for t_batch, t_sample_batched in enumerate(data_loader):
                    t_targets = t_sample_batched['polarity'].to(device)
                    t_raw_texts = t_sample_batched['raw_text']
                    t_aspects = t_sample_batched['aspect']
                    t_outputs = model(t_sample_batched["input_ids"].to(device),
                                      t_sample_batched["segment_ids"].to(device),
                                      t_sample_batched["valid_ids"].to(device),
                                      t_sample_batched["mem_valid_ids"].to(device),
                                      t_sample_batched["dep_adj_matrix"].to(device),
                                      t_sample_batched["dep_value_matrix"].to(device))
                    n_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
                    n_total += len(t_outputs)
                    # Accumulate all targets/outputs for the macro-F1 computation.
                    if t_targets_all is None:
                        t_targets_all = t_targets
                        t_outputs_all = t_outputs
                    else:
                        t_targets_all = torch.cat((t_targets_all, t_targets), dim=0)
                        t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
                    if saving_path_f is not None:
                        for t_target, t_output, t_raw_text, t_aspect in zip(t_targets.detach().cpu().numpy(),
                                                                            torch.argmax(t_outputs, -1).detach().cpu().numpy(),
                                                                            t_raw_texts, t_aspects):
                            saving_path_f.write("{}\t{}\t{}\t{}\n".format(t_target, t_output, t_raw_text, t_aspect))
        finally:
            # BUG FIX: the per-example dump file was previously never closed.
            if saving_path_f is not None:
                saving_path_f.close()
        acc = n_correct / n_total
        f1 = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 1, 2], average='macro')
        return acc, f1

    def train(self):
        """Build criterion/optimizer/data loaders, reset params, and train."""
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        _params = filter(lambda p: p.requires_grad, self.model.parameters())
        optimizer = torch.optim.Adam(_params, lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)
        train_data_loader = DataLoader(dataset=self.trainset, batch_size=self.opt.batch_size, shuffle=True)
        test_data_loader = DataLoader(dataset=self.testset, batch_size=self.opt.batch_size, shuffle=False)
        val_data_loader = DataLoader(dataset=self.valset, batch_size=self.opt.batch_size, shuffle=False)
        self._reset_params()
        self._train(criterion, optimizer, train_data_loader, val_data_loader, test_data_loader)
def test(opt):
    """Load a trained checkpoint from ``opt.model_path`` and report test accuracy/F1."""
    logger.info(opt)
    # Rebuild the saved configuration and the matching tokenizer/model.
    saved_config = BertConfig.from_json_file(os.path.join(opt.model_path, CONFIG_NAME))
    logger.info(saved_config)
    bert_tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.model_path)
    net = AsaTgcn.from_pretrained(opt.model_path)
    net.to(opt.device)
    # The dependency-type vocabulary was serialised into the config at save time.
    dep_map = saved_config.deptype2id
    logger.info(dep_map)
    eval_set = ABSADataset(opt.test_file, bert_tokenizer, opt, deptype2id=dep_map)
    eval_loader = DataLoader(dataset=eval_set, batch_size=opt.batch_size, shuffle=False)
    acc, f1 = Instructor._evaluate_acc_f1(net, eval_loader, device=opt.device)
    logger.info('>> test_acc: {:.4f}, test_f1: {:.4f}'.format(acc, f1))
def get_args():
    """Parse command-line hyper-parameters and derive the output directory.

    BUG FIX: the ``--do_train`` branch formatted ``opt.tool``, ``opt.dataset``
    and ``opt.warmup_proportion``, none of which were declared as arguments,
    so any training run crashed with ``AttributeError``.  They are now real
    arguments with defaults.  ``os.mkdir`` is replaced by ``os.makedirs`` so
    nested output paths work.
    """
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='tgcn', type=str)
    parser.add_argument('--train_file', default='sample_data/train.txt', type=str)
    parser.add_argument('--test_file', default='sample_data/test.txt', type=str)
    parser.add_argument('--val_file', default='sample_data/val.txt', type=str)
    parser.add_argument('--log', default='log', type=str)
    parser.add_argument('--bert_model', default='./bert-large-uncased', type=str)
    parser.add_argument('--model_path', default='./bert-large-uncased', type=str)
    parser.add_argument('--learning_rate', default=2e-5, type=float)
    parser.add_argument('--dropout', default=0, type=float)
    parser.add_argument('--bert_dropout', default=0.2, type=float)
    parser.add_argument('--l2reg', default=0.01, type=float)
    parser.add_argument('--num_epoch', default=30, type=int)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--log_step', default=5, type=int)
    parser.add_argument('--max_seq_len', default=100, type=int)
    parser.add_argument('--polarities_dim', default=3, type=int)
    parser.add_argument('--device', default=None, type=str)
    parser.add_argument('--seed', default=50, type=int)
    parser.add_argument('--valset_ratio', default=0, type=float)
    parser.add_argument('--outdir', default='./', type=str)
    # Previously referenced below but never declared (AttributeError at runtime):
    parser.add_argument('--tool', default='tgcn', type=str)
    parser.add_argument('--dataset', default='sample', type=str)
    parser.add_argument('--warmup_proportion', default=0.1, type=float)
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    opt = parser.parse_args()
    if opt.do_train:
        # Encode the key hyper-parameters into the run's output directory name.
        opt.outdir = os.path.join(opt.outdir, "{}_{}_bts_{}_lr_{}_warmup_{}_seed_{}_bert_dropout_{}".format(
            opt.tool,
            opt.dataset,
            opt.batch_size,
            opt.learning_rate,
            opt.warmup_proportion,
            opt.seed,
            opt.bert_dropout
        ))
        if not os.path.exists(opt.outdir):
            os.makedirs(opt.outdir)  # makedirs: outdir may be nested
    return opt
def set_seed(opt):
    """Seed every relevant RNG for reproducibility; no-op when seed is None."""
    seed = opt.seed
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels at the cost of some speed.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def main():
    """CLI entry point: parse args, seed RNGs, pick device, set up logging, run."""
    opt = get_args()
    set_seed(opt)
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)
    opt.n_gpu = torch.cuda.device_count()
    # BUG FIX: ``opt.dataset`` is not guaranteed to be a declared argument
    # (the original argument parser never defined it), so look it up defensively
    # instead of crashing with AttributeError before logging is even set up.
    dataset = getattr(opt, 'dataset', 'data')
    # Robustness: the log directory may not exist yet; FileHandler would raise.
    if not os.path.exists(opt.log):
        os.makedirs(opt.log)
    log_file = '{}/{}-{}-{}.log'.format(opt.log, opt.model_name, dataset, strftime("%y%m%d-%H%M", localtime()))
    logger.addHandler(logging.FileHandler(log_file))
    if opt.do_train:
        ins = Instructor(opt)
        ins.train()
    elif opt.do_eval:
        test(opt)
# Script entry point.
if __name__ == '__main__':
    main()
| 45.379085
| 136
| 0.620409
|
acfce07f87efa9440bafaec53b9a6b5bbe7c8c41
| 2,870
|
py
|
Python
|
models/rundoc2vec.py
|
andrew-lockwood/lab-project
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-08-30T15:21:31.000Z
|
2017-08-30T15:21:31.000Z
|
models/rundoc2vec.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | null | null | null |
models/rundoc2vec.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-06-15T20:44:59.000Z
|
2017-06-15T20:44:59.000Z
|
#### Directories that need to be set before using ####
# Models: where models will be stored
# Scores: where model scores will be stored
from context import LabeledSentences, settings
# Uncomment to set up gensim's native logging. Extremely useful when
# training models to visualize progress.
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \
level=logging.INFO)
from gensim.models.doc2vec import Doc2Vec
from collections import Counter
import os
# Set the number of cores
import multiprocessing
cores = multiprocessing.cpu_count() * 2
class Doc2VecModel(object):
    """Thin wrapper around gensim's Doc2Vec: build, train, persist and query a model."""

    def __init__(self, model_name):
        self.model_path = os.path.join(settings.d2v, model_name)
        self.model_name = model_name
        self.model = None  # populated by create_model() or load_model()

    def create_build_train_model(self, size):
        """Convenience pipeline: initialise, build vocab, then train and save."""
        self.create_model(size)
        self.build_vocab()
        self.train_model()

    def create_model(self, size):
        """Initializes a model of a given vector size."""
        self.model = Doc2Vec(size=size, workers=cores, sample=1e-5,
                             iter=20, dbow_words=1, window=8, min_count=50)

    def build_vocab(self, label_arg=70):
        """Builds vocab from a LabeledSentences object.

        ``label_arg`` was previously hard-coded to 70; it is now a parameter
        with the same default, so existing callers are unaffected.
        """
        self.sentences = LabeledSentences(label_arg)
        self.model.build_vocab(self.sentences)

    def train_model(self):
        """Train on the sentences prepared by build_vocab() and save to disk."""
        # NOTE(review): newer gensim releases require
        # train(sentences, total_examples=..., epochs=...); this call matches
        # the older API this project was written against - confirm the pinned
        # gensim version before upgrading.
        self.model.train(self.sentences)
        self.model.save(self.model_path)

    def save_model(self):
        """Persist the model to the path derived from the model name."""
        self.model.save(self.model_path)

    def load_model(self):
        """Load a previously saved model from disk."""
        self.model = Doc2Vec.load(self.model_path)

    def vocab(self):
        """Return the word vocabulary mapping."""
        return self.model.vocab

    def docvecs(self):
        """Return the raw document-vector matrix (docs x features)."""
        return self.model.docvecs.doctag_syn0

    def docfeats(self):
        """Return the number of features per document vector."""
        return self.model.docvecs.doctag_syn0.shape[1]

    def docnumber(self):
        """Return the number of documents in the model."""
        return self.model.docvecs.doctag_syn0.shape[0]

    def get_docvec(self, index):
        """Return the document vector at *index*."""
        return self.model.docvecs[index]

    def get_doctag(self, index):
        """Return the doctag string for a positional *index*."""
        return self.model.docvecs.index_to_doctag(index)

    def get_index(self, doctag):
        # NOTE(review): gensim's DocvecsArray method is ``indexed_doctags``;
        # it returns more than a single index - verify this matches callers.
        return self.model.docvecs.indexed_doctags(doctag)

    def get_similar_words(self, word, N=20):
        """Return the N words most similar to *word*."""
        return self.model.similar_by_word(word, topn=N)

    def get_similar_doc(self, docvec, N=20):
        """Return the N documents most similar to *docvec*."""
        # BUG FIX: gensim's DocvecsArray has no ``mostsimilar`` attribute; the
        # correct method name is ``most_similar``.
        return self.model.docvecs.most_similar(docvec, topn=N)

    def get_wordvec(self, word):
        """Return the vector for a word token."""
        # BUG FIX: ``syn0`` is indexed by integer position, not token string;
        # item access on the model performs the token lookup.
        return self.model[word]
# Example usage: train and persist a 300-dimensional Doc2Vec model.
if __name__ == '__main__':
    d2v = Doc2VecModel('300model')
    d2v.create_build_train_model(300)
| 29.587629
| 78
| 0.650871
|
acfce30b9254f9c11d2a226b0b6caab4d0770895
| 1,735
|
py
|
Python
|
bitmovin_api_sdk/encoding/inputs/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/inputs/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/inputs/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.inputs.inputs_api import InputsApi
from bitmovin_api_sdk.encoding.inputs.type.type_api import TypeApi
from bitmovin_api_sdk.encoding.inputs.rtmp.rtmp_api import RtmpApi
from bitmovin_api_sdk.encoding.inputs.redundant_rtmp.redundant_rtmp_api import RedundantRtmpApi
from bitmovin_api_sdk.encoding.inputs.s3.s3_api import S3Api
from bitmovin_api_sdk.encoding.inputs.s3_role_based.s3_role_based_api import S3RoleBasedApi
from bitmovin_api_sdk.encoding.inputs.generic_s3.generic_s3_api import GenericS3Api
from bitmovin_api_sdk.encoding.inputs.local.local_api import LocalApi
from bitmovin_api_sdk.encoding.inputs.gcs.gcs_api import GcsApi
from bitmovin_api_sdk.encoding.inputs.gcs_service_account.gcs_service_account_api import GcsServiceAccountApi
from bitmovin_api_sdk.encoding.inputs.azure.azure_api import AzureApi
from bitmovin_api_sdk.encoding.inputs.ftp.ftp_api import FtpApi
from bitmovin_api_sdk.encoding.inputs.sftp.sftp_api import SftpApi
from bitmovin_api_sdk.encoding.inputs.http.http_api import HttpApi
from bitmovin_api_sdk.encoding.inputs.https.https_api import HttpsApi
from bitmovin_api_sdk.encoding.inputs.aspera.aspera_api import AsperaApi
from bitmovin_api_sdk.encoding.inputs.akamai_netstorage.akamai_netstorage_api import AkamaiNetstorageApi
from bitmovin_api_sdk.encoding.inputs.srt.srt_api import SrtApi
from bitmovin_api_sdk.encoding.inputs.tcp.tcp_api import TcpApi
from bitmovin_api_sdk.encoding.inputs.udp.udp_api import UdpApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_api import UdpMulticastApi
from bitmovin_api_sdk.encoding.inputs.zixi.zixi_api import ZixiApi
from bitmovin_api_sdk.encoding.inputs.input_list_query_params import InputListQueryParams
| 72.291667
| 109
| 0.895101
|
acfce3126c4cc84eddfecc14e161e627d2a39c15
| 29,566
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 2
|
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
:ivar api_version: Client API version. Constant value: "2015-06-15".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-06-15"
self.config = config
def delete(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterface
<azure.mgmt.network.v2015_06_15.models.NetworkInterface>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network
interface operation.
:type parameters: :class:`NetworkInterface
<azure.mgmt.network.v2015_06_15.models.NetworkInterface>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`NetworkInterface
<azure.mgmt.network.v2015_06_15.models.NetworkInterface>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkInterface')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', response)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterfacePaged
<azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterfacePaged
<azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_virtual_machine_scale_set_vm_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, custom_headers=None, raw=False, **operation_config):
"""Gets information about all network interfaces in a virtual machine in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterfacePaged
<azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_virtual_machine_scale_set_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterfacePaged
<azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_virtual_machine_scale_set_network_interface(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkInterface
<azure.mgmt.network.v2015_06_15.models.NetworkInterface>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| 47.533762
| 242
| 0.663702
|
acfce45248ef3528e1d11462dbac03ee76474d6f
| 4,951
|
py
|
Python
|
Current/l_test_model.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
Current/l_test_model.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
Current/l_test_model.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
import random
import gym
import numpy as np
import tensorflow as tf
import os
from tensorflow import keras
from collections import deque
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
#from gym.cartpole.scores.score_logger import ScoreLogger
file_format = ".h5" # The file format that the model is saved as. If not h5, it would save as the SaveModel format, a tensorflow format
checkpoint_path = "./models/checkpoint"
# The checkpoint file (full filename would be 'checkpoint.h5') is saved every time the AI trains,
# which is every time the enviornment progresses one frame
save_path = "./models/save_run"
# The save_run file (full filename: 'save_run.h5') is updated (or created) every time a Run is completes.
# A run is completed when the 'terminal' variable is True, which is dictated by the OpenAI Gym library
# From what I've seen, a run is roughly 200 frames/steps, but I have not checked to see if that is
# constant or it depends on something else.
print(tf.config.list_physical_devices())
ENV_NAME = "MountainCar-v0"
# The ENV_NAME variable is the enviornment that the AI is training on. Check the OpenAI Gym website for the others.
# With this configuration, onmy [Box] (need to confirm name). Basically any enviornemnt that has a control scheme
# where there are no floating point values that scale the responce should work. I may assemble a list of the
# enviornments, but the best place to check is the OpenAI Gym website.
GAMMA = 0.95
LEARNING_RATE = 0.001
MEMORY_SIZE = 1000000000 #original: 1000000
BATCH_SIZE = 20
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.995
class DQNSolver:
def __init__(self, observation_space, action_space):
self.exploration_rate = EXPLORATION_MAX
self.action_space = action_space
self.memory = deque(maxlen=MEMORY_SIZE)
self.model = Sequential()
self.model.add(Dense(24, input_shape=(observation_space,), activation="relu"))
self.model.add(Dense(24, activation="relu"))
self.model.add(Dense(self.action_space, activation="linear"))
#self.model.compile(optimizer="adam", loss="mse")#, lr=LEARNING_RATE)
self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE), loss=tf.keras.losses.MeanSquaredError())
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done)) #looks to be a basic mem write
def act(self, state):
if np.random.rand() < self.exploration_rate:
return random.randrange(self.action_space) #random num gen
q_values = self.model.predict(state) #uses neural net to predict best action
return np.argmax(q_values[0])
def experience_replay(self):
if len(self.memory) < BATCH_SIZE:
return
batch = random.sample(self.memory, BATCH_SIZE)
for state, action, reward, state_next, terminal in batch:
q_update = reward
if not terminal:
q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0])) #Updates the q table
q_values = self.model.predict(state)
q_values[0][action] = q_update
self.model.fit(state, q_values, verbose=0)
self.exploration_rate *= EXPLORATION_DECAY
self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)
def cartpole():
env = gym.make(ENV_NAME)
#score_logger = ScoreLogger(ENV_NAME)
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
dqn_solver = DQNSolver(observation_space, action_space)
run = 0
while True:
run += 1
state = env.reset()
state = np.reshape(state, [1, observation_space])
step = 0
while True:
step += 1
print("step: " + str(step))
env.render()
action = dqn_solver.act(state)
state_next, reward, terminal, info = env.step(action)
reward = reward if not terminal else -reward
state_next = np.reshape(state_next, [1, observation_space])
dqn_solver.remember(state, action, reward, state_next, terminal)
state = state_next
mem = dqn_solver.memory[step-1]
#print(mem)
save = tf.keras.Model(dqn_solver)
#print(save)
tf.keras.models.save_model(dqn_solver.model, checkpoint_path + file_format)
if terminal:
print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
tf.keras.models.save_model(dqn_solver.model, save_path + str(run) + file_format)
#score_logger.add_score(step, run)
break
dqn_solver.experience_replay()
if __name__ == "__main__":
cartpole()
| 40.917355
| 135
| 0.68168
|
acfce4f8296bd9dbe569b8ff0f5288836318909a
| 23,586
|
py
|
Python
|
stake-pool/py/stake_pool/actions.py
|
biw/solana-program-library
|
5611ad8bd595d9e3666f8b115cd28f8116038645
|
[
"Apache-2.0"
] | 3
|
2022-03-04T18:18:53.000Z
|
2022-03-04T18:24:37.000Z
|
stake-pool/py/stake_pool/actions.py
|
biw/solana-program-library
|
5611ad8bd595d9e3666f8b115cd28f8116038645
|
[
"Apache-2.0"
] | 78
|
2021-12-21T13:13:19.000Z
|
2022-03-29T08:31:44.000Z
|
stake-pool/py/stake_pool/actions.py
|
biw/solana-program-library
|
5611ad8bd595d9e3666f8b115cd28f8116038645
|
[
"Apache-2.0"
] | 2
|
2022-03-23T14:29:32.000Z
|
2022-03-24T01:07:13.000Z
|
from typing import Tuple
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.rpc.async_api import AsyncClient
from solana.rpc.commitment import Confirmed
from solana.rpc.types import TxOpts
from solana.sysvar import SYSVAR_CLOCK_PUBKEY, SYSVAR_RENT_PUBKEY, SYSVAR_STAKE_HISTORY_PUBKEY
from solana.transaction import Transaction
import solana.system_program as sys
from spl.token.constants import TOKEN_PROGRAM_ID
from stake.constants import STAKE_PROGRAM_ID, STAKE_LEN, SYSVAR_STAKE_CONFIG_ID
import stake.instructions as st
from stake.state import StakeAuthorize
from stake_pool.constants import \
MAX_VALIDATORS_TO_UPDATE, \
STAKE_POOL_PROGRAM_ID, \
find_stake_program_address, \
find_transient_stake_program_address, \
find_withdraw_authority_program_address
from stake_pool.state import STAKE_POOL_LAYOUT, ValidatorList, Fee, StakePool
import stake_pool.instructions as sp
from stake.actions import create_stake
from spl_token.actions import create_mint, create_associated_token_account
async def create(client: AsyncClient, manager: Keypair,
                 stake_pool: Keypair, validator_list: Keypair,
                 pool_mint: PublicKey, reserve_stake: PublicKey,
                 manager_fee_account: PublicKey, fee: Fee, referral_fee: int):
    """Create and initialize a new stake pool on-chain.

    Sends two transactions: the first funds and allocates the stake-pool
    state account and the validator-list account (both owned by the stake
    pool program); the second sends the program's ``Initialize`` instruction.
    The same ``fee`` value is used for the epoch, withdrawal, and deposit
    fees.

    Args:
        client: RPC client used to send the transactions.
        manager: Funds the new accounts and becomes the pool's manager and
            staker; signs both transactions.
        stake_pool: Keypair for the not-yet-created stake pool account.
        validator_list: Keypair for the not-yet-created validator list
            account.
        pool_mint: Mint of the pool token (must already exist).
        reserve_stake: Pre-created reserve stake account.
        manager_fee_account: Pool-token account that collects manager fees.
        fee: Fee applied as epoch, withdrawal, and deposit fee.
        referral_fee: Referral fee passed through to ``Initialize``.
    """
    # Rent-exempt balance for the fixed-size stake pool state account.
    resp = await client.get_minimum_balance_for_rent_exemption(STAKE_POOL_LAYOUT.sizeof())
    pool_balance = resp['result']
    txn = Transaction()
    txn.add(
        sys.create_account(
            sys.CreateAccountParams(
                from_pubkey=manager.public_key,
                new_account_pubkey=stake_pool.public_key,
                lamports=pool_balance,
                space=STAKE_POOL_LAYOUT.sizeof(),
                program_id=STAKE_POOL_PROGRAM_ID,
            )
        )
    )
    max_validators = 2950  # current supported max by the program, go big!
    # The validator list is sized up-front for max_validators entries.
    validator_list_size = ValidatorList.calculate_validator_list_size(max_validators)
    resp = await client.get_minimum_balance_for_rent_exemption(validator_list_size)
    validator_list_balance = resp['result']
    txn.add(
        sys.create_account(
            sys.CreateAccountParams(
                from_pubkey=manager.public_key,
                new_account_pubkey=validator_list.public_key,
                lamports=validator_list_balance,
                space=validator_list_size,
                program_id=STAKE_POOL_PROGRAM_ID,
            )
        )
    )
    # Both new accounts must sign their own creation, alongside the funder.
    await client.send_transaction(
        txn, manager, stake_pool, validator_list, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
    # Derive the program's withdraw authority PDA for this pool.
    (withdraw_authority, seed) = find_withdraw_authority_program_address(
        STAKE_POOL_PROGRAM_ID, stake_pool.public_key)
    txn = Transaction()
    txn.add(
        sp.initialize(
            sp.InitializeParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool.public_key,
                manager=manager.public_key,
                staker=manager.public_key,
                withdraw_authority=withdraw_authority,
                validator_list=validator_list.public_key,
                reserve_stake=reserve_stake,
                pool_mint=pool_mint,
                manager_fee_account=manager_fee_account,
                token_program_id=TOKEN_PROGRAM_ID,
                epoch_fee=fee,
                withdrawal_fee=fee,
                deposit_fee=fee,
                referral_fee=referral_fee,
                max_validators=max_validators,
            )
        )
    )
    await client.send_transaction(
        txn, manager, validator_list, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def create_all(client: AsyncClient, manager: Keypair, fee: Fee, referral_fee: int) -> Tuple[PublicKey, PublicKey]:
    """Create a stake pool along with every account it depends on.

    Creates the reserve stake account, the pool mint, and the manager's
    associated fee token account, then calls :func:`create` to allocate and
    initialize the pool itself.

    Bug fix: the ``fee`` and ``referral_fee`` arguments were previously
    overwritten with hard-coded values (``Fee(1, 1000)`` and ``20``) just
    before the :func:`create` call, silently ignoring whatever the caller
    passed. The caller-supplied values are now honored.

    Args:
        client: RPC client used to send the transactions.
        manager: Funds every new account and becomes the pool manager.
        fee: Fee applied as the pool's epoch, withdrawal, and deposit fee.
        referral_fee: Referral fee percentage for deposits.

    Returns:
        Tuple of (stake pool address, validator list address).
    """
    stake_pool = Keypair()
    validator_list = Keypair()
    (pool_withdraw_authority, seed) = find_withdraw_authority_program_address(
        STAKE_POOL_PROGRAM_ID, stake_pool.public_key)
    # The reserve stake is owned by the pool's withdraw authority PDA.
    reserve_stake = Keypair()
    await create_stake(client, manager, reserve_stake, pool_withdraw_authority, 1)
    # The pool mint's authority is also the withdraw authority PDA.
    pool_mint = Keypair()
    await create_mint(client, manager, pool_mint, pool_withdraw_authority)
    manager_fee_account = await create_associated_token_account(
        client,
        manager,
        manager.public_key,
        pool_mint.public_key,
    )
    await create(
        client, manager, stake_pool, validator_list, pool_mint.public_key,
        reserve_stake.public_key, manager_fee_account, fee, referral_fee)
    return (stake_pool.public_key, validator_list.public_key)
async def add_validator_to_pool(
    client: AsyncClient, funder: Keypair,
    stake_pool_address: PublicKey, validator: PublicKey
):
    """Add a validator vote account to the pool's validator list.

    Fetches the on-chain stake pool state to discover the staker and the
    validator-list address, then sends the program's convenience
    "add validator with vote" instruction, signed by ``funder``.
    """
    account_info = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    raw_data = account_info['result']['value']['data']
    pool = StakePool.decode(raw_data[0], raw_data[1])
    instruction = sp.add_validator_to_pool_with_vote(
        STAKE_POOL_PROGRAM_ID,
        stake_pool_address,
        pool.staker,
        pool.validator_list,
        funder.public_key,
        validator,
    )
    transaction = Transaction()
    transaction.add(instruction)
    await client.send_transaction(
        transaction, funder,
        opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def remove_validator_from_pool(
    client: AsyncClient, staker: Keypair,
    stake_pool_address: PublicKey, validator: PublicKey
):
    """Remove a validator from the pool's validator list.

    Creates a fresh stake account to receive the removed validator's stake
    (funded by the program's split, hence zero lamports up front), then
    sends the "remove validator with vote" instruction.

    Fix: ``next()`` without a default raised ``StopIteration`` when the
    validator was not in the list, which inside a coroutine surfaces as an
    opaque ``RuntimeError``; a missing validator now raises a descriptive
    ``ValueError`` instead.

    Raises:
        ValueError: if ``validator`` is not in the pool's validator list.
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    resp = await client.get_account_info(stake_pool.validator_list, commitment=Confirmed)
    data = resp['result']['value']['data']
    validator_list = ValidatorList.decode(data[0], data[1])
    validator_info = next(
        (x for x in validator_list.validators if x.vote_account_address == validator),
        None,
    )
    if validator_info is None:
        raise ValueError(f"Validator {validator} is not in the stake pool")
    destination_stake = Keypair()
    txn = Transaction()
    txn.add(
        sys.create_account(
            sys.CreateAccountParams(
                from_pubkey=staker.public_key,
                new_account_pubkey=destination_stake.public_key,
                lamports=0,  # will get filled by split
                space=STAKE_LEN,
                program_id=STAKE_PROGRAM_ID,
            )
        )
    )
    txn.add(
        sp.remove_validator_from_pool_with_vote(
            STAKE_POOL_PROGRAM_ID,
            stake_pool_address,
            stake_pool.staker,
            stake_pool.validator_list,
            staker.public_key,
            validator,
            validator_info.transient_seed_suffix_start,
            destination_stake.public_key
        )
    )
    await client.send_transaction(
        txn, staker, destination_stake,
        opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def deposit_sol(
    client: AsyncClient, funder: Keypair, stake_pool_address: PublicKey,
    destination_token_account: PublicKey, amount: int,
):
    """Deposit ``amount`` lamports into the pool's reserve, minting pool
    tokens into ``destination_token_account``.

    The destination account doubles as the referral account, and no
    separate SOL deposit authority is supplied.
    """
    account_info = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    raw_data = account_info['result']['value']['data']
    pool = StakePool.decode(raw_data[0], raw_data[1])
    (withdraw_authority, _seed) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    params = sp.DepositSolParams(
        program_id=STAKE_POOL_PROGRAM_ID,
        stake_pool=stake_pool_address,
        withdraw_authority=withdraw_authority,
        reserve_stake=pool.reserve_stake,
        funding_account=funder.public_key,
        destination_pool_account=destination_token_account,
        manager_fee_account=pool.manager_fee_account,
        referral_pool_account=destination_token_account,
        pool_mint=pool.pool_mint,
        system_program_id=sys.SYS_PROGRAM_ID,
        token_program_id=pool.token_program_id,
        amount=amount,
        deposit_authority=None,
    )
    transaction = Transaction()
    transaction.add(sp.deposit_sol(params))
    await client.send_transaction(
        transaction, funder,
        opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def withdraw_sol(
    client: AsyncClient, owner: Keypair, source_token_account: PublicKey,
    stake_pool_address: PublicKey, destination_system_account: PublicKey, amount: int,
):
    """Burn ``amount`` pool tokens from ``source_token_account`` and send the
    corresponding lamports from the pool's reserve to
    ``destination_system_account``. ``owner`` signs as the token transfer
    authority; no separate SOL withdraw authority is supplied.
    """
    account_info = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    raw_data = account_info['result']['value']['data']
    pool = StakePool.decode(raw_data[0], raw_data[1])
    (withdraw_authority, _seed) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    params = sp.WithdrawSolParams(
        program_id=STAKE_POOL_PROGRAM_ID,
        stake_pool=stake_pool_address,
        withdraw_authority=withdraw_authority,
        source_transfer_authority=owner.public_key,
        source_pool_account=source_token_account,
        reserve_stake=pool.reserve_stake,
        destination_system_account=destination_system_account,
        manager_fee_account=pool.manager_fee_account,
        pool_mint=pool.pool_mint,
        clock_sysvar=SYSVAR_CLOCK_PUBKEY,
        stake_history_sysvar=SYSVAR_STAKE_HISTORY_PUBKEY,
        stake_program_id=STAKE_PROGRAM_ID,
        token_program_id=pool.token_program_id,
        amount=amount,
        sol_withdraw_authority=None,
    )
    transaction = Transaction()
    transaction.add(sp.withdraw_sol(params))
    await client.send_transaction(
        transaction, owner,
        opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def deposit_stake(
    client: AsyncClient,
    deposit_stake_authority: Keypair,
    stake_pool_address: PublicKey,
    validator_vote: PublicKey,
    deposit_stake: PublicKey,
    destination_pool_account: PublicKey,
):
    """Deposit an existing stake account into the pool, minting pool tokens.

    Builds a single transaction with three instructions: two stake
    ``Authorize`` instructions that hand both the staker and withdrawer
    authority of ``deposit_stake`` over to the pool's deposit authority,
    followed by the pool's ``DepositStake`` instruction.

    NOTE(review): the ``deposit_stake`` parameter shadows this function's
    own name inside the body; ``sp.deposit_stake`` is still reachable via
    the module alias. The parameter name is part of the public interface,
    so it is left unchanged.

    Args:
        client: RPC client used to send the transaction.
        deposit_stake_authority: Current staker/withdrawer authority of the
            deposited stake account; signs the transaction.
        stake_pool_address: Address of the stake pool.
        validator_vote: Vote account whose validator stake receives the
            deposit.
        deposit_stake: Stake account being deposited into the pool.
        destination_pool_account: Pool-token account credited with the
            minted tokens (also used as the referral account).
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    (withdraw_authority, _) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    # PDA of the pool's stake account for this validator.
    (validator_stake, _) = find_stake_program_address(
        STAKE_POOL_PROGRAM_ID,
        validator_vote,
        stake_pool_address,
    )
    txn = Transaction()
    # Hand the STAKER authority of the deposited account to the pool.
    txn.add(
        st.authorize(
            st.AuthorizeParams(
                stake=deposit_stake,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                authority=deposit_stake_authority.public_key,
                new_authority=stake_pool.stake_deposit_authority,
                stake_authorize=StakeAuthorize.STAKER,
            )
        )
    )
    # Hand the WITHDRAWER authority to the pool as well.
    txn.add(
        st.authorize(
            st.AuthorizeParams(
                stake=deposit_stake,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                authority=deposit_stake_authority.public_key,
                new_authority=stake_pool.stake_deposit_authority,
                stake_authorize=StakeAuthorize.WITHDRAWER,
            )
        )
    )
    txn.add(
        sp.deposit_stake(
            sp.DepositStakeParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                validator_list=stake_pool.validator_list,
                deposit_authority=stake_pool.stake_deposit_authority,
                withdraw_authority=withdraw_authority,
                deposit_stake=deposit_stake,
                validator_stake=validator_stake,
                reserve_stake=stake_pool.reserve_stake,
                destination_pool_account=destination_pool_account,
                manager_fee_account=stake_pool.manager_fee_account,
                referral_pool_account=destination_pool_account,
                pool_mint=stake_pool.pool_mint,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                stake_history_sysvar=SYSVAR_STAKE_HISTORY_PUBKEY,
                token_program_id=stake_pool.token_program_id,
                stake_program_id=STAKE_PROGRAM_ID,
            )
        )
    )
    await client.send_transaction(
        txn, deposit_stake_authority, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def withdraw_stake(
    client: AsyncClient,
    payer: Keypair,
    source_transfer_authority: Keypair,
    destination_stake: Keypair,
    stake_pool_address: PublicKey,
    validator_vote: PublicKey,
    destination_stake_authority: PublicKey,
    source_pool_account: PublicKey,
    amount: int,
):
    """Withdraw a stake account from the pool by burning pool tokens.

    Creates a new rent-exempt stake account (``destination_stake``) funded
    by ``payer``, then sends the pool's ``WithdrawStake`` instruction,
    splitting ``amount`` worth of stake from the validator's stake account
    into the new one and assigning its authorities to
    ``destination_stake_authority``.

    Args:
        client: RPC client used to send the transaction.
        payer: Funds the new stake account's rent; signs.
        source_transfer_authority: Authority over ``source_pool_account``;
            signs (may be the same keypair as ``payer``).
        destination_stake: Keypair for the new stake account; signs.
        stake_pool_address: Address of the stake pool.
        validator_vote: Vote account whose validator stake is split.
        destination_stake_authority: New staker/withdrawer authority of the
            withdrawn stake.
        source_pool_account: Pool-token account the tokens are burned from.
        amount: Pool-token amount to burn.
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    (withdraw_authority, _) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    # PDA of the pool's stake account for this validator.
    (validator_stake, _) = find_stake_program_address(
        STAKE_POOL_PROGRAM_ID,
        validator_vote,
        stake_pool_address,
    )
    resp = await client.get_minimum_balance_for_rent_exemption(STAKE_LEN)
    stake_rent_exemption = resp['result']
    txn = Transaction()
    txn.add(
        sys.create_account(
            sys.CreateAccountParams(
                from_pubkey=payer.public_key,
                new_account_pubkey=destination_stake.public_key,
                lamports=stake_rent_exemption,
                space=STAKE_LEN,
                program_id=STAKE_PROGRAM_ID,
            )
        )
    )
    txn.add(
        sp.withdraw_stake(
            sp.WithdrawStakeParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                validator_list=stake_pool.validator_list,
                withdraw_authority=withdraw_authority,
                validator_stake=validator_stake,
                destination_stake=destination_stake.public_key,
                destination_stake_authority=destination_stake_authority,
                source_transfer_authority=source_transfer_authority.public_key,
                source_pool_account=source_pool_account,
                manager_fee_account=stake_pool.manager_fee_account,
                pool_mint=stake_pool.pool_mint,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                token_program_id=stake_pool.token_program_id,
                stake_program_id=STAKE_PROGRAM_ID,
                amount=amount,
            )
        )
    )
    # Avoid listing the same keypair twice when payer is also the transfer
    # authority — duplicate signers would be redundant.
    signers = [payer, source_transfer_authority, destination_stake] \
        if payer != source_transfer_authority else [payer, destination_stake]
    await client.send_transaction(
        txn, *signers, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def update_stake_pool(client: AsyncClient, payer: Keypair, stake_pool_address: PublicKey):
    """Create and send all instructions to completely update a stake pool after epoch change.

    Performs the full three-phase update:

    1. ``UpdateValidatorListBalance`` for every validator, in chunks of
       ``MAX_VALIDATORS_TO_UPDATE`` pairs per instruction;
    2. ``UpdateStakePoolBalance`` to refresh the pool's total lamports;
    3. ``CleanupRemovedValidatorEntries`` to purge removed validators.

    Args:
        client: RPC client used to send the transactions.
        payer: Signs and pays for every transaction.
        stake_pool_address: Address of the stake pool to update.
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    resp = await client.get_account_info(stake_pool.validator_list, commitment=Confirmed)
    data = resp['result']['value']['data']
    validator_list = ValidatorList.decode(data[0], data[1])
    (withdraw_authority, seed) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    update_list_instructions = []
    # Split the validator list into chunks so each instruction stays within
    # the program's per-instruction account limit.
    validator_chunks = [
        validator_list.validators[i:i+MAX_VALIDATORS_TO_UPDATE]
        for i in range(0, len(validator_list.validators), MAX_VALIDATORS_TO_UPDATE)
    ]
    start_index = 0
    for validator_chunk in validator_chunks:
        # Each validator contributes a (validator stake, transient stake)
        # PDA pair, appended in that order.
        validator_and_transient_stake_pairs = []
        for validator in validator_chunk:
            (validator_stake_address, _) = find_stake_program_address(
                STAKE_POOL_PROGRAM_ID,
                validator.vote_account_address,
                stake_pool_address,
            )
            validator_and_transient_stake_pairs.append(validator_stake_address)
            (transient_stake_address, _) = find_transient_stake_program_address(
                STAKE_POOL_PROGRAM_ID,
                validator.vote_account_address,
                stake_pool_address,
                validator.transient_seed_suffix_start,
            )
            validator_and_transient_stake_pairs.append(transient_stake_address)
        update_list_instructions.append(
            sp.update_validator_list_balance(
                sp.UpdateValidatorListBalanceParams(
                    program_id=STAKE_POOL_PROGRAM_ID,
                    stake_pool=stake_pool_address,
                    withdraw_authority=withdraw_authority,
                    validator_list=stake_pool.validator_list,
                    reserve_stake=stake_pool.reserve_stake,
                    clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                    stake_history_sysvar=SYSVAR_STAKE_HISTORY_PUBKEY,
                    stake_program_id=STAKE_PROGRAM_ID,
                    validator_and_transient_stake_pairs=validator_and_transient_stake_pairs,
                    start_index=start_index,
                    no_merge=False,
                )
            )
        )
        start_index += MAX_VALIDATORS_TO_UPDATE
    if update_list_instructions:
        # Fire off all but the last list-update without waiting, then
        # confirm only the final one so the whole batch has landed before
        # the pool-balance update below.
        last_instruction = update_list_instructions.pop()
        for update_list_instruction in update_list_instructions:
            txn = Transaction()
            txn.add(update_list_instruction)
            await client.send_transaction(
                txn, payer, opts=TxOpts(skip_confirmation=True, preflight_commitment=Confirmed))
        txn = Transaction()
        txn.add(last_instruction)
        await client.send_transaction(
            txn, payer, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
    txn = Transaction()
    txn.add(
        sp.update_stake_pool_balance(
            sp.UpdateStakePoolBalanceParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                withdraw_authority=withdraw_authority,
                validator_list=stake_pool.validator_list,
                reserve_stake=stake_pool.reserve_stake,
                manager_fee_account=stake_pool.manager_fee_account,
                pool_mint=stake_pool.pool_mint,
                token_program_id=stake_pool.token_program_id,
            )
        )
    )
    txn.add(
        sp.cleanup_removed_validator_entries(
            sp.CleanupRemovedValidatorEntriesParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                validator_list=stake_pool.validator_list,
            )
        )
    )
    await client.send_transaction(
        txn, payer, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def increase_validator_stake(
    client: AsyncClient, payer: Keypair, staker: Keypair, stake_pool_address: PublicKey,
    validator_vote: PublicKey, lamports: int
):
    """Move ``lamports`` from the pool reserve onto a validator via a
    transient stake account.

    Fix: ``next()`` without a default raised ``StopIteration`` when the
    validator was not in the list, which inside a coroutine surfaces as an
    opaque ``RuntimeError``; a missing validator now raises a descriptive
    ``ValueError`` instead.

    Raises:
        ValueError: if ``validator_vote`` is not in the pool's validator
            list.
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    resp = await client.get_account_info(stake_pool.validator_list, commitment=Confirmed)
    data = resp['result']['value']['data']
    validator_list = ValidatorList.decode(data[0], data[1])
    (withdraw_authority, seed) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    validator_info = next(
        (x for x in validator_list.validators if x.vote_account_address == validator_vote),
        None,
    )
    if validator_info is None:
        raise ValueError(f"Validator {validator_vote} is not in the stake pool")
    transient_stake_seed = validator_info.transient_seed_suffix_start + 1  # bump up by one to avoid reuse
    (transient_stake, _) = find_transient_stake_program_address(
        STAKE_POOL_PROGRAM_ID,
        validator_info.vote_account_address,
        stake_pool_address,
        transient_stake_seed,
    )
    txn = Transaction()
    txn.add(
        sp.increase_validator_stake(
            sp.IncreaseValidatorStakeParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                staker=staker.public_key,
                withdraw_authority=withdraw_authority,
                validator_list=stake_pool.validator_list,
                reserve_stake=stake_pool.reserve_stake,
                transient_stake=transient_stake,
                validator_vote=validator_vote,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                rent_sysvar=SYSVAR_RENT_PUBKEY,
                stake_history_sysvar=SYSVAR_STAKE_HISTORY_PUBKEY,
                stake_config_sysvar=SYSVAR_STAKE_CONFIG_ID,
                system_program_id=sys.SYS_PROGRAM_ID,
                stake_program_id=STAKE_PROGRAM_ID,
                lamports=lamports,
                transient_stake_seed=transient_stake_seed,
            )
        )
    )
    # Avoid listing the same keypair twice when payer is also the staker.
    signers = [payer, staker] if payer != staker else [payer]
    await client.send_transaction(
        txn, *signers, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
async def decrease_validator_stake(
    client: AsyncClient, payer: Keypair, staker: Keypair, stake_pool_address: PublicKey,
    validator_vote: PublicKey, lamports: int
):
    """Move ``lamports`` off a validator's stake account back toward the
    reserve via a transient stake account.

    Fix: ``next()`` without a default raised ``StopIteration`` when the
    validator was not in the list, which inside a coroutine surfaces as an
    opaque ``RuntimeError``; a missing validator now raises a descriptive
    ``ValueError`` instead.

    Raises:
        ValueError: if ``validator_vote`` is not in the pool's validator
            list.
    """
    resp = await client.get_account_info(stake_pool_address, commitment=Confirmed)
    data = resp['result']['value']['data']
    stake_pool = StakePool.decode(data[0], data[1])
    resp = await client.get_account_info(stake_pool.validator_list, commitment=Confirmed)
    data = resp['result']['value']['data']
    validator_list = ValidatorList.decode(data[0], data[1])
    (withdraw_authority, seed) = find_withdraw_authority_program_address(STAKE_POOL_PROGRAM_ID, stake_pool_address)
    validator_info = next(
        (x for x in validator_list.validators if x.vote_account_address == validator_vote),
        None,
    )
    if validator_info is None:
        raise ValueError(f"Validator {validator_vote} is not in the stake pool")
    (validator_stake, _) = find_stake_program_address(
        STAKE_POOL_PROGRAM_ID,
        validator_info.vote_account_address,
        stake_pool_address,
    )
    transient_stake_seed = validator_info.transient_seed_suffix_start + 1  # bump up by one to avoid reuse
    (transient_stake, _) = find_transient_stake_program_address(
        STAKE_POOL_PROGRAM_ID,
        validator_info.vote_account_address,
        stake_pool_address,
        transient_stake_seed,
    )
    txn = Transaction()
    txn.add(
        sp.decrease_validator_stake(
            sp.DecreaseValidatorStakeParams(
                program_id=STAKE_POOL_PROGRAM_ID,
                stake_pool=stake_pool_address,
                staker=staker.public_key,
                withdraw_authority=withdraw_authority,
                validator_list=stake_pool.validator_list,
                validator_stake=validator_stake,
                transient_stake=transient_stake,
                clock_sysvar=SYSVAR_CLOCK_PUBKEY,
                rent_sysvar=SYSVAR_RENT_PUBKEY,
                system_program_id=sys.SYS_PROGRAM_ID,
                stake_program_id=STAKE_PROGRAM_ID,
                lamports=lamports,
                transient_stake_seed=transient_stake_seed,
            )
        )
    )
    # Avoid listing the same keypair twice when payer is also the staker.
    signers = [payer, staker] if payer != staker else [payer]
    await client.send_transaction(
        txn, *signers, opts=TxOpts(skip_confirmation=False, preflight_commitment=Confirmed))
| 40.947917
| 120
| 0.675443
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.