hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4e921adfc47af0014cf2b438fa5298e2ac03b8dd | 249 | py | Python | server.py | Anandsingh446/Docker-Jenkins-Demo | f439828e1a02a85675de784bb966642fc5f3d5c7 | [
"Apache-2.0"
] | null | null | null | server.py | Anandsingh446/Docker-Jenkins-Demo | f439828e1a02a85675de784bb966642fc5f3d5c7 | [
"Apache-2.0"
] | null | null | null | server.py | Anandsingh446/Docker-Jenkins-Demo | f439828e1a02a85675de784bb966642fc5f3d5c7 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
# TCP port the development server listens on.
PORT = 8000
# Response body served at the root route (encoded to UTF-8 before sending).
MESSAGE = "I love you "
# The WSGI application object for this single-route service.
app = Flask(__name__)
@app.route("/")
def root():
    """Serve the configured greeting at the site root as UTF-8 bytes."""
    return MESSAGE.encode("utf-8")
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the Werkzeug interactive debugger while
# host="0.0.0.0" binds all interfaces — confirm this is never deployed as-is.
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=PORT)
| 14.647059 | 50 | 0.634538 |
649a9652033c1dbcb8184ea83fbaa13239c11684 | 16,101 | py | Python | tests/template_tests/filter_tests/test_urlize.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 19 | 2015-07-07T02:08:59.000Z | 2021-11-08T11:05:40.000Z | tests/template_tests/filter_tests/test_urlize.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 31 | 2018-08-26T14:01:16.000Z | 2018-10-19T07:35:57.000Z | tests/template_tests/filter_tests/test_urlize.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 145 | 2019-03-14T18:54:45.000Z | 2022-03-04T20:25:31.000Z | from django.template.defaultfilters import urlize
from django.test import SimpleTestCase
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from ..utils import setup
class UrlizeTests(SimpleTestCase):
    """Template-filter tests for the ``urlize`` filter.

    NOTE(review): the expected strings (and two ``mark_safe`` inputs) below
    were restored to their HTML-escaped form (``&amp;``, ``&lt;``,
    ``&#x27;``, ...); this copy had its entities decoded, which broke the
    Python syntax of the ``test_urlize06`` literal outright.
    """

    @setup({'urlize01': '{% autoescape off %}{{ a|urlize }} {{ b|urlize }}{% endautoescape %}'})
    def test_urlize01(self):
        output = self.engine.render_to_string(
            'urlize01',
            {'a': 'http://example.com/?x=&y=', 'b': mark_safe('http://example.com?x=&y=<2>')},
        )
        self.assertEqual(
            output,
            '<a href="http://example.com/?x=&amp;y=" rel="nofollow">http://example.com/?x=&y=</a> '
            '<a href="http://example.com?x=&amp;y=%3C2%3E" rel="nofollow">http://example.com?x=&y=<2></a>'
        )

    @setup({'urlize02': '{{ a|urlize }} {{ b|urlize }}'})
    def test_urlize02(self):
        output = self.engine.render_to_string(
            'urlize02',
            {'a': "http://example.com/?x=&y=", 'b': mark_safe("http://example.com?x=&y=")},
        )
        self.assertEqual(
            output,
            '<a href="http://example.com/?x=&amp;y=" rel="nofollow">http://example.com/?x=&amp;y=</a> '
            '<a href="http://example.com?x=&amp;y=" rel="nofollow">http://example.com?x=&y=</a>'
        )

    @setup({'urlize03': '{% autoescape off %}{{ a|urlize }}{% endautoescape %}'})
    def test_urlize03(self):
        output = self.engine.render_to_string('urlize03', {'a': mark_safe("a &amp; b")})
        self.assertEqual(output, 'a &amp; b')

    @setup({'urlize04': '{{ a|urlize }}'})
    def test_urlize04(self):
        output = self.engine.render_to_string('urlize04', {'a': mark_safe("a &amp; b")})
        self.assertEqual(output, 'a &amp; b')

    # This will lead to a nonsense result, but at least it won't be
    # exploitable for XSS purposes when auto-escaping is on.
    @setup({'urlize05': '{% autoescape off %}{{ a|urlize }}{% endautoescape %}'})
    def test_urlize05(self):
        output = self.engine.render_to_string('urlize05', {'a': "<script>alert('foo')</script>"})
        self.assertEqual(output, "<script>alert('foo')</script>")

    @setup({'urlize06': '{{ a|urlize }}'})
    def test_urlize06(self):
        output = self.engine.render_to_string('urlize06', {'a': "<script>alert('foo')</script>"})
        # &#x27; per django.utils.html.escape (Django >= 3.0; older releases
        # emitted &#39; — confirm against the repository's Django version).
        self.assertEqual(output, '&lt;script&gt;alert(&#x27;foo&#x27;)&lt;/script&gt;')

    # mailto: testing for urlize
    @setup({'urlize07': '{{ a|urlize }}'})
    def test_urlize07(self):
        output = self.engine.render_to_string('urlize07', {'a': "Email me at me@example.com"})
        self.assertEqual(
            output,
            'Email me at <a href="mailto:me@example.com">me@example.com</a>',
        )

    @setup({'urlize08': '{{ a|urlize }}'})
    def test_urlize08(self):
        output = self.engine.render_to_string('urlize08', {'a': "Email me at <me@example.com>"})
        self.assertEqual(
            output,
            'Email me at &lt;<a href="mailto:me@example.com">me@example.com</a>&gt;',
        )

    @setup({'urlize09': '{% autoescape off %}{{ a|urlize }}{% endautoescape %}'})
    def test_urlize09(self):
        output = self.engine.render_to_string('urlize09', {'a': "http://example.com/?x=&y=<2>"})
        self.assertEqual(
            output,
            '<a href="http://example.com/?x=&amp;y=%3C2%3E" rel="nofollow">http://example.com/?x=&y=<2></a>',
        )
class FunctionTests(SimpleTestCase):
    """Direct tests of the urlize() function.

    NOTE(review): several expected strings had their HTML entities decoded in
    this copy (e.g. ``&quot;`` -> ``"``), which broke the Python syntax of two
    literals outright; the escaped forms below were restored from urlize()'s
    documented escaping behavior (hrefs are always escaped; surrounding text
    is escaped unless autoescape=False).
    """

    def test_urls(self):
        self.assertEqual(
            urlize('http://google.com'),
            '<a href="http://google.com" rel="nofollow">http://google.com</a>',
        )
        self.assertEqual(
            urlize('http://google.com/'),
            '<a href="http://google.com/" rel="nofollow">http://google.com/</a>',
        )
        self.assertEqual(
            urlize('www.google.com'),
            '<a href="http://www.google.com" rel="nofollow">www.google.com</a>',
        )
        self.assertEqual(
            urlize('djangoproject.org'),
            '<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>',
        )
        self.assertEqual(
            urlize('djangoproject.org/'),
            '<a href="http://djangoproject.org/" rel="nofollow">djangoproject.org/</a>',
        )

    def test_url_split_chars(self):
        # Quotes (single and double) and angle brackets shouldn't be considered
        # part of URLs.
        self.assertEqual(
            urlize('www.server.com"abc'),
            '<a href="http://www.server.com" rel="nofollow">www.server.com</a>&quot;abc',
        )
        self.assertEqual(
            urlize('www.server.com\'abc'),
            '<a href="http://www.server.com" rel="nofollow">www.server.com</a>&#x27;abc',
        )
        self.assertEqual(
            urlize('www.server.com<abc'),
            '<a href="http://www.server.com" rel="nofollow">www.server.com</a>&lt;abc',
        )
        self.assertEqual(
            urlize('www.server.com>abc'),
            '<a href="http://www.server.com" rel="nofollow">www.server.com</a>&gt;abc',
        )

    def test_email(self):
        self.assertEqual(
            urlize('info@djangoproject.org'),
            '<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>',
        )

    def test_word_with_dot(self):
        # NOTE(review): a stray trailing comma previously turned this statement
        # into a one-element tuple; removed.
        self.assertEqual(urlize('some.organization'), 'some.organization')

    def test_https(self):
        self.assertEqual(
            urlize('https://google.com'),
            '<a href="https://google.com" rel="nofollow">https://google.com</a>',
        )

    def test_quoting(self):
        """
        #9655 - Check urlize doesn't overquote already quoted urls. The
        teststring is the urlquoted version of 'http://hi.baidu.com/重新开始'
        """
        self.assertEqual(
            urlize('http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B'),
            '<a href="http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B" rel="nofollow">'
            'http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B</a>',
        )

    def test_urlencoded(self):
        self.assertEqual(
            urlize('www.mystore.com/30%OffCoupons!'),
            '<a href="http://www.mystore.com/30%25OffCoupons" rel="nofollow">'
            'www.mystore.com/30%OffCoupons</a>!',
        )
        self.assertEqual(
            urlize('https://en.wikipedia.org/wiki/Caf%C3%A9'),
            '<a href="https://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'https://en.wikipedia.org/wiki/Caf%C3%A9</a>',
        )

    def test_unicode(self):
        self.assertEqual(
            urlize('https://en.wikipedia.org/wiki/Café'),
            '<a href="https://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'https://en.wikipedia.org/wiki/Café</a>',
        )

    def test_parenthesis(self):
        """
        #11911 - Check urlize keeps balanced parentheses
        """
        self.assertEqual(
            urlize('https://en.wikipedia.org/wiki/Django_(web_framework)'),
            '<a href="https://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'https://en.wikipedia.org/wiki/Django_(web_framework)</a>',
        )
        self.assertEqual(
            urlize('(see https://en.wikipedia.org/wiki/Django_(web_framework))'),
            '(see <a href="https://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'https://en.wikipedia.org/wiki/Django_(web_framework)</a>)',
        )

    def test_nofollow(self):
        """
        #12183 - Check urlize adds nofollow properly - see #12183
        """
        self.assertEqual(
            urlize('foo@bar.com or www.bar.com'),
            '<a href="mailto:foo@bar.com">foo@bar.com</a> or '
            '<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>',
        )

    def test_idn(self):
        """
        #13704 - Check urlize handles IDN correctly
        """
        self.assertEqual(urlize('http://c✶.ws'), '<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')
        self.assertEqual(urlize('www.c✶.ws'), '<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')
        self.assertEqual(urlize('c✶.org'), '<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')
        self.assertEqual(urlize('info@c✶.org'), '<a href="mailto:info@xn--c-lgq.org">info@c✶.org</a>')

    def test_malformed(self):
        """
        #16395 - Check urlize doesn't highlight malformed URIs
        """
        self.assertEqual(urlize('http:///www.google.com'), 'http:///www.google.com')
        self.assertEqual(urlize('http://.google.com'), 'http://.google.com')
        self.assertEqual(urlize('http://@foo.com'), 'http://@foo.com')

    def test_tlds(self):
        """
        #16656 - Check urlize accepts more TLDs
        """
        self.assertEqual(urlize('usa.gov'), '<a href="http://usa.gov" rel="nofollow">usa.gov</a>')

    def test_invalid_email(self):
        """
        #17592 - Check urlize don't crash on invalid email with dot-starting
        domain
        """
        self.assertEqual(urlize('email@.stream.ru'), 'email@.stream.ru')

    def test_uppercase(self):
        """
        #18071 - Check urlize accepts uppercased URL schemes
        """
        self.assertEqual(
            urlize('HTTPS://github.com/'),
            '<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>',
        )

    def test_trailing_period(self):
        """
        #18644 - Check urlize trims trailing period when followed by parenthesis
        """
        self.assertEqual(
            urlize('(Go to http://www.example.com/foo.)'),
            '(Go to <a href="http://www.example.com/foo" rel="nofollow">http://www.example.com/foo</a>.)',
        )

    def test_trailing_multiple_punctuation(self):
        self.assertEqual(
            urlize('A test http://testing.com/example..'),
            'A test <a href="http://testing.com/example" rel="nofollow">http://testing.com/example</a>..'
        )
        self.assertEqual(
            urlize('A test http://testing.com/example!!'),
            'A test <a href="http://testing.com/example" rel="nofollow">http://testing.com/example</a>!!'
        )
        self.assertEqual(
            urlize('A test http://testing.com/example!!!'),
            'A test <a href="http://testing.com/example" rel="nofollow">http://testing.com/example</a>!!!'
        )
        self.assertEqual(
            urlize('A test http://testing.com/example.,:;)"!'),
            'A test <a href="http://testing.com/example" rel="nofollow">http://testing.com/example</a>.,:;)&quot;!'
        )

    def test_brackets(self):
        """
        #19070 - Check urlize handles brackets properly
        """
        self.assertEqual(
            urlize('[see www.example.com]'),
            '[see <a href="http://www.example.com" rel="nofollow">www.example.com</a>]',
        )
        self.assertEqual(
            urlize('see test[at[example.com'),
            'see <a href="http://test[at[example.com" rel="nofollow">test[at[example.com</a>',
        )
        self.assertEqual(
            urlize('[http://168.192.0.1](http://168.192.0.1)'),
            '[<a href="http://168.192.0.1](http://168.192.0.1)" rel="nofollow">'
            'http://168.192.0.1](http://168.192.0.1)</a>',
        )

    def test_wrapping_characters(self):
        # Each pair is (input wrapper chars, expected escaped output wrappers).
        wrapping_chars = (
            ('()', ('(', ')')),
            ('<>', ('&lt;', '&gt;')),
            ('[]', ('[', ']')),
            ('""', ('&quot;', '&quot;')),
            ("''", ('&#x27;', '&#x27;')),
        )
        for wrapping_in, (start_out, end_out) in wrapping_chars:
            with self.subTest(wrapping_in=wrapping_in):
                start_in, end_in = wrapping_in
                self.assertEqual(
                    urlize(start_in + 'https://www.example.org/' + end_in),
                    start_out +
                    '<a href="https://www.example.org/" rel="nofollow">https://www.example.org/</a>' +
                    end_out,
                )

    def test_ipv4(self):
        self.assertEqual(
            urlize('http://192.168.0.15/api/9'),
            '<a href="http://192.168.0.15/api/9" rel="nofollow">http://192.168.0.15/api/9</a>',
        )

    def test_ipv6(self):
        self.assertEqual(
            urlize('http://[2001:db8:cafe::2]/api/9'),
            '<a href="http://[2001:db8:cafe::2]/api/9" rel="nofollow">http://[2001:db8:cafe::2]/api/9</a>',
        )

    def test_quotation_marks(self):
        """
        #20364 - Check urlize correctly include quotation marks in links
        """
        self.assertEqual(
            urlize('before "hi@example.com" afterwards', autoescape=False),
            'before "<a href="mailto:hi@example.com">hi@example.com</a>" afterwards',
        )
        self.assertEqual(
            urlize('before hi@example.com" afterwards', autoescape=False),
            'before <a href="mailto:hi@example.com">hi@example.com</a>" afterwards',
        )
        self.assertEqual(
            urlize('before "hi@example.com afterwards', autoescape=False),
            'before "<a href="mailto:hi@example.com">hi@example.com</a> afterwards',
        )
        self.assertEqual(
            urlize('before \'hi@example.com\' afterwards', autoescape=False),
            'before \'<a href="mailto:hi@example.com">hi@example.com</a>\' afterwards',
        )
        self.assertEqual(
            urlize('before hi@example.com\' afterwards', autoescape=False),
            'before <a href="mailto:hi@example.com">hi@example.com</a>\' afterwards',
        )
        self.assertEqual(
            urlize('before \'hi@example.com afterwards', autoescape=False),
            'before \'<a href="mailto:hi@example.com">hi@example.com</a> afterwards',
        )

    def test_quote_commas(self):
        """
        #20364 - Check urlize copes with commas following URLs in quotes
        """
        self.assertEqual(
            urlize('Email us at "hi@example.com", or phone us at +xx.yy', autoescape=False),
            'Email us at "<a href="mailto:hi@example.com">hi@example.com</a>", or phone us at +xx.yy',
        )

    def test_exclamation_marks(self):
        """
        #23715 - Check urlize correctly handles exclamation marks after TLDs
        or query string
        """
        self.assertEqual(
            urlize('Go to djangoproject.com! and enjoy.'),
            'Go to <a href="http://djangoproject.com" rel="nofollow">djangoproject.com</a>! and enjoy.',
        )
        self.assertEqual(
            urlize('Search for google.com/?q=! and see.'),
            'Search for <a href="http://google.com/?q=" rel="nofollow">google.com/?q=</a>! and see.',
        )
        self.assertEqual(
            urlize('Search for google.com/?q=dj!`? and see.'),
            'Search for <a href="http://google.com/?q=dj%21%60%3F" rel="nofollow">google.com/?q=dj!`?</a> and see.',
        )
        self.assertEqual(
            urlize('Search for google.com/?q=dj!`?! and see.'),
            'Search for <a href="http://google.com/?q=dj%21%60%3F" rel="nofollow">google.com/?q=dj!`?</a>! and see.',
        )

    def test_non_string_input(self):
        self.assertEqual(urlize(123), '123')

    def test_autoescape(self):
        self.assertEqual(
            urlize('foo<a href=" google.com ">bar</a>buz'),
            'foo&lt;a href=&quot; <a href="http://google.com" rel="nofollow">google.com</a> &quot;&gt;bar&lt;/a&gt;buz'
        )

    def test_autoescape_off(self):
        self.assertEqual(
            urlize('foo<a href=" google.com ">bar</a>buz', autoescape=False),
            'foo<a href=" <a href="http://google.com" rel="nofollow">google.com</a> ">bar</a>buz',
        )

    def test_lazystring(self):
        prepend_www = lazy(lambda url: 'www.' + url, str)
        self.assertEqual(
            urlize(prepend_www('google.com')),
            '<a href="http://www.google.com" rel="nofollow">www.google.com</a>',
        )
| 41.07398 | 119 | 0.55332 |
a9cf1083b0cacf1421a0b8e4d75c551af4d1472d | 2,578 | py | Python | src/spring-cloud/azext_spring_cloud/_client_factory.py | Sneezry/azure-cli-extensions | bd186fe31c8fbd8c8b945fb749349e7f243be532 | [
"MIT"
] | null | null | null | src/spring-cloud/azext_spring_cloud/_client_factory.py | Sneezry/azure-cli-extensions | bd186fe31c8fbd8c8b945fb749349e7f243be532 | [
"MIT"
] | null | null | null | src/spring-cloud/azext_spring_cloud/_client_factory.py | Sneezry/azure-cli-extensions | bd186fe31c8fbd8c8b945fb749349e7f243be532 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from .vendored_sdks.appplatform.v2020_07_01 import AppPlatformManagementClient
from .vendored_sdks.appplatform.v2020_11_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20201101preview
)
from .vendored_sdks.appplatform.v2022_05_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20220501preview
)
from .vendored_sdks.appplatform.v2021_06_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20210601preview
)
from .vendored_sdks.appplatform.v2021_09_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20210901preview
)
def cf_spring_cloud_enterprise(cli_ctx, *_):
    """Return an app platform client pinned to the 2022-05-01-preview API."""
    return get_mgmt_service_client(cli_ctx, AppPlatformManagementClient_20220501preview)
def cf_spring_cloud(cli_ctx, *_):
    """Return an app platform client for the default (2020-07-01) API."""
    return get_mgmt_service_client(cli_ctx, AppPlatformManagementClient)
def cf_spring_cloud_20201101preview(cli_ctx, *_):
    """Return an app platform client pinned to the 2020-11-01-preview API."""
    return get_mgmt_service_client(cli_ctx, AppPlatformManagementClient_20201101preview)
def cf_spring_cloud_20210601preview(cli_ctx, *_):
    """Return an app platform client pinned to the 2021-06-01-preview API."""
    return get_mgmt_service_client(cli_ctx, AppPlatformManagementClient_20210601preview)
def cf_spring_cloud_20210901preview(cli_ctx, *_):
    """Return an app platform client pinned to the 2021-09-01-preview API."""
    return get_mgmt_service_client(cli_ctx, AppPlatformManagementClient_20210901preview)
def cf_resource_groups(cli_ctx, subscription_id=None):
    """Return the resource_groups operation group of the ARM resources client."""
    resources_client = get_mgmt_service_client(
        cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, subscription_id=subscription_id)
    return resources_client.resource_groups
def cf_app_services(cli_ctx, *_):
    """Return the services operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.services
def cf_apps(cli_ctx, *_):
    """Return the apps operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.apps
def cf_deployments(cli_ctx, *_):
    """Return the deployments operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.deployments
def cf_bindings(cli_ctx, *_):
    """Return the bindings operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.bindings
def cf_config_servers(cli_ctx, *_):
    """Return the config_servers operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.config_servers
def cf_certificates(cli_ctx, *_):
    """Return the certificates operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.certificates
def cf_custom_domains(cli_ctx, *_):
    """Return the custom_domains operation group of the default client."""
    client = cf_spring_cloud(cli_ctx)
    return client.custom_domains
| 36.309859 | 94 | 0.766098 |
c64b92b33c02309900d1d3425e56ecb3a5affacc | 23,993 | py | Python | tensorflow/python/autograph/pyct/common_transformers/anf.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 56 | 2018-06-21T13:47:23.000Z | 2020-05-13T09:31:47.000Z | tensorflow/python/autograph/pyct/common_transformers/anf.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 6 | 2022-01-15T07:17:47.000Z | 2022-02-14T15:28:22.000Z | tensorflow/python/autograph/pyct/common_transformers/anf.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 15 | 2018-09-06T14:18:32.000Z | 2020-05-14T06:35:30.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion to A-normal form.
The general idea of A-normal form is that every intermediate value is
explicitly named with a variable. For more, see
https://en.wikipedia.org/wiki/A-normal_form.
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
import six
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
class DummyGensym(object):
  """A dumb gensym that suffixes a stem by sequential numbers from 1000."""

  def __init__(self, ctx):
    del ctx  # Unused by this naive implementation.
    # A proper implementation needs to account for:
    #   * ctx.info.namespace
    #   * all the symbols defined in the AST
    #   * the symbols generated so far
    self._counter = 1000

  def new_name(self, stem='tmp'):
    """Returns a fresh name of the form '<stem>_<number>', numbers from 1001."""
    self._counter += 1
    return '{}_{}'.format(stem, self._counter)
# Decision callables usable in transformation configurations: REPLACE forces
# extraction of the matched child expression into a temporary; LEAVE keeps it.
REPLACE = lambda _1, _2, _3: True
LEAVE = lambda _1, _2, _3: False
# Wildcard sentinel matching any parent type, field name, or child type in an
# ASTEdgePattern (compared by identity).
ANY = object()
class ASTEdgePattern(collections.namedtuple(
    'ASTEdgePattern', ['parent', 'field', 'child'])):
  """A pattern defining a type of AST edge.

  This consists of three components:
  - The type of the parent node, checked with isinstance,
  - The name of the field, checked with string equality, and
  - The type of the child node, also checked with isinstance.
  If all three match, the whole pattern is considered to match.

  In all three slots, the special value `anf.ANY` is treated as "match
  anything".  The internal nodes are produced from the `gast` library rather
  than the standard `ast` module, which may affect `isinstance` checks.
  """
  __slots__ = ()

  def matches(self, parent, field, child):
    """Computes whether this pattern matches the given edge."""
    # Each component either is the ANY wildcard or must match on its own.
    if self.parent is not ANY and not isinstance(parent, self.parent):
      return False
    if self.field is not ANY and field != self.field:
      return False
    return self.child is ANY or isinstance(child, self.child)
class AnfTransformer(transformer.Base):
  """Performs the conversion to A-normal form (ANF)."""

  # The algorithm is a postorder recursive tree walk.  Any given node A may, in
  # general, require creation of a series B of Assign statements, which compute
  # and explicitly name the intermediate values needed to compute the value of
  # A.  If A was already a statement, it can be replaced with the sequence B +
  # [A].  If A was an expression, B needs to be propagated up the tree until a
  # statement is encountered.  Since the `ast.NodeTransformer` framework makes
  # no provision for subtraversals returning side information, this class
  # accumulates the sequence B in an instance variable.

  # The only other subtlety is that some Python statements (like `if`) have both
  # expression fields (`test`) and statement list fields (`body` and `orelse`).
  # Any additional assignments needed to name all the intermediate values in the
  # `test` can be prepended to the `if` node, but assignments produced by
  # processing the `body` and the `orelse` need to be kept together with them,
  # and not accidentally lifted out of the `if`.
  def __init__(self, ctx, config, gensym_source=None):
    """Creates an ANF transformer.

    Args:
      ctx: transformer.Context
      config: Configuration; a list of (ASTEdgePattern, decision callable)
        pairs, tried in order, or None for the default configuration.
      gensym_source: An optional object with the same interface as `DummyGensym`
        for generating unique names
    """
    super(AnfTransformer, self).__init__(ctx)
    if config is None:
      # These could be pulled out, but are generally considered to already be in
      # A-normal form.  Thus they are left in by default, but could be pulled
      # out if the configuration calls for it.
      try:
        # TODO(b/140808434): Fix this.
        # gast pre-0.3.  Attribute access on a missing class (e.g. gast.Num)
        # raises AttributeError, which selects the 0.3+ branch below.
        literal_node_types = (
            gast.Num, gast.Str, gast.Bytes, gast.NameConstant,
            gast.Name  # Name is here to cover True, False, and None in Python 2
        )
      except AttributeError:
        # gast 0.3+
        literal_node_types = (
            gast.Constant,
            gast.Name  # Name is here to cover True, False, and None in Python 2
        )
      # Default policy: leave literals in place, extract every other expression.
      self._overrides = [
          (ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE),
          (ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]
    else:
      self._overrides = config
    if gensym_source is None:
      self._gensym = DummyGensym(ctx)
    else:
      self._gensym = gensym_source(ctx)
    # Assign statements accumulated while processing the current statement.
    self._pending_statements = []
def _consume_pending_statements(self):
ans = self._pending_statements
self._pending_statements = []
return ans
  def _add_pending_statement(self, stmt):
    """Queues `stmt` for emission before the statement being processed."""
    self._pending_statements.append(stmt)
def _match(self, pattern, parent, field, child):
if pattern is ANY:
return True
else:
return pattern.matches(parent, field, child)
def _should_transform(self, parent, field, child):
for pat, result in self._overrides:
if self._match(pat, parent, field, child):
return result(parent, field, child)
# Fell off the end of the pattern list: do not transform
return False
def _do_transform_node(self, node):
temp_name = self._gensym.new_name()
temp_assign = templates.replace(
'temp_name = expr', temp_name=temp_name, expr=node)[0]
self._add_pending_statement(temp_assign)
answer = templates.replace('temp_name', temp_name=temp_name)[0]
return answer
  def _ensure_node_in_anf(self, parent, field, node):
    """Puts `node` in A-normal form, by replacing it with a variable if needed.

    The exact definition of A-normal form is given by the configuration.  The
    parent and the incoming field name are only needed because the configuration
    may be context-dependent.

    Args:
      parent: An AST node, the parent of `node`.
      field: The field name under which `node` is the child of `parent`.
      node: An AST node, potentially to be replaced with a variable reference.

    Returns:
      node: An AST node; the argument if transformation was not necessary,
        or the new variable reference if it was.
    """
    # Order of the checks below matters: trivial/compound cases are handled
    # before the configuration is consulted.
    if node is None:
      return node
    if _is_trivial(node):
      return node
    if isinstance(node, list):
      # If something's field was actually a list, e.g., variadic arguments.
      return [self._ensure_node_in_anf(parent, field, n) for n in node]
    if isinstance(node, gast.keyword):
      # Only the keyword's value is an expression; recurse into it.
      node.value = self._ensure_node_in_anf(parent, field, node.value)
      return node
    if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
      # These nodes aren't really extractable in their own right, but their
      # subnodes might be.  Propagate the parent and field name to the child
      # nodes, instead of querying the configuration for children of, e.g.,
      # gast.Starred.
      return self._ensure_fields_in_anf(node, parent, field)
    if self._should_transform(parent, field, node):
      return self._do_transform_node(node)
    else:
      return node
  def _ensure_fields_in_anf(self, node, parent=None, super_field=None):
    """Ensures every AST field of `node` is in A-normal form, in place.

    `parent`/`super_field` override the edge reported to the configuration,
    which is used when `node` is a pass-through (e.g. gast.Starred).
    """
    for field in node._fields:
      if field.startswith('__'):
        # Skip private bookkeeping fields.
        continue
      parent_supplied = node if parent is None else parent
      field_supplied = field if super_field is None else super_field
      setattr(node, field, self._ensure_node_in_anf(
          parent_supplied, field_supplied, getattr(node, field)))
    return node
def _visit_strict_statement(self, node, children_ok_to_transform=True):
assert not self._pending_statements
node = self.generic_visit(node)
if children_ok_to_transform:
self._ensure_fields_in_anf(node)
results = self._consume_pending_statements()
results.append(node)
return results
def _visit_trivial_only_statement(self, node, msg):
assert not self._pending_statements
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
if self._pending_statements:
raise ValueError(msg)
else:
return node
def _visit_strict_expression(self, node):
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
return node
def _visit_trivial_only_expression(self, node, msg):
k = len(self._pending_statements)
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
# This check relies on there being no opportunities to consume pending
# statements while traversing children of an expression.
if len(self._pending_statements) != k:
raise ValueError(msg)
else:
return node
# Note on code order: These are listed in the same order as the grammar
# elements on https://github.com/serge-sans-paille/gast
# FunctionDef, AsyncFunctionDef, and ClassDef should be correct by default.
  def visit_Return(self, node):
    """Extracts nontrivial subexpressions of the returned value."""
    return self._visit_strict_statement(node)
  def visit_Delete(self, node):
    """Visits Delete without extracting the node's own fields (del targets)."""
    return self._visit_strict_statement(node, children_ok_to_transform=False)
  def visit_Assign(self, node):
    """Visits Assign without extracting the node's own fields."""
    return self._visit_strict_statement(node, children_ok_to_transform=False)
  def visit_AugAssign(self, node):
    """Visits AugAssign without extracting the node's own fields."""
    return self._visit_strict_statement(node, children_ok_to_transform=False)
  def visit_Print(self, node):
    """Extracts nontrivial arguments of the Python 2 print statement."""
    return self._visit_strict_statement(node)
  def visit_For(self, node):
    """Visits a For loop; extractions from `iter` are placed before the loop."""
    assert not self._pending_statements
    # It's important to visit node.iter first, because any statements created
    # thereby need to live outside the body.
    self.visit(node.iter)
    node.iter = self._ensure_node_in_anf(node, 'iter', node.iter)
    iter_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.iter, but that is correct because by
    # this point the node.iter link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.iter, as
    # then it may be large and will be uselessly transformed again. This
    # behavior is what causes the documented effect that configuration callables
    # may be invoked more than once of the same links; if the code is rewritten
    # not to do that (anywhere), the docstring of `transform` should be updated.
    node = self.generic_visit(node)
    assert not self._pending_statements
    iter_stmts.append(node)
    return iter_stmts
  def visit_AsyncFor(self, node):
    """Visits AsyncFor; rejects nodes that would require extraction."""
    msg = ('Nontrivial AsyncFor nodes not supported yet '
           '(need to think through the semantics).')
    return self._visit_trivial_only_statement(node, msg)
  def visit_While(self, node):
    """Visits a While loop; rejects loops whose test requires extraction."""
    assert not self._pending_statements
    self.visit(node.test)
    node.test = self._ensure_node_in_anf(node, 'test', node.test)
    if self._pending_statements:
      # Hoisting the test would evaluate it only once, changing loop semantics.
      msg = ('While with nontrivial test not supported yet '
             '(need to avoid precomputing the test).')
      raise ValueError(msg)
    # If traversing node.test yielded no statements extracted, the generic visit
    # will do the right thing.
    return self.generic_visit(node)
  def visit_If(self, node):
    """Visits an If; extractions from `test` are placed before the statement."""
    assert not self._pending_statements
    # It's important to visit node.test first, because any statements created
    # thereby need to live outside the body.
    self.visit(node.test)
    node.test = self._ensure_node_in_anf(node, 'test', node.test)
    condition_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.test, but that is correct because by
    # this point the node.test link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.test, as
    # then it may be large and will be uselessly transformed again. This
    # happens in several places.
    node = self.generic_visit(node)
    assert not self._pending_statements
    condition_stmts.append(node)
    return condition_stmts
  def visit_With(self, node):
    """Visits a With; extractions from `items` are placed before the block."""
    assert not self._pending_statements
    # It's important to visit node.items first, because any statements created
    # thereby need to live outside the body.
    for item in node.items:
      self.visit(item)
    node.items = [self._ensure_node_in_anf(node, 'items', n)
                  for n in node.items]
    contexts_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.items, but that is correct because by
    # this point the node.items link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.items, as
    # then it may be large and will be uselessly transformed again. This
    # happens in several places.
    node = self.generic_visit(node)
    assert not self._pending_statements
    contexts_stmts.append(node)
    return contexts_stmts
  def visit_AsyncWith(self, node):
    """Visits AsyncWith; rejects nodes that would require extraction."""
    msg = ('Nontrivial AsyncWith nodes not supported yet '
           '(need to think through the semantics).')
    return self._visit_trivial_only_statement(node, msg)
  def visit_Raise(self, node):
    """Extracts nontrivial subexpressions of the raised exception."""
    return self._visit_strict_statement(node)
# Try should be correct by default.
def visit_Assert(self, node):
    # Only trivial Assert nodes are accepted; hoisting the test or message
    # would change when (and whether) they are evaluated.
    return self._visit_trivial_only_statement(
        node,
        'Nontrivial Assert nodes not supported yet '
        '(need to avoid computing the test when assertions are off, and '
        'avoid computing the irritant when the assertion does not fire).')
# Import and ImportFrom should be correct by default.
def visit_Exec(self, node):
return self._visit_strict_statement(node)
# Global and Nonlocal should be correct by default.
def visit_Expr(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
# Pass, Break, and Continue should be correct by default.
def visit_BoolOp(self, node):
msg = ('Nontrivial BoolOp nodes not supported yet '
'(need to preserve short-circuiting semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_BinOp(self, node):
return self._visit_strict_expression(node)
def visit_UnaryOp(self, node):
return self._visit_strict_expression(node)
def visit_Lambda(self, node):
msg = ('Nontrivial Lambda nodes not supported '
'(cannot insert statements into lambda bodies).')
return self._visit_trivial_only_expression(node, msg)
def visit_IfExp(self, node):
msg = ('Nontrivial IfExp nodes not supported yet '
'(need to convert to If statement, to evaluate branches lazily '
'and insert statements into them).')
return self._visit_trivial_only_expression(node, msg)
def visit_Dict(self, node):
return self._visit_strict_expression(node)
def visit_Set(self, node):
return self._visit_strict_expression(node)
def visit_ListComp(self, node):
msg = ('ListComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_SetComp(self, node):
msg = ('SetComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_DictComp(self, node):
msg = ('DictComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_GeneratorExp(self, node):
msg = ('GeneratorExp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_Await(self, node):
msg = ('Nontrivial Await nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_Yield(self, node):
return self._visit_strict_expression(node)
def visit_YieldFrom(self, node):
msg = ('Nontrivial YieldFrom nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Compare(self, node):
    # Chained comparisons (a < b < c) short-circuit, so their operands
    # cannot simply be pulled out into assignments; reject them outright.
    if len(node.ops) <= 1:
        return self._visit_strict_expression(node)
    raise ValueError('Multi-ary compare nodes not supported yet '
                     '(need to preserve short-circuiting semantics).')
def visit_Call(self, node):
return self._visit_strict_expression(node)
def visit_Repr(self, node):
msg = ('Nontrivial Repr nodes not supported yet '
'(need to research their syntax and semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_FormattedValue(self, node):
msg = ('Nontrivial FormattedValue nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_JoinedStr(self, node):
msg = ('Nontrivial JoinedStr nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Attribute(self, node):
return self._visit_strict_expression(node)
def visit_Subscript(self, node):
return self._visit_strict_expression(node)
# Starred and Name are correct by default, because the right thing to do is to
# just recur.
def visit_List(self, node):
    # A list in Store context is an assignment target; its elements must not
    # be replaced with temporaries.
    node = self.generic_visit(node)
    if isinstance(node.ctx, gast.Store):
        return node
    self._ensure_fields_in_anf(node)
    return node
def visit_Tuple(self, node):
    # A tuple in Store context is an assignment target; its elements must
    # not be replaced with temporaries.
    node = self.generic_visit(node)
    if isinstance(node.ctx, gast.Store):
        return node
    self._ensure_fields_in_anf(node)
    return node
def _is_py2_name_constant(node):
    # Under Python 2, True/False/None parse as plain Name nodes rather than
    # dedicated constant nodes; detect that case by identifier.
    if not isinstance(node, gast.Name):
        return False
    return node.id in ('True', 'False', 'None')
def _is_trivial(node):
    """Returns whether to consider the given node 'trivial'.

    A 'trivial' node is one that cannot meaningfully be pulled out into its
    own assignment statement (variable references, operator tokens, raw
    field values, and so on). Detecting this robustly is surprisingly hard
    because gast's representation of constants has changed across versions.

    Args:
        node: An AST node (or raw field value) to check for triviality.

    Returns:
        A Python `bool` indicating whether the node is trivial.
    """
    trivial_node_types = (
        gast.Name,                                       # variable names
        bool, six.string_types,                          # non-node AST fields
        # Binary operators
        gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow,
        gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd,
        gast.FloorDiv,
        # Unary operators
        gast.Invert, gast.Not, gast.UAdd, gast.USub,
        # Comparison operators
        gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE,
        gast.Is, gast.IsNot, gast.In, gast.NotIn,
        # Other leaf nodes that don't make sense standalone.
        gast.expr_context,
    )
    if isinstance(node, trivial_node_types) and not _is_py2_name_constant(node):
        return True
    # Ellipsis detection differs by gast version: pre-0.3 exposes
    # gast.Ellipsis, while 0.3+ folds Ellipsis into gast.Constant.
    ellipsis_type = getattr(gast, 'Ellipsis', None)
    if ellipsis_type is not None:
        return isinstance(node, ellipsis_type)
    if isinstance(node, gast.Constant) and node.value == Ellipsis:
        return True
    return False
def transform(node, ctx, config=None, gensym_source=None):
"""Converts the given node to A-normal form (ANF).
The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
What exactly should be considered A-normal form for any given programming
language is not completely obvious. The transformation defined here is
therefore configurable as to which syntax to replace with a fresh variable and
which to leave be. The configuration is intentionally flexible enough to
define very precise variable insertion transformations, should that be
desired.
The configuration is a list of syntax rules, each of which is a 2-tuple:
- An `ASTEdgePattern` (which see) defining a type of AST edge, and
- Whether to transform children of such edges.
The special object `anf.ANY` may be used as a pattern that matches all edges.
Each replacement directive is one of three possible things:
- The object `anf.REPLACE`, meaning "Replace this child node with a variable",
- The object `anf.LEAVE`, meaning "Do not replace this child node with a
variable", or
- A Python callable. If a callable, it is called with the parent node, the
field name, and the child node, and must compute a boolean indicating
whether to transform the child node or not. The callable is free to use
whatever context information it chooses. The callable may be invoked more
than once on the same link, and must produce the same answer each time.
The syntax rules are tested in order, and the first match governs. If no rule
matches, the node is not transformed.
The above rules notwithstanding,
- Variable references are never replaced with (fresh) variables, as that would
accomplish nothing.
- The left-hand children of Assign and AugAssign nodes, and the children of
Del nodes, are never replaced with variables, as that would break their
semantics.
- The right-hand children of Assign nodes are never replaced with variables,
as the original assignment would still have to be present in the result
to define the new variable. (That is, there's no point in transforming
`x = sin(y)` into `tmp = sin(y); x = tmp`.)
- The right-hand children of AugAssign nodes are never replaced with variables
either, but only because the difference from Assign was considered a
potential source of confusion (and it would have been slightly awkward in
the code to treat the RHS differently than the LHS).
- Various special-purpose AST nodes are not exposed to the configuration, lest
the transform produce invalid syntax like, e.g., `tmp = +; x = 1 tmp 2`.
For example, the configuration
```python
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
gives explicit fresh names to all expressions regardless of context (except as
outlined above), whereas
```python
[(anf.ASTEdgePattern(gast.If, "test", anf.ANY), anf.REPLACE)]
```
only transforms the conditionals of `if` statements (but not, e.g., `while`).
If no configuration is supplied, the default behavior is to transform all
expressions except literal constants, which is defined as a configuration as
```python
# For Python 3, and gast library versions before 0.3
literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant)
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, literals), anf.LEAVE),
(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
Args:
node: The node to transform.
ctx: transformer.EntityInfo. TODO(mdan): What information does this
argument provide?
config: Optional ANF configuration. If omitted, ANF replaces all expression
expect literal constants.
gensym_source: An optional object with the same interface as `DummyGensym`
for generating unique names.
"""
return AnfTransformer(ctx, config, gensym_source=gensym_source).visit(node)
| 38.886548 | 80 | 0.711958 |
9f440f2005183d7000319dcb42ee8c329df60fab | 2,879 | py | Python | md3.py | liuwuliuyun/meta-data-api | aa768ae2178734a81c3a07a43e07bfafc7d670a6 | [
"MIT"
] | null | null | null | md3.py | liuwuliuyun/meta-data-api | aa768ae2178734a81c3a07a43e07bfafc7d670a6 | [
"MIT"
] | null | null | null | md3.py | liuwuliuyun/meta-data-api | aa768ae2178734a81c3a07a43e07bfafc7d670a6 | [
"MIT"
] | null | null | null | import datetime
import socket
sock = socket.socket()
# Bind the socket to the port
server_address = ('localhost', 5559)
sock.bind(server_address)
sock.listen(5)
#data structure
file_dict = {'root' : ['user']}
file_stat = {}
header = '[MD3'+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+']: '
# TEST CODE COMMENT WHEN DEPLOY
# test_string = ['/user/s1/LiuYun', '/user/s1/Github', '/user/s1/LiuYun/myprofile.py', '/user/s1/system.ini']
def mkdir(s):
    # Register every directory on the slash-separated path `s`
    # (e.g. '/user/s1') in the in-memory tree. file_dict maps a
    # 'root'-prefixed path string to the list of its child names.
    # Returns the whole (mutated) file_dict.
    dir_list = s.split('/')
    start_dir = 'root'
    for dir_item in dir_list[1:]:
        if start_dir in file_dict:
            # Avoid duplicate child entries under an existing directory.
            if dir_item not in file_dict[start_dir]:
                file_dict[start_dir].append(dir_item)
        else:
            file_dict[start_dir] = [dir_item]
        start_dir = start_dir+'/'+dir_item
    return file_dict
def create_file(s):
mkdir(s)
file_stat[s] = {'Access Time': header, 'Block Size': '233KB'}
def readdir(s):
    # List every path reachable (recursively) under `s`, with the internal
    # 'root' prefix attached, deduplicated and sorted.
    return sorted(set(_readdir('root' + s)))
def _readdir(s):
result_list = []
if s in file_dict:
result_list += [s+'/'+i for i in file_dict[s]]
for item in result_list:
result_list += _readdir(item)
return result_list
def stat(s):
    # Return the metadata dict recorded for path `s`, or None when the path
    # was never registered via create_file.
    return file_stat.get(s)
def rmfile(s):
    """Remove file `s` (e.g. '/user/s1/file.py') from the metadata store.

    Fixes two defects in the previous version:
    - it removed the basename from EVERY directory, deleting unrelated
      files that merely shared a name; now only the file's own parent
      directory entry is removed.
    - `file_stat.pop(s)` raised KeyError for paths that were never
      registered; unknown paths are now tolerated.
    """
    global file_dict, file_stat
    name = s.split('/')[-1]
    # Drop stat info; tolerate paths that were never registered.
    file_stat.pop(s, None)
    # file_dict keys are 'root'-prefixed parent paths (see mkdir).
    parent = 'root' + s.rsplit('/', 1)[0]
    children = file_dict.get(parent)
    if children and name in children:
        children.remove(name)
# Blocking single-threaded request loop: each connection carries one
# space-separated "<command> <path>" request and gets one reply.
while True:
    con, clt_addr = sock.accept()
    # NOTE(review): a single recv(1024) assumes the whole request fits in
    # one TCP segment — confirm clients never send more.
    rec_str = con.recv(1024).decode()
    try:
        command = rec_str.split(' ')[0]
        try:
            # Commands without an argument (no second token) get ''.
            dir_str = rec_str.split(' ')[1]
        except:
            dir_str = ''
        if command == 'ls':
            # Recursive listing under dir_str.
            out_str = readdir(dir_str)
            con.send(str(out_str).encode())
        elif command == 'stat':
            # Metadata lookup; stat() returns None for unknown paths.
            out_str = stat(dir_str)
            con.send(str(out_str).encode())
        elif command == 'touch':
            create_file(dir_str)
            con.send('File creation succeed ...'.encode())
        elif command == 'mkdir':
            mkdir(dir_str)
            con.send('Directory insertion succeed ...'.encode())
        elif command == 'rm':
            rmfile(dir_str)
            con.send(('File ' + dir_str + ' removed ...').encode())
        else:
            con.send('Can not parse this command, plz check ...'.encode())
    except:
        # Broad catch keeps the server alive on any malformed request; the
        # original exception is discarded, so failures are not diagnosable.
        con.send('Error occured when processing command, check before proceed ...'.encode())
    con.close()
#TEST CODE COMMENT WHEN DEPLOY
# mkdir(test_string[0])
# mkdir(test_string[1])
# create_file(test_string[2])
# create_file(test_string[3])
# print(readdir('/user'))
# print(file_dict)
# rmfile(test_string[3])
# print(file_dict)
# print(readdir('/user'))
| 26.906542 | 109 | 0.585967 |
233e24d775e28618520d022a082f2da339dc09bb | 1,263 | py | Python | src/marketdata/main.py | hyperwave-research/hyperwave | 561d2225666f2190213184e464e8e9376c241b43 | [
"MIT"
] | null | null | null | src/marketdata/main.py | hyperwave-research/hyperwave | 561d2225666f2190213184e464e8e9376c241b43 | [
"MIT"
] | null | null | null | src/marketdata/main.py | hyperwave-research/hyperwave | 561d2225666f2190213184e464e8e9376c241b43 | [
"MIT"
] | null | null | null | import argparse
import logging
from pandas.core.common import SettingWithCopyWarning
from ._commands import commands
import os
import pandas as pd
import warnings
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def get_argparse():
parser = argparse.ArgumentParser()
parser.add_argument("-v", help="Set verbose mode")
parent_parser = argparse.ArgumentParser(
description="Hyperwave finder", add_help=False
)
parent_parser.add_argument(
"-v", required=False, action="count", help="Set the logging mode to verbose"
)
subparsers = parser.add_subparsers()
for command in commands:
command(subparsers, [parent_parser])
return parser
def Main():
    """Parse CLI args, configure logging/pandas display, run the command."""
    parser = get_argparse()
    args = parser.parse_args()
    if args.v is not None:
        logging.basicConfig(level=logging.DEBUG)
    # Set pandas display for pretty print
    columns = 80
    rows = 50
    try:
        # Fit output to the terminal; falls back to the defaults above when
        # no terminal is attached (e.g. piped output).
        columns, rows = os.get_terminal_size()
    except:
        pass
    pd.set_option("display.max_rows", 1000)
    pd.set_option("display.max_columns", 999)
    pd.set_option("display.width", columns)
    pd.set_option("display.precision", 4)
    # args.func is set by the selected subcommand's parser registration;
    # presumably running with no subcommand would fail here — verify.
    args.func(args)
if __name__ == "__main__":
Main()
| 21.775862 | 84 | 0.688836 |
ba695c878ca86ba91f1c4e08ea7d30f61301d849 | 115 | py | Python | arquivos.py | ravysoares/projetos | 1b43a54be90cb2382799f9479d2f6610d2ecc53f | [
"Apache-2.0"
] | null | null | null | arquivos.py | ravysoares/projetos | 1b43a54be90cb2382799f9479d2f6610d2ecc53f | [
"Apache-2.0"
] | null | null | null | arquivos.py | ravysoares/projetos | 1b43a54be90cb2382799f9479d2f6610d2ecc53f | [
"Apache-2.0"
] | null | null | null | arquivos = open('numeros.txt','w')
for linha in range(1,1000):
arquivos.write('%d\n'%linha)
arquivos.close()
| 16.428571 | 34 | 0.66087 |
b3964e07d0b086b6d768338adceeda980f93377a | 6,804 | py | Python | reg_wsi/register_multiplex_wsi.py | EDRN/slide-image-registration | 76dd0428d0c23c8a409219a0b6b7a1b247f56882 | [
"Apache-2.0"
] | null | null | null | reg_wsi/register_multiplex_wsi.py | EDRN/slide-image-registration | 76dd0428d0c23c8a409219a0b6b7a1b247f56882 | [
"Apache-2.0"
] | null | null | null | reg_wsi/register_multiplex_wsi.py | EDRN/slide-image-registration | 76dd0428d0c23c8a409219a0b6b7a1b247f56882 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
'''
"Copyright 2020–2021, by the California Institute of Technology. ALL RIGHTS RESERVED.
United States Government Sponsorship acknowledged. Any commercial use must be negotiated
with the Office of Technology Transfer at the California Institute of Technology.
This software may be subject to U.S. export control laws. By accepting this software,
the user agrees to comply with all applicable U.S. export laws and regulations.
User has the responsibility to obtain export licenses, or other export authority as may be
required before exporting such information to foreign countries or providing access to foreign persons."
IMAGE TRANSLATION FOR MULTIPLEXED WHOLE SLIDE IMAGES
A multiplexed image consists of multiple rounds of imaging.
Each round has a DAPI frame along with several antigens.
Frames in each round are assumed to be registered, but DAPI channels
may need to be registered across rounds.
Rounds are identified by the integer ROUND_ID in each filename, separated
by dot '.', prior to file type and marker. For example, the following filename
UNMCPC.LIV.3rf77.1.DAPI.tif
Round ID = 1
Marker = DAPI
Type = tif
All frames are expected in TIF format (with tif extension).
The script translates all DAPI channels across all rounds.
The DAPI frame of a random round is fixed and all other rounds with DAPI
and corresponding antigens are translated with respect to this round.
Input frames are read from 'in_folder' and output is written to 'out_folder'
Inconsistent frame sizes are handled by padding all frames to the larges frame size.
Multichannel RGB frames are converted to grayscale frames prior to registration.
'''
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from skimage.color import rgb2gray
from skimage.util import img_as_ubyte
from skimage.registration import phase_cross_correlation
import tifffile
import glob2
import os
import sys
import psutil
def pad_frame(f, xy):
    """Zero-pad 2-D frame `f` on the bottom/right up to shape `xy`.

    Axes already >= the target size are left unchanged (frames are never
    cropped). A single np.pad call replaces the two sequential calls of the
    original, avoiding an intermediate array copy when both axes need
    padding.

    Args:
        f: 2-D numpy array (a grayscale image frame).
        xy: (rows, cols) target size, normally the maximum frame size.

    Returns:
        The padded array, or `f` itself when no padding is needed.
    """
    pad_rows = max(0, xy[0] - f.shape[0])
    pad_cols = max(0, xy[1] - f.shape[1])
    if pad_rows or pad_cols:
        f = np.pad(f, [(0, pad_rows), (0, pad_cols)], mode='constant')
    return f
def get_max_frame_xy(tif_files):
    # Scan the first page of every TIFF and report the largest row and
    # column counts seen, printing each frame's size along the way.
    sizes = []
    for path in tif_files:
        rows, cols = tifffile.TiffFile(path).pages[0].shape[0:2]
        print(rows, cols, os.path.basename(path))
        sizes.append((rows, cols))
    max_x = max(r for r, _ in sizes)
    max_y = max(c for _, c in sizes)
    print('\nOutput frame size:', max_x, max_y)
    return (max_x, max_y)
def mem_check(in_folder):
    # True when twice the size of the largest .tif in `in_folder` fits in
    # 80% of currently available RAM (headroom for fixed + moving frame).
    available = psutil.virtual_memory().available
    tif_paths = glob2.glob(os.path.join(in_folder, '*.tif'))
    largest = max(os.path.getsize(p) for p in tif_paths)
    return largest * 2 <= available * 0.80
def get_gray_frame(f, xy):
    """Load TIFF `f` as a grayscale frame padded to shape `xy`.

    Multi-channel (RGB) frames are converted to 8-bit grayscale so all
    rounds can be registered in a common representation.
    """
    # read tif file
    im = tifffile.imread(f)
    im = np.squeeze(im)
    # convert multi-channel images to gray
    if len(im.shape) > 2:
        im = img_as_ubyte(rgb2gray(im))
    # pad frame to max frame size
    im = pad_frame(im, xy)
    return im
def main(in_folder, out_folder):
    """Register every imaging round to one fixed round's DAPI frame.

    Reads all .tif frames in `in_folder`, picks an arbitrary round as the
    fixed reference, aligns every other round's DAPI frame to it via phase
    cross-correlation, applies the same shift to that round's antigen
    frames, and writes the results to `out_folder`.

    Fixes: round ids are strings, so the original `max(rounds)` was a
    lexicographic maximum ('9' > '10') and the DAPI-count check compared a
    stringified count against that id; both are now numeric.
    """
    # check all frames in
    tif_files = glob2.glob(os.path.join(in_folder, '*.tif'))
    if not mem_check(in_folder):
        print('Not enough memory.')
        exit()
    # Round id is the third-from-last dot-separated filename field.
    rounds = list(set([os.path.basename(x).split('.')[-3] for x in tif_files]))
    # Compare numerically so round '10' is not considered smaller than '9'.
    max_rounds = max(rounds, key=int)
    print('\nImaging rounds:', rounds)
    # get dapi frames in each round
    dapis = [x for x in tif_files if os.path.basename(x).split('.')[-2].lower()=='dapi']
    print('\nNumber of DAPI frames:', len(dapis))
    if len(dapis) != int(max_rounds):
        print('The number of DAPI frames does not match with the number of imaging rounds.')
        exit()
    if len(dapis) < 2:
        print('At least two DAPI frames needed for registration.')
        exit()
    # Non-DAPI (antigen) frames grouped by round id.
    antigens = dict()
    for r in rounds:
        a = [x for x in tif_files if os.path.basename(x).split('.')[-3]==r and not os.path.basename(x).split('.')[-2].lower()=='dapi']
        antigens[r] = a
    xy = get_max_frame_xy(tif_files)
    # The popped round becomes the fixed reference; its frames are written
    # unshifted (only padded/grayscaled).
    fix_dapi = dapis.pop()
    fix_id = os.path.basename(fix_dapi).split('.')[-3]
    fix_frames = antigens[fix_id]
    for f in fix_frames:
        im = get_gray_frame(f, xy)
        tifffile.imwrite(f.replace(in_folder,out_folder), data=im)
    # read fixed dapi only once
    im_fix = get_gray_frame(fix_dapi, xy)
    tifffile.imwrite(fix_dapi.replace(in_folder, out_folder), data=im_fix)
    # Down-sample large frames so cross-correlation stays tractable
    # (keeps the longest side near 10000 px).
    down_sample = max(im_fix.shape) // 10000 + 1
    im_fix_small = im_fix[0::down_sample, 0::down_sample]
    print('\nFixed DAPI:', im_fix.shape, os.path.basename(fix_dapi))
    print('\nSample factor:', down_sample)
    errors = dict()
    for r in rounds:
        errors[r] = 0
    # Register each remaining round against the fixed DAPI.
    while dapis:
        mov_dapi = dapis.pop()
        mov_id = os.path.basename(mov_dapi).split('.')[-3]
        mov_frames = antigens[mov_id]
        # read the moving frame
        im_mov = get_gray_frame(mov_dapi, xy)
        im_mov_small = im_mov[0::down_sample, 0::down_sample]
        print('\nRegistering round:', mov_id)
        shift, error, diffphase = phase_cross_correlation(im_fix_small, im_mov_small, upsample_factor=down_sample)
        # Scale the shift computed on the down-sampled pair back to full size.
        shift = shift * [down_sample, down_sample]
        errors[mov_id] = error
        print('error: {:.2f}'.format(error), 'phase: {:.4E}'.format(diffphase))
        im_mov_shifted = np.roll(im_mov, shift.astype(int), [0,1])
        print('Writing:', os.path.basename(mov_dapi))
        tifffile.imwrite(mov_dapi.replace(in_folder, out_folder), data=im_mov_shifted)
        # Apply the same translation to every antigen frame of this round.
        for f in mov_frames:
            im = get_gray_frame(f, xy)
            im = np.roll(im, shift.astype(int), [0,1])
            print('Writing:', os.path.basename(f))
            tifffile.imwrite(f.replace(in_folder, out_folder), data=im)
    # Report rounds sorted by descending registration error.
    print('\nDisplacements:')
    for k, v in {k: v for k, v in sorted(errors.items(), key=lambda item: item[1], reverse=True)}.items():
        print('Round', k, 'displacement: {:.2f}'.format(v))
if __name__ == '__main__':
log_file = os.path.join(sys.argv[2],'logfile.log')
print('\nProcess started. Output is directed to ', log_file)
sys.stdout = open(log_file, 'a')
print('\nInput directory:', sys.argv[1])
print('Output directory:', sys.argv[2])
main(sys.argv[1], sys.argv[2])
| 32.4 | 134 | 0.652263 |
a932bf1057a31c887ff262f4c4c0f7cc112aa24d | 12 | py | Python | backend/services/web/api/__init__.py | noasck/EduARd | f4a95a92d513b017ff2f0b0c3591207a741b1110 | [
"MIT"
] | 3 | 2021-04-16T14:37:47.000Z | 2021-06-28T21:13:50.000Z | backend/services/web/api/__init__.py | noasck/EduARd | f4a95a92d513b017ff2f0b0c3591207a741b1110 | [
"MIT"
] | 1 | 2021-04-17T14:45:59.000Z | 2021-04-17T14:45:59.000Z | backend/services/web/api/__init__.py | noasck/EduARd | f4a95a92d513b017ff2f0b0c3591207a741b1110 | [
"MIT"
] | null | null | null | LOADING = 1
| 6 | 11 | 0.666667 |
7dcc0dc3da57a9a89c543e039f1604a730cb5952 | 2,342 | py | Python | salt/states/pecl.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | salt/states/pecl.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | null | null | null | salt/states/pecl.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | null | null | null | '''
Installation of PHP pecl extensions.
==============================================
A state module to manage php pecl extensions.
.. code-block:: yaml
mongo:
pecl.installed
'''
def installed(
        name,
        version=None):
    '''
    Make sure that a pecl extension is installed.
    name
        The pecl extension name to install
    version
        The pecl extension version to install. This option may be
        ignored to install the latest stable version.
    '''
    # Normalize non-string version specifiers (e.g. ints from YAML) to str.
    if version is not None and not isinstance(version, basestring):
        version = str(version)
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    installed_pecls = __salt__['pecl.list']()
    if name in installed_pecls:
        # Already present: nothing to do unless a different version was
        # explicitly requested.
        if version is None or installed_pecls[name][0] == version:
            ret['result'] = True
            ret['comment'] = 'Pecl is already installed.'
            return ret
        # Request the specific version by pecl's name-version syntax.
        name = '{0}-{1}'.format(name, version)
    # Dry-run mode: report the pending action without touching the system.
    if __opts__['test']:
        ret['comment'] = 'The pecl {0} would have been installed'.format(name)
        return ret
    if __salt__['pecl.install'](name):
        ret['result'] = True
        ret['changes'][name] = 'Installed'
        ret['comment'] = 'Pecl was successfully installed'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install pecl.'
    return ret
def removed(name):
    '''
    Make sure that a pecl extension is not installed.
    name
        The pecl extension name to uninstall
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    # Nothing to do when the extension is already absent.
    if name not in __salt__['pecl.list']():
        ret['result'] = True
        ret['comment'] = 'Pecl is not installed.'
        return ret
    # Dry-run mode: report the pending action without touching the system.
    if __opts__['test']:
        ret['comment'] = 'The pecl {0} would have been removed'.format(name)
        return ret
    uninstalled = __salt__['pecl.uninstall'](name)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Pecl was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove pecl.'
    return ret
| 28.216867 | 78 | 0.576857 |
d5fe3cf83444bc2539f94380045bdd00c67a7c23 | 2,620 | py | Python | Skin2PDF/mcskin2pdf.py | hreese/minecraft_utils | 7dc4a0316709143c2a4bc19a95faaf83c7b32556 | [
"Apache-2.0"
] | null | null | null | Skin2PDF/mcskin2pdf.py | hreese/minecraft_utils | 7dc4a0316709143c2a4bc19a95faaf83c7b32556 | [
"Apache-2.0"
] | null | null | null | Skin2PDF/mcskin2pdf.py | hreese/minecraft_utils | 7dc4a0316709143c2a4bc19a95faaf83c7b32556 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# get player's heasd skin and create a large (A3) printable PDF
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from reportlab.lib.colors import Color
from reportlab.lib.pagesizes import A3,A4
import urllib2
import PIL
import sys
import io
from contextlib import closing
from collections import OrderedDict
usernames = ['MineSprawl', 'dividuum', 'aykura', 'Labanane', 'FelixW80', 'Aachthor', 'JulianX4', 'vanyarw']
# head coords
skin_bb = OrderedDict(
{
'head_left' : (0,8,8,16),
'head_front' : (8,8,16,16),
'head_right' : (16,8,24,16),
'head_back' : (24,8,32,16),
'head_top' : (8,0,16,8),
}
)
headgear = {
'head_left' : (32, 8, 40, 16),
'head_front' : (40, 8, 48, 16),
'head_right' : (48, 8, 56, 16),
'head_back' : (56, 8, 64, 16),
'head_top' : (40, 0, 48, 8),
}
what_to_render = [ 'head_front' ]
# download skin
def get_mc_avatar(name = "MineSprawl"):
    # Fetch the player's skin PNG over HTTP (Python 2 urllib2) and load it
    # as a PIL image; closing() guarantees the response is released even if
    # PIL raises while decoding.
    with closing(urllib2.urlopen("http://skins.minecraft.net/MinecraftSkins/%s.png" % name)) as avatar_response:
        return PIL.Image.open( io.BytesIO(avatar_response.read()) )
# 25x25 (A3)
def create_pdf(username):
c = canvas.Canvas("%s.pdf" % username, pagesize=A3)
# todo: set metadata
return c
def write_texture_to_page(c, texture, scalef=cm):
    """Render `texture` onto canvas `c` as a grid of filled squares.

    Each texture pixel becomes one `scalef` x `scalef` rectangle.
    NOTE(review): getpixel is unpacked into (r,g,b,a), so this assumes an
    RGBA skin image — confirm the downloaded PNGs always have an alpha
    channel.
    """
    # compensate for different coordinate systems
    # (PIL has y down, reportlab has y up)
    texture = texture.transpose(PIL.Image.FLIP_TOP_BOTTOM)
    for x in xrange(texture.size[0]):
        for y in xrange(texture.size[1]):
            (r,g,b,a) = texture.getpixel((x,y))
            # reportlab colors are 0..1 floats; PIL channels are 0..255.
            pixelcolor = Color(r/255., g/255., b/255., alpha=a/255.)
            c.setFillColor( pixelcolor)
            c.setStrokeColor( pixelcolor)
            #print("Created rect at %d,%d with fill color %d,%d,%d" % (x, y, r, g, b))
            c.rect(scalef*x, scalef*y, scalef, scalef, stroke=True, fill=True)
def close_pdf(c):
c.save()
# For each configured player: download the skin, render the selected head
# faces (base layer plus headgear overlay when defined) to one PDF page
# each, and save the document.
for username in usernames:
    i=get_mc_avatar(username)
    c = create_pdf("large_head_%s" % username)
    for side in what_to_render:
        c.drawString(2*cm, A3[1]-2*cm, "%s - %s" % (username, side))
        c.translate(2*cm, 5.5*cm)
        # 8 px face at 3.125 cm per pixel -> 25 cm square on A3.
        write_texture_to_page(c, i.crop(skin_bb[side]), 3.125*cm)
        # Overlay the (possibly transparent) headgear layer, if any.
        if headgear.has_key(side):
            write_texture_to_page(c, i.crop(headgear[side]), 3.125*cm)
        c.showPage()
    close_pdf(c)
#def main(argv):
# if len(argv) != 2:
# sys.stderr.write("Usage: %s MineCraftUserName\n" % argv[0])
# return 2
# else:
# username = argv[1]
# create_pdf(username)
#
#if __name__ == "__main__":
# sys.exit(main(sys.argv))
| 29.438202 | 112 | 0.625191 |
21c5306750b2a026a091dce4792b0bf3d6b60e0c | 6,475 | py | Python | pymanifest/__init__.py | claybrooks/pymanifest | 8be1297b7d57352fd91047afbcb33e8b5317c37b | [
"Unlicense"
] | null | null | null | pymanifest/__init__.py | claybrooks/pymanifest | 8be1297b7d57352fd91047afbcb33e8b5317c37b | [
"Unlicense"
] | null | null | null | pymanifest/__init__.py | claybrooks/pymanifest | 8be1297b7d57352fd91047afbcb33e8b5317c37b | [
"Unlicense"
] | null | null | null | import argparse
import os
import fnmatch
import logging
class ArgSet:
    """Plain holder for the five path-selector lists one side
    (include or exclude) of a pymanifest invocation can specify."""

    _FIELDS = ('files', 'directories', 'recurse_directories',
               'manifests', 'patterns')

    def __init__(self, files, directories, recurse_directories, manifests,
                 patterns):
        # Straight field assignment; no validation happens here.
        values = (files, directories, recurse_directories, manifests,
                  patterns)
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
DEFAULT_ARG_MAP = {
'--file' : '--file',
'--directory' : '--directory',
'--recurse-directory' : '--recurse-directory',
'--manifest' : '--manifest',
'--exclude-file' : '--exclude-file',
'--exclude-directory' : '--exclude-directory',
'--exclude-recurse-directory' : '--exclude-recurse-directory',
'--exclude-manifest' : '--exclude-manifest',
'--pattern' : '--pattern',
'--exclude-pattern' : '--exclude-pattern',
}
def __mimic_ap_rename(arg):
    # Reproduce argparse's dest derivation: strip a leading '--' (if
    # present) and turn the remaining dashes into underscores.
    stripped = arg[2:] if arg.startswith('--') else arg
    return stripped.replace('-', '_')
def add_args(ap: argparse.ArgumentParser, arg_map=None):
    """Register pymanifest's include/exclude options on parser `ap`.

    `arg_map` lets callers rename the flags; it defaults to the identity
    mapping DEFAULT_ARG_MAP. Every option is a repeatable (append) flag
    defaulting to an empty list.
    """
    if arg_map is None:
        arg_map = dict(DEFAULT_ARG_MAP)
    # (canonical flag, help text) pairs, in the original registration order.
    option_specs = (
        ('--file',
         'Full path to a file to include.'),
        ('--directory',
         'Full path to a directory of files to include.'),
        ('--recurse-directory',
         'Full path to a directory of files and subdirectories to include.'),
        ('--manifest',
         'Full path to a file that contains newline delimited paths to files and directories to include.'),
        ('--exclude-file',
         'Full path to file to exclude.'),
        ('--exclude-directory',
         'Full path to a directory to exclude.'),
        ('--exclude-recurse-directory',
         'Full path to a directory to exclude.'),
        ('--exclude-manifest',
         'Full path to a file that contains newline delimited paths to files and directories to exclude.'),
        ('--pattern',
         'Patterns of files to include.'),
        ('--exclude-pattern',
         'Pattern of files to exclude.'),
    )
    for flag, help_text in option_specs:
        ap.add_argument(arg_map[flag], action='append', default=[],
                        help=help_text)
def __process_file(path, out, err):
    # Record an existing file's canonical path in `out`; anything else
    # (missing file, directory, etc.) goes into `err`.
    resolved = os.path.realpath(path)
    target = out if os.path.isfile(resolved) else err
    target.add(resolved)
def __process_directory(path, out, err):
    # Add the immediate (non-recursive) files of an existing directory to
    # `out`; paths that are not directories go into `err`.
    resolved = os.path.realpath(path)
    if os.path.isdir(resolved):
        _, _, filenames = next(os.walk(resolved))
        out.update(os.path.join(resolved, name) for name in filenames)
    else:
        err.add(resolved)
def __process_recurse_directory(path, out, err):
    # Walk an existing directory tree, collecting every file path into
    # `out`; paths that are not directories go into `err`.
    resolved = os.path.realpath(path)
    if not os.path.isdir(resolved):
        err.add(resolved)
        return
    for dirpath, _dirnames, filenames in os.walk(resolved):
        out.update(os.path.join(dirpath, name) for name in filenames)
def __process_manifest(path, out, err):
    # Read a manifest: one path per line, each resolved as a file or
    # directory (non-recursive) and collected into `out`.
    # NOTE(review): unlike the other __process_* helpers, a missing
    # manifest file is silently skipped rather than recorded in `err` —
    # confirm that asymmetry is intentional.
    if not os.path.isfile(path):
        return
    with open(path, 'r') as f:
        for line in f:
            line = line.strip()
            if os.path.isfile(line):
                __process_file(line, out, err)
            elif os.path.isdir(line):
                __process_directory(line, out, err)
            else:
                # Neither a file nor a directory: report it.
                err.add(line)
def __process_patterns(fromList, filters):
    # Union of fnmatch matches over every pattern, returned as a set.
    matched = set()
    for pattern in filters:
        matched.update(fnmatch.filter(fromList, pattern))
    return matched
def __process_items(items, include, missing, processor):
    # Apply `processor` to each item; the processor sorts results into the
    # `include` (found) and `missing` (errored) sets.
    for item in items:
        processor(item, include, missing)
def __process_arg_set(arg_set):
    # Expand one ArgSet's file/directory/recurse/manifest selectors into a
    # flat set of file paths. Patterns are NOT applied here — that happens
    # later in process().
    files = set()
    err_files = set()
    err_directories = set()
    __process_items(arg_set.files, files, err_files, __process_file)
    __process_items(arg_set.directories, files, err_directories, __process_directory)
    __process_items(arg_set.recurse_directories,files, err_directories, __process_recurse_directory)
    __process_items(arg_set.manifests, files, err_files, __process_manifest)
    # Returns (matched files, unresolvable file paths, unresolvable dirs).
    return files, err_files, err_directories
def process_from_args(args, arg_map=None, fail_on_missing=False):
    # Build include/exclude ArgSets out of an argparse Namespace produced
    # by add_args() (honoring any flag renames in arg_map) and delegate to
    # process().
    if arg_map is None:
        arg_map = dict(DEFAULT_ARG_MAP)
    # Map canonical flag names to the Namespace attribute names argparse
    # derived from the (possibly renamed) flags.
    attr_map = {}
    for k,v in arg_map.items():
        attr_map[k] = __mimic_ap_rename(v)
    include = ArgSet(
        getattr(args, attr_map['--file']),
        getattr(args, attr_map['--directory']),
        getattr(args, attr_map['--recurse-directory']),
        getattr(args, attr_map['--manifest']),
        getattr(args, attr_map['--pattern'])
    )
    exclude = ArgSet(
        getattr(args, attr_map['--exclude-file']),
        getattr(args, attr_map['--exclude-directory']),
        getattr(args, attr_map['--exclude-recurse-directory']),
        getattr(args, attr_map['--exclude-manifest']),
        getattr(args, attr_map['--exclude-pattern'])
    )
    return process(include, exclude, fail_on_missing)
def process(include, exclude, fail_on_missing=False):
    """Resolve the include/exclude ArgSets into the final set of file paths.

    NOTE(review): ``fail_on_missing`` and the error sets returned by
    __process_arg_set() are currently ignored, so missing files/directories
    are silently dropped -- confirm whether callers expect an exception here.
    """
    include_files, err_include_files, err_include_directories = __process_arg_set(include)
    exclude_files, err_exclude_files, err_exclude_directories = __process_arg_set(exclude)
    # get the first difference between what is to be included and excluded
    curr_matched = include_files - exclude_files
    # only run pattern matching if something was provided
    if len(include.patterns) > 0:
        # rebinds ``include`` to the set of paths matching the include patterns
        include = __process_patterns(curr_matched, set(include.patterns))
        curr_matched = curr_matched.intersection(include)
    # only run pattern matching if something was provided
    if len(exclude.patterns) > 0:
        # rebinds ``exclude`` to the set of paths matching the exclude patterns
        exclude = __process_patterns(curr_matched, set(exclude.patterns))
        curr_matched = curr_matched - exclude
    return curr_matched
| 28.650442 | 113 | 0.611429 |
867422d50fbf393dc6b41d0efaac56d033e77bb5 | 7,669 | py | Python | vnegmas/backend/src/dynetx/readwrite/edgelist.py | YueNing/vnegmas | e95adc56ee9aab8d6cd6f28cce04383e199dc2b8 | [
"MIT"
] | 3 | 2019-06-29T11:40:29.000Z | 2019-09-07T02:15:09.000Z | vnegmas/backend/src/dynetx/readwrite/edgelist.py | YueNing/vnegmas | e95adc56ee9aab8d6cd6f28cce04383e199dc2b8 | [
"MIT"
] | null | null | null | vnegmas/backend/src/dynetx/readwrite/edgelist.py | YueNing/vnegmas | e95adc56ee9aab8d6cd6f28cce04383e199dc2b8 | [
"MIT"
] | null | null | null | """
Read and write DyNetx graphs as edge lists.
The multi-line adjacency list format is useful for graphs with nodes
that can be meaningfully represented as strings.
With the edgelist format simple edge data can be stored but node or graph data is not.
There is no way of representing isolated nodes unless the node has a self-loop edge.
Format
------
You can read or write three formats of edge lists with these functions.
Node pairs with **timestamp** (u, v, t):
>>> 1 2 0
Sequence of **Interaction** events (u, v, +/-, t):
>>> 1 2 + 0
>>> 1 2 - 3
"""
import past.builtins
from ..classes import DynDiGraph, DynGraph
from ..utils import compact_timeslot, make_str, open_file
__author__ = "Giulio Rossetti"
__license__ = "GPL"
__email__ = "giulio.rossetti@gmail.com"
__all__ = [
"write_interactions",
"generate_interactions",
"parse_interactions",
"read_interactions",
"generate_snapshots",
"write_snapshots",
"parse_snapshots",
"read_snapshots",
]
def generate_interactions(G, delimiter=" "):
    """Yield one delimited text line per streamed interaction of ``G``."""
    for interaction in G.stream_interactions():
        fields = map(make_str, interaction)
        yield delimiter.join(fields)
@open_file(1, mode="wb")
def write_interactions(G, path, delimiter=" ", encoding="utf-8"):
    """Write a DyNetx graph in interaction list format.

    Parameters
    ----------
    G : graph
        A DyNetx graph.
    path : basestring
        The desired output filename
    delimiter : character
        Column delimiter
    """
    for record in generate_interactions(G, delimiter):
        path.write(("%s\n" % record).encode(encoding))
@open_file(0, mode="rb")
def read_interactions(
    path,
    comments="#",
    directed=False,
    delimiter=None,
    nodetype=None,
    timestamptype=None,
    encoding="utf-8",
    keys=False,
):
    """Read a DyNetx graph from interaction list format.

    Parameters
    ----------
    path : basestring
        The desired output filename
    delimiter : character
        Column delimiter
    """
    # Optional timestamp compaction map, built from a second pass over the file.
    ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype) if keys else None
    decoded = (raw.decode(encoding) for raw in path)
    return parse_interactions(
        decoded,
        comments=comments,
        directed=directed,
        delimiter=delimiter,
        nodetype=nodetype,
        timestamptype=timestamptype,
        keys=ids,
    )
def parse_interactions(
    lines,
    comments="#",
    directed=False,
    delimiter=None,
    nodetype=None,
    timestamptype=None,
    keys=None,
):
    """Parse ``u v {+,-} t`` interaction records into a Dyn(Di)Graph.

    Parameters
    ----------
    lines : iterable of str
        One interaction event per line.
    comments : str
        Everything from this marker to the end of a line is ignored.
    directed : bool
        Build a DynDiGraph instead of a DynGraph.
    nodetype, timestamptype : callable, optional
        Converters applied to the node-id fields / timestamp field.
    keys : mapping, optional
        Optional timestamp remapping (raw timestamp -> compact slot id).
    """
    if not directed:
        G = DynGraph()
    else:
        G = DynDiGraph()
    for line in lines:
        p = line.find(comments)
        if p >= 0:
            line = line[:p]
        if not len(line):
            continue
        s = line.strip().split(delimiter)
        if len(s) != 4:
            continue
        u, v, op, ts = s
        if nodetype is not None:
            try:
                u = nodetype(u)
                v = nodetype(v)
            except Exception:  # narrowed from a bare except: don't mask SystemExit etc.
                raise TypeError(
                    "Failed to convert nodes %s,%s to type %s." % (u, v, nodetype)
                )
        if timestamptype is not None:
            try:
                ts = timestamptype(ts)
            except Exception:
                # fixed: the message previously reported ``nodetype`` here
                raise TypeError(
                    "Failed to convert timestamp %s to type %s." % (ts, timestamptype)
                )
        if keys is not None:
            ts = keys[ts]
        if op == "+":
            G.add_interaction(u, v, t=ts)
        else:
            # A "-" event: extend the interaction forward from its last
            # recorded end up to (but excluding) the removal time.
            # NOTE(review): assumes a "+" for (u, v) was seen earlier;
            # otherwise G.adj[u][v] raises KeyError -- confirm intended.
            timestamps = G.adj[u][v]["t"]
            if len(timestamps) > 0 and timestamps[-1][1] < ts:
                for t in range(timestamps[-1][1], ts):
                    G.add_interaction(u, v, t=t)
    return G
def generate_snapshots(G, delimiter=" "):
    """Yield one ``u v t`` text line per (interaction, snapshot id) pair.

    Interval interactions ``(start, end)`` are expanded to one line for every
    snapshot id in the closed interval [start, end].
    """
    for u, v, d in G.interactions():
        if "t" not in d:
            # fixed: was ``raise NotImplemented`` -- NotImplemented is a
            # singleton, not an exception, and raising it is a TypeError
            raise NotImplementedError("interaction without temporal information")
        for t in d["t"]:
            if t[1] is not None and t[0] != t[1]:
                # closed interval: emit every snapshot id it spans
                # (``range`` replaces the legacy past.builtins.xrange)
                for s in range(t[0], t[1] + 1):
                    yield delimiter.join(map(make_str, [u, v, s]))
            else:
                yield delimiter.join(map(make_str, [u, v, t[0]]))
@open_file(1, mode="wb")
def write_snapshots(G, path, delimiter=" ", encoding="utf-8"):
    """Write a DyNetx graph in snapshot graph list format.

    Parameters
    ----------
    G : graph
        A DyNetx graph.
    path : basestring
        The desired output filename
    delimiter : character
        Column delimiter
    """
    for record in generate_snapshots(G, delimiter):
        path.write(("%s\n" % record).encode(encoding))
def parse_snapshots(
    lines,
    comments="#",
    directed=False,
    delimiter=None,
    nodetype=None,
    timestamptype=None,
    keys=None,
):
    """Parse ``u v t [e]`` snapshot records into a Dyn(Di)Graph.

    Parameters
    ----------
    lines : iterable of str
        One snapshot record per line; a fourth field is the interval end.
    comments : str
        Everything from this marker to the end of a line is ignored.
    directed : bool
        Build a DynDiGraph instead of a DynGraph.
    nodetype, timestamptype : callable, optional
        Converters applied to the node-id fields / timestamp fields.
    keys : mapping, optional
        Optional timestamp remapping (raw timestamp -> compact slot id).
    """
    if not directed:
        G = DynGraph()
    else:
        G = DynDiGraph()
    for line in lines:
        p = line.find(comments)
        if p >= 0:
            line = line[:p]
        if not len(line):
            continue
        # split line, should have 3 or more fields (extras beyond 4 ignored)
        s = line.strip().split(delimiter)
        if len(s) < 3:
            continue
        u, v, t = s[0], s[1], s[2]
        e = s[3] if len(s) > 3 else None
        if nodetype is not None:
            try:
                u = nodetype(u)
                v = nodetype(v)
            except Exception:  # narrowed from a bare except: don't mask SystemExit etc.
                raise TypeError(
                    "Failed to convert nodes %s,%s to type %s." % (u, v, nodetype)
                )
        if timestamptype is not None:
            try:
                t = timestamptype(t)
                if e is not None:
                    e = timestamptype(e)
            except Exception:
                # fixed: the message previously reported ``nodetype`` here
                raise TypeError(
                    "Failed to convert timestamp %s to type %s." % (t, timestamptype)
                )
        if keys is not None:
            t = keys[t]
            if e is not None:
                e = keys[e]
        G.add_interaction(u, v, t=t, e=e)
    return G
@open_file(0, mode="rb")
def read_snapshots(
    path,
    comments="#",
    directed=False,
    delimiter=None,
    nodetype=None,
    timestamptype=None,
    encoding="utf-8",
    keys=False,
):
    """Read a DyNetx graph from snapshot graph list format.

    Parameters
    ----------
    path : basestring
        The desired output filename
    delimiter : character
        Column delimiter
    """
    # Optional timestamp compaction map, built from a second pass over the file.
    ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype) if keys else None
    decoded = (raw.decode(encoding) for raw in path)
    return parse_snapshots(
        decoded,
        comments=comments,
        directed=directed,
        delimiter=delimiter,
        nodetype=nodetype,
        timestamptype=timestamptype,
        keys=ids,
    )
def read_ids(path, delimiter=None, timestamptype=None):
    """Collect the distinct timestamps appearing in an edge-list file.

    Returns the compacted (renumbered) timeslot list produced by
    compact_timeslot().  Snapshot records with four fields (``u v t e``)
    contribute both timestamps; interaction records (``u v op t``) only the
    last one because the third field is the +/- operator.
    """
    ids = {}
    with open(path) as f:  # context manager replaces manual flush()/close()
        for line in f:
            s = line.rstrip().split(delimiter)
            ids[timestamptype(s[-1])] = None
            # fixed: the original tested ``len(line) == 4`` -- the number of
            # characters in the raw line -- instead of the number of fields,
            # so interval-end timestamps were almost never collected
            if len(s) == 4:
                if s[-2] not in ["+", "-"]:
                    ids[timestamptype(s[-2])] = None
    ids = compact_timeslot(ids.keys())
    return ids
| 23.169184 | 86 | 0.520798 |
4a7ddf3ae68954287c471878d9e109d59849a8fd | 830 | py | Python | yaksh_data/yaksh/urls_password_reset.py | amitpeshwani/question_template | b906dad9140e81ef2d520b9f146c8fa3376b7700 | [
"Python-2.0"
] | 1 | 2022-03-21T11:14:17.000Z | 2022-03-21T11:14:17.000Z | yaksh_data/yaksh/urls_password_reset.py | amitpeshwani/question_template | b906dad9140e81ef2d520b9f146c8fa3376b7700 | [
"Python-2.0"
] | null | null | null | yaksh_data/yaksh/urls_password_reset.py | amitpeshwani/question_template | b906dad9140e81ef2d520b9f146c8fa3376b7700 | [
"Python-2.0"
] | null | null | null | from django.conf.urls import url
from django.contrib.auth.views import password_reset, password_reset_confirm,\
    password_reset_done, password_reset_complete, password_change,\
    password_change_done
# URL routes for the password reset / password change workflow, all backed by
# Django's built-in function-based auth views.
# NOTE(review): these function-based views were removed in Django 2.1 in
# favour of the class-based ``auth_views.*View`` equivalents -- confirm the
# project's Django version before upgrading.
urlpatterns = [
    # Request a reset email for a forgotten password.
    url(r'^forgotpassword/$', password_reset,
        name="password_reset"),
    # Link from the reset email: uidb64 identifies the user, token validates.
    url(r'^password_reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
        password_reset_confirm,
        name='password_reset_confirm'),
    # Confirmation page shown after the reset email has been sent.
    url(r'^password_reset/mail_sent/$', password_reset_done,
        name='password_reset_done'),
    # Final page after the password has been successfully reset.
    url(r'^password_reset/complete/$', password_reset_complete,
        name='password_reset_complete'),
    # Change password for an already authenticated user.
    url(r'^changepassword/$', password_change,
        name='password_change'),
    url(r'^password_change/done/$', password_change_done,
        name='password_change_done'),
]
| 39.52381 | 78 | 0.701205 |
1b011066aa16f16f2b0dceb0d90c86f0ae625557 | 697 | py | Python | config/__init__.py | wangzishuo111/bk_prometheus | c6aa16d8a547a3d00fbca317f6846ad35b1297ea | [
"MIT"
] | null | null | null | config/__init__.py | wangzishuo111/bk_prometheus | c6aa16d8a547a3d00fbca317f6846ad35b1297ea | [
"MIT"
] | 2 | 2021-02-08T20:48:38.000Z | 2021-06-10T23:03:39.000Z | config/__init__.py | wangzishuo111/bk_prometheus | c6aa16d8a547a3d00fbca317f6846ad35b1297ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
__all__ = ['celery_app', 'RUN_VER', 'APP_CODE', 'SECRET_KEY', 'BK_URL', 'BASE_DIR']
import os
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from blueapps.core.celery import celery_app
# Basic app information
# SaaS runtime version; do not modify unless necessary
RUN_VER = 'open'
# SaaS application ID
APP_CODE = 'bkprometheus'
# SaaS secret key -- never disclose it.
# NOTE(review): a secret committed to source control should be rotated and
# loaded from the environment instead.
SECRET_KEY = '7c4593ff-034a-4c5f-99dd-f12ad7f1ae0e'
# BlueKing SaaS platform URL, e.g. http://paas.bking.com
BK_URL = 'http://paas.growing.com'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
    __file__)))
| 26.807692 | 83 | 0.736011 |
a82d7e112f93b46aef8d34a050fbcf42c8a5bfd3 | 2,771 | py | Python | AutoTicketsBot/util.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | 1 | 2021-03-26T05:07:20.000Z | 2021-03-26T05:07:20.000Z | AutoTicketsBot/util.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | null | null | null | AutoTicketsBot/util.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | null | null | null | import sys
import os
import yaml
import schedule
import time
import datetime
import argparse
def addArgs():
    """Build the ticket-bot command line parser and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Set config yaml.')
    # (short flag, long flag, help text) for every supported option
    options = (
        ("-a", "--username", "set the account name"),
        ("-p", "--password", "set the password"),
        ("-u", "--homePage", "set the home page url"),
        ("-t", "--ticketPage", "set the ticket page url"),
        ("-e", "--executable_path", "set path of the web driver"),
        ("-c", "--ticketCount", "set number of tickets"),
        ("-s", "--startTime", "set time for buying tickets"),
    )
    for short_flag, long_flag, help_text in options:
        parser.add_argument(short_flag, long_flag, help=help_text)
    return parser.parse_args()
def configRead(fileName):
    """Load a YAML config file located in the current working directory."""
    config_path = os.path.join(os.getcwd(), fileName)
    with open(config_path) as handle:
        return yaml.safe_load(handle)
def configWrite(fileName, args, config):
    """Overlay all non-None CLI values onto config['Config'] and persist.

    Returns True when at least one value changed (and the YAML file was
    rewritten), False when there was nothing to write.
    """
    overrides = {name: getattr(args, name) for name in vars(args)
                 if getattr(args, name) is not None}
    if not overrides:
        return False
    config['Config'].update(overrides)
    with open(os.path.join(os.getcwd(), fileName), 'w') as handle:
        yaml.dump(config, handle)
    return True
def notifyUser(title, text):
    # Show a macOS desktop notification via AppleScript (osascript).
    # NOTE(review): ``title`` and ``text`` are interpolated directly into a
    # shell command string -- a double quote or backtick in either value
    # would break, or inject into, the command. Consider subprocess.run with
    # an argument list instead of os.system.
    os.system("""
              osascript -e 'display notification "{}" with title "{}"'
              """.format(text, title))
def terminateBot(ticketsBot, waitTime=0):
    """Sleep ``waitTime`` seconds, then shut the bot's browser down."""
    time.sleep(waitTime)
    ticketsBot.quit()
def websiteSignIn(ticketsBot, retryCounter=5):
    """Open the site and sign in, retrying up to ``retryCounter`` times.

    Raises RuntimeError when every attempt fails.
    """
    ticketsBot.initBrowser()
    ticketsBot.visitHomePage()
    attempt = 0
    while attempt < retryCounter:
        try:
            ticketsBot.signInHomePage()
            ticketsBot.signInChecker(wait_time=3)
        except RuntimeError as exc:
            print(exc)
            attempt += 1
            print('Retrying {} time...'.format(attempt))
        else:
            break
    if attempt >= retryCounter:
        raise RuntimeError("Failed to sign in to the website. Please verify your account information and restart the program.")
def buyTickets(ticketsBot):
    """Navigate to the ticket page and pick the configured tickets."""
    ticketsBot.enterTicketPage()
    ticketsBot.selectTicket()
def buyTicketsPipeline(ticketsBot):
    """Buy tickets, notify the user, then shut down after a 10-minute grace period."""
    buyTickets(ticketsBot)
    notifyUser('AutoTicketsBot Notification', 'Got tickets!!!!!')
    terminateBot(ticketsBot, waitTime=600)
def scheduleBot(ticketsBot, startTime):
twoMinDelta = datetime.datetime.strptime(startTime, "%H:%M") - datetime.timedelta(minutes=2)
schedule.every().day.at(twoMinDelta.strftime("%H:%M")).do(websiteSignIn, ticketsBot, 3)
schedule.every().day.at(startTime).do(buyTicketsPipeline, ticketsBot)
while True:
schedule.run_pending()
time.sleep(1) | 33.792683 | 124 | 0.664381 |
8cd8c9609ad3ca096a9497e433f92e5d2036d3fb | 1,233 | py | Python | torchmetrics/__about__.py | karthikrangasai/metrics | 93cb842f24d15804dd2e7677ca7fc6631b234773 | [
"Apache-2.0"
] | 4 | 2021-03-22T09:02:31.000Z | 2021-03-23T07:35:39.000Z | torchmetrics/__about__.py | karthikrangasai/metrics | 93cb842f24d15804dd2e7677ca7fc6631b234773 | [
"Apache-2.0"
] | 4 | 2021-06-14T08:40:18.000Z | 2021-07-27T20:01:08.000Z | torchmetrics/__about__.py | karthikrangasai/metrics | 93cb842f24d15804dd2e7677ca7fc6631b234773 | [
"Apache-2.0"
] | null | null | null | __version__ = "0.7.0dev"
__author__ = "PyTorchLightning et al."
__author_email__ = "name@pytorchlightning.ai"
__license__ = "Apache-2.0"
__copyright__ = f"Copyright (c) 2020-2021, {__author__}."
__homepage__ = "https://github.com/PyTorchLightning/metrics"
__docs__ = "PyTorch native Metrics"
__docs_url__ = "https://torchmetrics.readthedocs.io/en/stable/"
# Long package description used in setup metadata; grammar/spelling fixed
# ("we continuously is adding" -> "we are continuously adding",
#  "ect." -> "etc.", "Pytorch" -> "PyTorch").
__long_doc__ = """
Torchmetrics is a metrics API created for easy metric development and usage in both PyTorch and
[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
PyTorch Lightning, but got split off so users could take advantage of the large collection of metrics
implemented without having to install PyTorch Lightning (even though we would love for you to try it out).
We currently have around 25+ metrics implemented and we are continuously adding more metrics, both within
already covered domains (classification, regression etc.) but also new domains (object detection etc.).
We make sure that all our metrics are rigorously tested such that you can trust them.
"""
__all__ = [
    "__author__",
    "__author_email__",
    "__copyright__",
    "__docs__",
    "__homepage__",
    "__license__",
    "__version__",
]
| 44.035714 | 106 | 0.76399 |
f57152276073d7dba3206f9b6a673c245f6fa77a | 18,555 | py | Python | utils/matrix_utils.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | 1 | 2020-05-16T07:10:51.000Z | 2020-05-16T07:10:51.000Z | utils/matrix_utils.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | null | null | null | utils/matrix_utils.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | 1 | 2022-02-10T03:15:28.000Z | 2022-02-10T03:15:28.000Z | import inspect
from abc import ABCMeta, abstractmethod
from copy import deepcopy as _deepcopy, copy as _copy
import sympy as sp
import wrapt
import itertools
from utils.func_utils import get_cached_func_spec, make_function
from structdict import StructDict, OrderedStructDict
import numpy as np
from numpy.lib.stride_tricks import as_strided as _as_strided
import scipy.linalg as scl
import scipy.sparse as scs
from collections import namedtuple as NamedTuple
from utils.decorator_utils import cache_hashable_args
import functools
def is_scalar_like(val):
    """True when ``val`` has no ``shape`` attribute or a shape of all ones."""
    return all(dim == 1 for dim in getattr(val, 'shape', (1,)))
def matmul(self, other):
    """Multiply two operands: elementwise ``*`` if either is scalar-like, else ``@``."""
    scalarish = is_scalar_like(self) or is_scalar_like(other)
    return self * other if scalarish else self @ other
def atleast_2d_col(arr, dtype=None, order=None) -> np.ndarray:
    """View ``arr`` as at least 2-D, promoting 1-D input to a column vector.

    Unlike ``np.atleast_2d`` (which produces a row), a 1-D input becomes a
    column of shape (n, 1).
    """
    arr = np.asanyarray(arr, dtype=dtype, order=order)
    if arr.ndim >= 2:
        return arr
    if arr.ndim == 1:
        return arr[:, np.newaxis]
    return arr.reshape(1, 1)
def _atleast_3d_col(arr, dtype=None, order=None):
    """View ``arr`` as at least 3-D: () -> (1,1,1), (n,) -> (n,1,1), (n,m) -> (1,n,m)."""
    arr = np.asanyarray(arr, dtype=dtype, order=order)
    if arr.ndim >= 3:
        return arr
    if arr.ndim == 2:
        return arr[np.newaxis, :]
    if arr.ndim == 1:
        return arr[:, np.newaxis, np.newaxis]
    return arr.reshape(1, 1, 1)
def block_diag_dense_same_shape(mats, format=None, dtype=None):
    """Dense block diagonal of ``k`` equally shaped (n, m) blocks via a stride trick.

    ``format`` is accepted for signature compatibility with the sparse
    variant and is ignored here.  All blocks must share one shape.
    """
    arrs = _atleast_3d_col(mats, dtype=dtype)
    k, n, m = arrs.shape
    arrs = arrs.reshape(k * n, m)
    # Stack all blocks into the first m columns of a (k*n, k*m) zero matrix.
    vals = np.zeros(shape=(k * n, k * m), dtype=arrs.dtype)
    vals[:, :m] = arrs
    item_size = arrs.itemsize
    shape = (k, n, k * m)
    # Stride so each successive block appears shifted m columns to the
    # right; flattening the contiguous view yields the block diagonal.
    strides = ((k * n - 1) * m * item_size, k * m * item_size, item_size)
    strided = np.ascontiguousarray(_as_strided(vals, shape=shape, strides=strides))
    block_diag = strided.reshape(n * k, m * k)
    return block_diag
def block_diag_dense(mats, format=None, dtype=None):
    """Dense block diagonal of ``mats``, picking the faster backend per input size."""
    # scl.blockdiag is faster for large matrices or a large number of matrices.
    stacked = _atleast_3d_col(mats)
    use_strided = stacked.dtype != np.object_ and np.prod(stacked.shape) < 720
    if use_strided:
        result = block_diag_dense_same_shape(stacked, format=format, dtype=dtype)
    else:
        result = scl.block_diag(*stacked)
    return result if dtype is None else result.astype(dtype)
import timeit
def block_diag_test(a, number=1000):
    """Benchmark block_diag_dense() against scipy.linalg.block_diag.

    Prints both timings and the result dtypes, and returns True when the
    two implementations produce identical matrices.
    """
    def t1():
        return block_diag_dense(a)
    def t2():
        return scl.block_diag(*a)
    # timeit evaluates the "t1()" snippet with this function's locals as globals
    tt1 = timeit.timeit("t1()", globals=locals(), number=number)
    print("block_diag_dense", tt1)
    tt2 = timeit.timeit("t2()", globals=locals(), number=number)
    print("scl.block_diag", tt2)
    # note: rebinds t1/t2 from the timed callables to their results
    t1 = t1()
    t2 = t2()
    print("t1", t1.dtype)
    print("t2", t2.dtype)
    return np.array_equal(t1, t2)
def create_object_array(tup):
    """Wrap the elements of ``tup`` in a 1-D object-dtype ndarray.

    Keeps tuples/arrays intact as elements, where ``np.array`` would try to
    broadcast them into a multi-dimensional array.
    """
    try:
        size = len(tup)
    except TypeError:
        raise TypeError("tup must be array like.")
    obj_arr = np.empty(size, dtype=np.object_)
    for idx, element in enumerate(tup):
        obj_arr[idx] = element
    return obj_arr
def block_toeplitz(c_tup, r_tup=None, sparse=False):
    """
    Based on scipy.linalg.toeplitz method but applied in a block fashion.

    ``c_tup`` holds the blocks of the first block-column, ``r_tup`` those of
    the first block-row (defaults to the conjugate of ``c_tup`` for numeric
    data).  With ``sparse=True`` the result is assembled as a scipy.sparse
    matrix when the blocks allow it.
    """
    try:
        c = np.array(c_tup)
    except ValueError:
        # ragged / incompatible block shapes: fall back to an object array
        c = create_object_array(c_tup)
    if r_tup is None:
        if np.issubdtype(c.dtype, np.number):
            r = c.conjugate()
        else:
            r = c
    else:
        try:
            r = np.array(r_tup)
        except ValueError:
            r = create_object_array(r_tup)
    c = _atleast_3d_col(c)
    r = _atleast_3d_col(r)
    # # Form a array containing a reversed c followed by r[1:] that could be strided to give us a toeplitz matrix.
    try:
        vals = np.concatenate((c[::-1], r[1:]))
    except ValueError as ve:
        raise ValueError("Incompatible dimensions in c_tup or between c_tup and r_tup - " + ve.args[0])
    stride_shp = (c.shape[0], c.shape[1], r.shape[0], r.shape[2])
    out_shp = (c.shape[0] * c.shape[1], r.shape[0] * r.shape[2])
    n, m, k = vals.strides
    # Negative first stride walks backwards through the reversed-c prefix so
    # each block-row starts one block earlier, producing the Toeplitz layout.
    strided = np.ascontiguousarray(_as_strided(vals[c.shape[0] - 1:], shape=stride_shp, strides=(-n, m, n, k)))
    np_toeplitz = strided.reshape(out_shp)
    if sparse:
        if np_toeplitz.dtype != np.object_:
            return scs.csr_matrix(np_toeplitz)
        elif all(isinstance(block, scs.csr_matrix) for block in np_toeplitz.flat):
            # CSR blocks: stack columns first for an efficient final CSR matrix
            v_stacked = [scs.bmat(np.atleast_2d(col).T).tocsc() for col in np_toeplitz.T]
            return scs.bmat(np.atleast_2d(v_stacked)).tocsr()
        else:
            h_stacked = [scs.bmat(np.atleast_2d(row)).tocsr() for row in np_toeplitz]
            return scs.bmat(np.atleast_2d(h_stacked).T).tocsc()
    else:
        return np_toeplitz
def block_toeplitz_alt(c_tup, r_tup=None, sparse=False):
    """Alternative block-Toeplitz construction using np.block on object arrays.

    Same contract as block_toeplitz(); the blocks are always kept in an
    object array and assembled with ``np.block`` instead of the dense
    stride trick.
    """
    c = create_object_array(c_tup)
    if r_tup is None:
        try:
            r = c.conjugate()
        except AttributeError:
            r = c
    else:
        r = create_object_array(r_tup)
    # # Form a 1D array containing a reversed c followed by r[1:] that could be
    # # strided to give us toeplitz matrix.
    vals = np.concatenate((c[::-1], r[1:]))
    out_shp = c.shape[0], r.shape[0]
    n = vals.strides[0]
    strided = _as_strided(vals[len(c) - 1:], shape=out_shp, strides=(-n, n))
    np_toep = np.block(strided.tolist())
    if sparse:
        if all(isinstance(block, scs.csr_matrix) for block in np_toep.flat):
            # CSR blocks: stack columns first for an efficient final CSR matrix
            v_stacked = [scs.bmat(np.atleast_2d(col).T).tocsc() for col in np_toep.T]
            return scs.bmat(np.atleast_2d(v_stacked)).tocsr()
        else:
            h_stacked = [scs.bmat(np.atleast_2d(row)).tocsr() for row in np_toep]
            return scs.bmat(np.atleast_2d(h_stacked).T).tocsc()
    else:
        return np_toep
# Field names shared by the dense and sparse operation bundles returned by
# get_mat_ops(); each bundle is a namedtuple with these attributes.
_MatOpsNames = ['package',
                'linalg',
                'sclinalg',
                'block_diag',
                'vmatrix',
                'hmatrix',
                'zeros',
                'vstack',
                'hstack',
                'matmul']
_MatOpsNameTup = NamedTuple('MatOps', _MatOpsNames)
def pass_through(a):
    """Identity helper: return ``a`` unchanged."""
    return a
@cache_hashable_args(maxsize=2)
def get_mat_ops(sparse=False):
    """Return the bundle of matrix operations for dense or sparse work.

    The result is a ``MatOps`` namedtuple exposing the package handles and
    the stack/zeros/matmul helpers appropriate for the chosen backend.
    """
    if sparse:
        mat_ops = _MatOpsNameTup(
            package=scs,
            linalg=scs,
            sclinalg=scs,
            block_diag=scs.block_diag,
            vmatrix=scs.csr_matrix,
            hmatrix=scs.csc_matrix,
            zeros=scs.csr_matrix,
            vstack=scs.vstack,
            hstack=scs.hstack,
            # fixed: ``functools.partial(matmul, sparse=True)`` passed a
            # keyword that matmul() does not accept, so every sparse matmul
            # call raised TypeError.  matmul() already supports sparse
            # operands through the ``*`` and ``@`` operators.
            matmul=matmul
        )
    else:
        mat_ops = _MatOpsNameTup(
            package=np,
            linalg=np.linalg,
            sclinalg=scl,
            block_diag=block_diag_dense,
            vmatrix=np.atleast_2d,
            hmatrix=np.atleast_2d,
            zeros=np.zeros,
            vstack=np.vstack,
            hstack=np.hstack,
            matmul=matmul
        )
    return mat_ops
def get_expr_shape(expr):
    """Return the 2-D shape of ``expr``.

    ``None`` maps to (0, 0); scalars and sympy expressions to (1, 1);
    callables are wrapped in CallableMatrix to obtain a shape.  Arrays with
    more than two dimensions are rejected.
    """
    try:
        expr_shape = expr.shape
    except AttributeError:
        pass
    else:
        if len(expr_shape) <= 2:
            return expr_shape
        raise NotImplementedError("Maximum supported dimension is 2, got {}".format(len(expr_shape)))
    if expr is None:
        return (0, 0)
    if np.isscalar(expr) or isinstance(expr, sp.Expr):
        return (1, 1)
    if callable(expr):
        return CallableMatrix(expr).shape
    raise TypeError("Invalid expression type: '{0}', for expr: '{1!s}'".format(type(expr), expr))
def get_expr_shapes(*exprs, get_max_dim=False):
    """Shapes of several expressions; optionally the elementwise maximum shape.

    A single dict argument yields a StructDict of shapes keyed like the
    input; otherwise a list of shapes is produced.  With ``get_max_dim``
    the elementwise maximum over all shapes is returned as a tuple.
    """
    if not exprs:
        return None
    first = exprs[0]
    if isinstance(first, dict):
        shapes = StructDict({expr_id: get_expr_shape(expr) for expr_id, expr in first.items()})
    else:
        shapes = [get_expr_shape(expr) for expr in exprs]
    if not get_max_dim:
        return shapes
    shape_list = list(shapes.values()) if isinstance(shapes, dict) else shapes
    return tuple(np.maximum.reduce(shape_list))
class CallableMatrixMeta(ABCMeta):
    """Metaclass that aggregates ``__slots__`` across a class's MRO.

    The combined slot names are stored on the class as ``_all_slots`` so
    instances can be cloned attribute-by-attribute.
    """
    def __new__(cls, *args, **kwargs):
        kls = super(CallableMatrixMeta, cls).__new__(cls, *args, **kwargs)
        mro = kls.mro()
        # every __slots__ entry declared anywhere in the MRO, minus __dict__
        all_slots = set(itertools.chain.from_iterable(klass.__dict__.get("__slots__", ()) for klass in mro))
        all_slots.discard('__dict__')
        kls._all_slots = tuple(all_slots)
        return kls
    def __call__(cls, *args, **kwargs):
        # Delegate construction entirely to __new__;
        # CallableMatrixBase.__new__ selects the concrete subclass and
        # runs __init__ itself via _constructor().
        return cls.__new__(cls, *args, **kwargs)
class CallableMatrixBase(metaclass=CallableMatrixMeta):
    """Common machinery for callable matrix wrappers.

    ``__new__`` probes the wrapped function by calling it with all-NaN
    arguments; when the result is entirely finite the matrix cannot depend
    on any parameter, so a CallableMatrixConstant is built, otherwise a
    CallableMatrix.
    """
    @staticmethod
    def constant_matrix_func(constant):
        # Wrap a constant value in a zero-argument function.
        def _constant_matrix_func():
            return constant
        _constant_matrix_func.__qualname__ = _constant_matrix_func.__name__ = 'constant_matrix_func'
        return _constant_matrix_func
    @classmethod
    def _constructor(cls, *args, **kwargs):
        # Allocate and initialise directly, bypassing the metaclass __call__
        # (which would re-enter __new__'s dispatch logic).
        self = super(CallableMatrixBase, cls).__new__(cls)
        self.__init__(*args, **kwargs)
        return self
    def _constructor_from_self(self):
        # Clone by copying every slot recorded by the metaclass, then
        # re-running __init__ with self as the source matrix.
        obj = super(CallableMatrixBase, type(self)).__new__(type(self))
        for attr in self._all_slots:
            setattr(obj, attr, getattr(self, attr))
        obj.__init__(self)
        return obj
    def copy(self):
        """Return a clone of this callable matrix."""
        return self._constructor_from_self()
    __copy__ = copy
    def deepcopy(self, memo=None):
        """Return a clone; slot contents are shared, not deep-copied."""
        return self._constructor_from_self()
    __deepcopy__ = deepcopy
    def __new__(cls, matrix, matrix_name=None):
        # Probe with NaNs: a fully finite result means the function ignores
        # its parameters, i.e. the matrix is constant.
        matrix_func = cls._process_matrix_func(matrix)
        nan_call = cls._nan_call(matrix_func)
        if np.all(np.isfinite(nan_call)):
            return CallableMatrixConstant._constructor(matrix_func, matrix_name, _nan_call=nan_call)
        else:
            return CallableMatrix._constructor(matrix_func, matrix_name, _nan_call=nan_call)
    @abstractmethod
    def __init__(self, *args, **kwargs):
        super(CallableMatrixBase, self).__init__(*args, **kwargs)
    @classmethod
    def _process_matrix_func(cls, matrix):
        """Normalise ``matrix`` (function, method, sympy expr, or constant) to a plain function."""
        if inspect.isfunction(matrix):
            func = matrix
        elif inspect.ismethod(matrix):
            func = matrix.__func__
        elif isinstance(matrix, (sp.Expr, sp.Matrix)):
            # Lambdify a sympy expression over its sorted free symbols.
            system_matrix = sp.Matrix(matrix)
            param_sym_tup = cls._get_param_sym_tup(system_matrix)
            func = sp.lambdify(param_sym_tup, system_matrix, modules="numpy", dummify=False)
        else:
            func = cls.constant_matrix_func(atleast_2d_col(matrix))
        return func
    @staticmethod
    def _nan_call(matrix_func):
        """Call ``matrix_func`` with every argument set to NaN and freeze the result.

        Used to determine the (assumed argument-independent) output shape.
        """
        f_spec = get_cached_func_spec(matrix_func, reset_cache=True)
        kwargs = {param_name: np.NaN for param_name in f_spec.all_kw_params}
        args = [np.NaN] * len(f_spec.pos_only_params)
        try:
            ret_val = atleast_2d_col(matrix_func(*args, **kwargs))
            ret_val.setflags(write=False)
            return ret_val
        except TypeError:
            msg = f"_nan_call() failed, it is likely that the matrix function does not have a constant shape.\n"
            note = (
                "Note: all callable expressions must return with a constant array shape that does not depend on its "
                "arguments. Shape is determined by calling the function with all arguments set to a float with value "
                "NaN.")
            raise TypeError(msg + note)
    @staticmethod
    def _get_param_sym_tup(expr):
        """Free symbols of a sympy ``expr``, sorted by name; () for non-sympy input."""
        try:
            sym_dict = {str(sym): sym for sym in expr.free_symbols}
            param_sym_tup = tuple([sym_dict.get(sym) for sym in sorted(sym_dict.keys())])
        except AttributeError:
            param_sym_tup = ()
        return param_sym_tup
class CallableMatrix(CallableMatrixBase, wrapt.decorators.AdapterWrapper):
    """Callable wrapper around a parameterised matrix function.

    The wrapped function is adapted to additionally accept a keyword-only
    ``param_struct`` mapping (supplying keyword arguments by name), and
    matrix metadata (shape, size, dtype, ...) is precomputed once from the
    NaN probe call performed in CallableMatrixBase.__new__.
    """
    __slots__ = ('_self_matrix_name', '_self_wrapped_name', '_self_adapter_spec', '_self_shape', '_self_size',
                 '_self_ndim', '_self_dtype', '_self_nbytes', '_self_itemsize', '_self_is_empty', '_self_is_all_zero',
                 '_self_is_constant')
    def __init__(self, matrix, matrix_name=None, **kwargs):
        if isinstance(matrix, type(self)):
            # Cloning path: reuse the existing wrapper/adapter pair.
            matrix_func = matrix.__wrapped__
            super(CallableMatrix, self).__init__(wrapped=matrix_func, wrapper=matrix._self_wrapper, enabled=None,
                                                 adapter=matrix._self_adapter)
        else:
            matrix_func = self._process_matrix_func(matrix)
            self._self_matrix_name = matrix_name if matrix_name is not None else matrix_func.__name__
            self._self_wrapped_name = matrix_func.__name__
            # Rename the wrapped function (and its qualname) to the matrix name.
            matrix_func.__name__ = self._self_matrix_name
            matrix_func.__qualname__ = (
                "".join(matrix_func.__qualname__.rsplit('.', 1)[:-1] + ['.', matrix_func.__name__]).lstrip('.'))
            self._self_wrapped_f_spec = get_cached_func_spec(matrix_func)
            adapter = self._gen_callable_matrix_adapter(self._self_wrapped_f_spec)
            self._self_adapter_spec = get_cached_func_spec(adapter, bypass_cache=True)
            wrapper = self._matrix_wrapper
            super(CallableMatrix, self).__init__(wrapped=matrix_func, wrapper=wrapper, enabled=None, adapter=adapter)
        # Metadata from the NaN probe (reused from __new__ when supplied).
        _nan_call = kwargs.get('_nan_call')
        nan_call = _nan_call if _nan_call is not None else self._nan_call(matrix_func)
        self._self_shape = get_expr_shape(nan_call)
        self._self_size = nan_call.size
        self._self_ndim = nan_call.ndim
        self._self_dtype = nan_call.dtype
        self._self_nbytes = nan_call.nbytes
        self._self_itemsize = nan_call.itemsize
        self._self_is_empty = False if self._self_size else True
        self._self_is_all_zero = np.all(nan_call == 0)
        self._self_is_constant = np.all(np.isfinite(nan_call))
        if self._self_is_constant:
            # Constant matrices must use the CallableMatrixConstant subclass.
            if type(self) == CallableMatrix:
                raise TypeError(f"Cannot initialize {type(self).__name__} object with constant matrix.")
            self._self_constant = nan_call
        else:
            self._self_constant = None
    def _matrix_wrapper(self, wrapped, instance, args, kwargs):
        """wrapt wrapper: merge ``param_struct`` into kwargs, call, and freeze the 2-D result."""
        param_struct = kwargs.pop('param_struct', None)
        if param_struct and self._self_wrapped_f_spec.all_kw_params:
            try:
                duplicates = set(kwargs).intersection(param_struct) if kwargs else None
                # Only forward entries the wrapped function actually accepts.
                kwargs.update(
                    {name: param_struct[name] for name in
                     set(self._self_wrapped_f_spec.all_kw_params).intersection(param_struct)})
            except TypeError as te:
                msg = f"'param_struct' must be dictionary like or None: {te.args[0]}"
                raise TypeError(msg).with_traceback(te.__traceback__) from None
            else:
                if duplicates:
                    raise TypeError(
                        f"{wrapped.__name__}() got multiple values for argument '{duplicates.pop()}' - values in "
                        f"kwargs are duplicated in param_struct.")
        try:
            retval = wrapped(*args, **kwargs)
        except TypeError as te:
            # Re-raise with the user-facing matrix name instead of the
            # original function name.
            msg = te.args[0].replace(self._self_wrapped_name, wrapped.__name__)
            raise TypeError(msg).with_traceback(te.__traceback__) from None
        if getattr(retval, 'ndim', 0) < 2:
            retval = atleast_2d_col(retval)
        if isinstance(retval, np.ndarray):
            # Returned arrays are read-only to protect any cached state.
            retval.setflags(write=False)
        return retval
    def _gen_callable_matrix_adapter(self, f_spec):
        """Build an adapter signature equal to the wrapped one plus keyword-only ``param_struct``."""
        f_args_spec_struct = OrderedStructDict(f_spec.arg_spec._asdict()).deepcopy()
        f_args_spec_struct.kwonlyargs.append('param_struct')
        if f_args_spec_struct.kwonlydefaults:
            f_args_spec_struct.kwonlydefaults.update({'param_struct': None})
        else:
            f_args_spec_struct.kwonlydefaults = {'param_struct': None}
        f_args_spec = inspect.FullArgSpec(**f_args_spec_struct)
        adapter = make_function(f_args_spec, name='adapter')
        return adapter
    def __reduce__(self):
        # Pickle support: rebuild from the raw wrapped function and name.
        return (type(self), (self.__wrapped__, self._self_matrix_name))
    @property
    def __name__(self):
        return self._self_matrix_name
    @property
    def __class__(self):
        # Report the wrapper class rather than the proxied function's class.
        return type(self)
    @property
    def _f_spec(self):
        return self._self_adapter_spec
    @_f_spec.setter
    def _f_spec(self, f_spec):
        self._self_adapter_spec = f_spec
    @property
    def __signature__(self):
        return self._self_adapter_spec.signature
    @property
    def required_params(self):
        # Keyword parameter names the wrapped matrix function accepts.
        return self._self_wrapped_f_spec.all_kw_params
    @property
    def matrix_name(self):
        return self._self_matrix_name
    @property
    def shape(self):
        return self._self_shape
    @property
    def size(self):
        return self._self_size
    @property
    def ndim(self):
        return self._self_ndim
    @property
    def dtype(self):
        return self._self_dtype
    @property
    def nbytes(self):
        return self._self_nbytes
    @property
    def itemsize(self):
        return self._self_itemsize
    @property
    def is_empty(self):
        return self._self_is_empty
    @property
    def is_all_zero(self):
        return self._self_is_all_zero
    @property
    def is_constant(self):
        return self._self_is_constant
    def __repr__(self):
        empty_str = f", shape={self._self_shape}" if not self._self_size else ""
        return f"<{self.__class__.__name__} {self.__name__}{self.__signature__}{empty_str}>"
    def __str__(self):
        return self.__repr__()
    def __dir__(self):
        # Expose both the wrapped function's attributes and the wrapper's own.
        wrapped_dir = set(dir(self.__wrapped__))
        added_dir = set(itertools.chain.from_iterable([kls.__dict__ for kls in type(self).mro()]))
        rv = wrapped_dir | added_dir
        return sorted(rv)
class CallableMatrixConstant(CallableMatrix):
    """CallableMatrix specialisation for constant (parameter-free) matrices.

    Calling an instance short-circuits to the cached constant value instead
    of invoking the wrapped function.
    """
    def __init__(self, matrix, matrix_name=None, **kwargs):
        super(CallableMatrixConstant, self).__init__(matrix, matrix_name=matrix_name, **kwargs)
        if not self.is_constant:
            raise TypeError(f"Cannot initialize {type(self).__name__} object with non-constant matrix.")
    def __call__(self, *, param_struct=None):
        # ``param_struct`` is accepted for interface compatibility; the
        # result never depends on it.
        return self._self_constant
| 32.957371 | 118 | 0.631205 |
22884c81bd33731528e7eb53a4b2d4683fd3cb35 | 6,621 | py | Python | test/hummingbot/connector/exchange/probit/test_probit_api_user_stream_data_source.py | pecuniafinance/hummingbot | 2cbb19c187a429d3e6000dc938617ca2a1f9f357 | [
"Apache-2.0"
] | 542 | 2021-12-17T22:34:31.000Z | 2022-03-31T14:36:23.000Z | test/hummingbot/connector/exchange/probit/test_probit_api_user_stream_data_source.py | pecuniafinance/hummingbot | 2cbb19c187a429d3e6000dc938617ca2a1f9f357 | [
"Apache-2.0"
] | 291 | 2021-12-17T20:07:53.000Z | 2022-03-31T11:07:23.000Z | test/hummingbot/connector/exchange/probit/test_probit_api_user_stream_data_source.py | pecuniafinance/hummingbot | 2cbb19c187a429d3e6000dc938617ca2a1f9f357 | [
"Apache-2.0"
] | 220 | 2021-12-17T12:41:23.000Z | 2022-03-31T23:03:22.000Z | import asyncio
import json
import unittest
# fixed: ``from collections import Awaitable`` -- the collections aliases of
# the abc classes were removed in Python 3.10; import from collections.abc.
from collections.abc import Awaitable
from typing import Optional
from unittest.mock import patch, AsyncMock
from aiohttp import WSMsgType
from hummingbot.connector.exchange.probit.probit_api_user_stream_data_source import (
    ProbitAPIUserStreamDataSource
)
from hummingbot.connector.exchange.probit.probit_auth import ProbitAuth
from hummingbot.connector.exchange.probit import probit_constants as CONSTANTS
from test.hummingbot.connector.network_mocking_assistant import NetworkMockingAssistant
class ProbitAPIUserStreamDataSourceTest(unittest.TestCase):
    """Unit tests for the ProBit user-stream data source (websocket listener)."""

    # logging.Level required to receive logs from the data source logger
    level = 0

    @classmethod
    def setUpClass(cls) -> None:
        cls.base_asset = "BTC"
        cls.quote_asset = "USDT"
        cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"

    def setUp(self) -> None:
        super().setUp()
        self.ev_loop = asyncio.get_event_loop()

        self.api_key = "someKey"
        self.api_secret = "someSecret"
        self.auth = ProbitAuth(self.api_key, self.api_secret)
        self.data_source = ProbitAPIUserStreamDataSource(
            self.auth, trading_pairs=[self.trading_pair]
        )
        # The test case itself is registered as a logging handler (see
        # handle() below) so emitted log records can be asserted on.
        self.data_source.logger().setLevel(1)
        self.data_source.logger().addHandler(self)
        self.log_records = []
        self.mocking_assistant = NetworkMockingAssistant()
        # Background listener task; cancelled in tearDown if it was started.
        self.async_task: Optional[asyncio.Task] = None

    def tearDown(self) -> None:
        self.async_task and self.async_task.cancel()
        super().tearDown()

    def handle(self, record):
        # logging.Handler protocol: collect records for check_is_logged().
        self.log_records.append(record)

    def check_is_logged(self, log_level: str, message: str) -> bool:
        """Return True if a record with the given level and message was logged."""
        return any(
            record.levelname == log_level and record.getMessage() == message
            for record in self.log_records
        )

    def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
        """Run *coroutine* on the test event loop, failing after *timeout* seconds."""
        ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
        return ret

    @patch("aiohttp.client.ClientSession.ws_connect", new_callable=AsyncMock)
    @patch(
        "hummingbot.connector.exchange.probit.probit_auth.ProbitAuth.get_ws_auth_payload",
        new_callable=AsyncMock,
    )
    def test_listen_for_user_stream(self, get_ws_auth_payload_mock, ws_connect_mock):
        # Happy path: the source authenticates, subscribes to every private
        # channel and forwards received messages to the output queue.
        auth_msg = {
            "type": "authorization",
            "token": "someToken"
        }
        get_ws_auth_payload_mock.return_value = auth_msg
        ws_connect_mock.return_value = self.mocking_assistant.create_websocket_mock()
        self.mocking_assistant.add_websocket_json_message(
            ws_connect_mock.return_value, message={"result": "ok"}  # authentication
        )
        self.mocking_assistant.add_websocket_aiohttp_message(
            ws_connect_mock.return_value, message=json.dumps({"my_msg": "test"})  # first message
        )
        output_queue = asyncio.Queue()
        self.async_task = self.ev_loop.create_task(
            self.data_source.listen_for_user_stream(output_queue)
        )
        self.mocking_assistant.run_until_all_json_messages_delivered(ws_connect_mock.return_value)
        self.mocking_assistant.run_until_all_aiohttp_messages_delivered(ws_connect_mock.return_value)

        self.assertFalse(output_queue.empty())

        sent_text_msgs = self.mocking_assistant.text_messages_sent_through_websocket(ws_connect_mock.return_value)
        self.assertEqual(auth_msg, json.loads(sent_text_msgs[0]))

        sent_json_msgs = self.mocking_assistant.json_messages_sent_through_websocket(ws_connect_mock.return_value)
        for sent_json_msg in sent_json_msgs:
            self.assertEqual("subscribe", sent_json_msg["type"])
            self.assertIn(sent_json_msg["channel"], CONSTANTS.WS_PRIVATE_CHANNELS)
            # NOTE(review): this mutates the shared CONSTANTS list to verify each
            # channel is subscribed exactly once; it leaves the constant empty
            # afterwards, which would affect later tests in the same process.
            CONSTANTS.WS_PRIVATE_CHANNELS.remove(sent_json_msg["channel"])
        self.assertEqual(0, len(CONSTANTS.WS_PRIVATE_CHANNELS))

        self.assertNotEqual(0, self.data_source.last_recv_time)

    @patch("aiohttp.client.ClientSession.ws_connect")
    @patch(
        "hummingbot.connector.exchange.probit.probit_api_user_stream_data_source.ProbitAPIUserStreamDataSource._sleep",
        new_callable=AsyncMock,
    )
    def test_listen_for_user_stream_attempts_again_on_exception(self, sleep_mock, ws_connect_mock):
        # A connection failure should be logged and retried after a back-off
        # sleep; the patched _sleep signals the event so the test can proceed.
        called_event = asyncio.Event()

        async def _sleep(delay):
            called_event.set()
            await asyncio.sleep(delay)

        sleep_mock.side_effect = _sleep
        ws_connect_mock.side_effect = Exception

        self.async_task = self.ev_loop.create_task(
            self.data_source.listen_for_user_stream(asyncio.Queue())
        )
        self.async_run_with_timeout(called_event.wait())

        self.check_is_logged(
            log_level="ERROR",
            message="Unexpected error with Probit WebSocket connection. Retrying after 30 seconds...",
        )

    @patch("aiohttp.client.ClientSession.ws_connect")
    def test_listen_for_user_stream_stops_on_asyncio_cancelled_error(self, ws_connect_mock):
        # Cancellation must propagate out of the listener rather than be retried.
        ws_connect_mock.side_effect = asyncio.CancelledError

        with self.assertRaises(asyncio.CancelledError):
            self.async_run_with_timeout(
                self.data_source.listen_for_user_stream(asyncio.Queue())
            )

    @patch("aiohttp.client.ClientSession.ws_connect", new_callable=AsyncMock)
    @patch(
        "hummingbot.connector.exchange.probit.probit_auth.ProbitAuth.get_ws_auth_payload",
        new_callable=AsyncMock,
    )
    def test_listen_for_user_stream_registers_ping_msg(self, get_ws_auth_payload_mock, ws_connect_mock):
        # A PING frame must be answered with a pong and not enqueued as data.
        auth_msg = {
            "type": "authorization",
            "token": "someToken"
        }
        get_ws_auth_payload_mock.return_value = auth_msg
        ws_connect_mock.return_value = self.mocking_assistant.create_websocket_mock()
        self.mocking_assistant.add_websocket_json_message(
            ws_connect_mock.return_value, message={"result": "ok"}  # authentication
        )
        self.mocking_assistant.add_websocket_aiohttp_message(
            ws_connect_mock.return_value, message="", message_type=WSMsgType.PING
        )
        output_queue = asyncio.Queue()
        self.async_task = self.ev_loop.create_task(
            self.data_source.listen_for_user_stream(output_queue)
        )
        self.mocking_assistant.run_until_all_aiohttp_messages_delivered(ws_connect_mock.return_value)

        self.assertTrue(output_queue.empty())
        ws_connect_mock.return_value.pong.assert_called()
| 39.177515 | 119 | 0.711675 |
cb5454ff109f62c4911edb768f032f7b6de80fdb | 887 | py | Python | PyMesh/python/pymesh/meshutils/manifold_check.py | VincentLefevre/3D-parallax | 8eab905fcc591e1bd7ddbbb01ad21427286c02e3 | [
"MIT"
] | 73 | 2021-01-05T07:25:51.000Z | 2022-03-17T20:46:01.000Z | PyMesh/python/pymesh/meshutils/manifold_check.py | VincentLefevre/3D-parallax | 8eab905fcc591e1bd7ddbbb01ad21427286c02e3 | [
"MIT"
] | 1 | 2021-01-05T11:45:36.000Z | 2021-01-05T20:55:28.000Z | PyMesh/python/pymesh/meshutils/manifold_check.py | VincentLefevre/3D-parallax | 8eab905fcc591e1bd7ddbbb01ad21427286c02e3 | [
"MIT"
] | 4 | 2021-01-05T10:27:50.000Z | 2021-01-06T12:02:57.000Z | import PyMesh
from .. import Mesh
def is_vertex_manifold(mesh):
    """Per-vertex manifold check.

    The result is stored on the mesh as a per-vertex scalar field named
    "vertex_manifold".
    """
    mesh.add_attribute("vertex_manifold")
    per_vertex_flags = PyMesh.is_vertex_manifold(mesh.faces)
    mesh.set_attribute("vertex_manifold", per_vertex_flags)
def is_edge_manifold(mesh):
    """Per-edge manifold check.

    The result is stored on the mesh as a per-edge scalar field named
    "edge_manifold".
    """
    mesh.add_attribute("edge_manifold")
    per_edge_flags = PyMesh.is_edge_manifold(mesh.faces)
    mesh.set_attribute("edge_manifold", per_edge_flags)
def cut_to_manifold(mesh):
    """Cut the input mesh along nonmanifold edges so that it becomes manifold.

    Note that cutting will produce duplicated vertices.
    """
    raw_result = PyMesh.cut_to_manifold(mesh.raw_mesh)
    return Mesh(raw_result)
| 32.851852 | 76 | 0.713641 |
15960cb39b460bb10b10fbc80a7cc812710cdcdd | 1,490 | py | Python | MSMetaEnhancer/libs/utils/ConverterBuilder.py | RECETOX/MSMetaEnhancer | 863077330740a5b2c91f005599460cee238b2e6f | [
"MIT"
] | null | null | null | MSMetaEnhancer/libs/utils/ConverterBuilder.py | RECETOX/MSMetaEnhancer | 863077330740a5b2c91f005599460cee238b2e6f | [
"MIT"
] | 33 | 2021-11-16T15:12:45.000Z | 2022-03-31T08:59:50.000Z | MSMetaEnhancer/libs/utils/ConverterBuilder.py | RECETOX/MSMetaEnhancer | 863077330740a5b2c91f005599460cee238b2e6f | [
"MIT"
] | null | null | null | from MSMetaEnhancer.libs.converters.web import *
from MSMetaEnhancer.libs.converters.web import __all__ as web_converters
from MSMetaEnhancer.libs.converters.compute import *
from MSMetaEnhancer.libs.converters.compute import __all__ as compute_converters
from MSMetaEnhancer.libs.utils.Errors import UnknownConverter
class ConverterBuilder:
    """Validates and instantiates metadata converters by name.

    Converter names are resolved against the public converter lists
    (``web_converters`` / ``compute_converters``) and this module's globals
    instead of ``eval``, which avoids executing arbitrary expressions derived
    from user-supplied strings and rejects names that are not converters.
    """

    @staticmethod
    def validate_converters(converters):
        """
        Check if converters do exist.

        Raises UnknownConverter if a converter does not exist.

        :param converters: given list of converters names
        """
        known = set(web_converters) | set(compute_converters)
        for converter in converters:
            if converter not in known:
                raise UnknownConverter(f'Converter {converter} unknown.')

    @staticmethod
    def build_converters(session, converters: list):
        """
        Create provided converters.

        :param session: given aiohttp session (passed to web converters only)
        :param converters: list of converters to be built
        :return: tuple (all built converters, web converters only)
        """
        built_web_converters, built_converters = dict(), dict()
        for converter in converters:
            # The star-imports at the top of the module place every known
            # converter class in this module's globals, so a plain name
            # lookup replaces the previous eval() call.
            if converter in web_converters:
                built_web_converters[converter] = globals()[converter](session)
            elif converter in compute_converters:
                built_converters[converter] = globals()[converter]()
        # The combined dict contains compute and web converters alike.
        built_converters.update(built_web_converters)
        return built_converters, built_web_converters
| 37.25 | 80 | 0.685906 |
935b3be9bba1daed28e5ddb24bb028b3fc1b9558 | 17,508 | py | Python | ogr/services/pagure/project.py | lachmanfrantisek/ogr | 4bdf2ce1a78b69df28cd890f8775ad546befa5bf | [
"MIT"
] | null | null | null | ogr/services/pagure/project.py | lachmanfrantisek/ogr | 4bdf2ce1a78b69df28cd890f8775ad546befa5bf | [
"MIT"
] | null | null | null | ogr/services/pagure/project.py | lachmanfrantisek/ogr | 4bdf2ce1a78b69df28cd890f8775ad546befa5bf | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from typing import List, Optional, Dict, Set
from urllib.parse import urlparse
from ogr.abstract import (
PRStatus,
GitTag,
CommitFlag,
CommitComment,
CommitStatus,
PullRequest,
Issue,
IssueStatus,
Release,
AccessLevel,
)
from ogr.exceptions import (
OurPagureRawRequest,
PagureAPIException,
OgrException,
OperationNotSupported,
)
from ogr.read_only import if_readonly, GitProjectReadOnly
from ogr.services import pagure as ogr_pagure
from ogr.services.base import BaseGitProject
from ogr.services.pagure.flag import PagureCommitFlag
from ogr.services.pagure.issue import PagureIssue
from ogr.services.pagure.pull_request import PagurePullRequest
from ogr.services.pagure.release import PagureRelease
from ogr.utils import RequestResponse
logger = logging.getLogger(__name__)
class PagureProject(BaseGitProject):
    """Pagure implementation of ogr's GitProject abstraction.

    Wraps the Pagure REST API for a single project (optionally a user's fork).
    """

    service: "ogr_pagure.PagureService"

    def __init__(
        self,
        repo: str,
        namespace: Optional[str],
        service: "ogr_pagure.PagureService",
        username: str = None,
        is_fork: bool = False,
    ) -> None:
        super().__init__(repo, service, namespace)
        self.read_only = service.read_only

        self._is_fork = is_fork
        self._username = username

        self.repo = repo
        self.namespace = namespace

    def __str__(self) -> str:
        fork_info = ""
        if self._is_fork:
            fork_info = f', username="{self._username}", is_fork={self._is_fork}'
        return f'PagureProject(namespace="{self.namespace}", repo="{self.repo}"{fork_info})'

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, PagureProject):
            return False

        return (
            self.repo == o.repo
            and self.namespace == o.namespace
            and self.service == o.service
            and self._username == o._username
            and self._is_fork == o._is_fork
            and self.read_only == o.read_only
        )

    @property
    def _user(self) -> str:
        # Lazily resolved from the authenticated service the first time
        # it is needed (e.g. for fork URLs).
        if not self._username:
            self._username = self.service.user.get_username()
        return self._username

    def _call_project_api(
        self,
        *args,
        add_fork_part: bool = True,
        add_api_endpoint_part=True,
        method: str = None,
        params: dict = None,
        data: dict = None,
    ) -> dict:
        """
        Call project API endpoint.

        :param args: str parts of the url (e.g. "a", "b" will call "project/a/b")
        :param add_fork_part: If the projects is a fork, use "fork/username" prefix, True by default
        :param add_api_endpoint_part: Add part with API endpoint "/api/0/"
        :param method: "GET"/"POST"/...
        :param params: http(s) query parameters
        :param data: data to be sent
        :return: dict
        """
        request_url = self._get_project_url(
            *args,
            add_api_endpoint_part=add_api_endpoint_part,
            add_fork_part=add_fork_part,
        )

        return self.service.call_api(
            url=request_url, method=method, params=params, data=data
        )

    def _call_project_api_raw(
        self,
        *args,
        add_fork_part: bool = True,
        add_api_endpoint_part=True,
        method: str = None,
        params: dict = None,
        data: dict = None,
    ) -> RequestResponse:
        """
        Call project API endpoint.

        :param args: str parts of the url (e.g. "a", "b" will call "project/a/b")
        :param add_fork_part: If the projects is a fork, use "fork/username" prefix, True by default
        :param add_api_endpoint_part: Add part with API endpoint "/api/0/"
        :param method: "GET"/"POST"/...
        :param params: http(s) query parameters
        :param data: data to be sent
        :return: RequestResponse
        """
        request_url = self._get_project_url(
            *args,
            add_api_endpoint_part=add_api_endpoint_part,
            add_fork_part=add_fork_part,
        )

        return self.service.call_api_raw(
            url=request_url, method=method, params=params, data=data
        )

    def _get_project_url(self, *args, add_fork_part=True, add_api_endpoint_part=True):
        # Build the project URL, prefixing "fork/<user>" when this object
        # represents a fork.
        additional_parts = []
        if self._is_fork and add_fork_part:
            additional_parts += ["fork", self._user]
        return self.service.get_api_url(
            *additional_parts,
            self.namespace,
            self.repo,
            *args,
            add_api_endpoint_part=add_api_endpoint_part,
        )

    def get_project_info(self):
        """Return the raw project info dict from the Pagure API."""
        return self._call_project_api(method="GET")

    def get_branches(self) -> List[str]:
        """Return the list of git branch names of the project."""
        return_value = self._call_project_api("git", "branches", method="GET")
        return return_value["branches"]

    def get_description(self) -> str:
        """Return the project description."""
        return self.get_project_info()["description"]

    def get_owners(self) -> List[str]:
        """Return usernames with the "owner" access level."""
        project = self.get_project_info()
        return project["access_users"]["owner"]

    def who_can_close_issue(self) -> Set[str]:
        """Return usernames allowed to close issues (admin/commit/ticket/owner)."""
        users: Set[str] = set()
        project = self.get_project_info()
        users.update(project["access_users"]["admin"])
        users.update(project["access_users"]["commit"])
        users.update(project["access_users"]["ticket"])
        users.update(project["access_users"]["owner"])
        return users

    def who_can_merge_pr(self) -> Set[str]:
        """Return usernames allowed to merge pull requests (admin/commit/owner)."""
        users: Set[str] = set()
        project = self.get_project_info()
        users.update(project["access_users"]["admin"])
        users.update(project["access_users"]["commit"])
        users.update(project["access_users"]["owner"])
        return users

    def can_merge_pr(self, username) -> bool:
        """Return True if *username* may merge pull requests in this project."""
        return username in self.who_can_merge_pr()

    def request_access(self):
        # Pagure's API offers no access-request endpoint.
        raise NotImplementedError("Not possible on Pagure")

    def get_issue_list(
        self,
        status: IssueStatus = IssueStatus.open,
        author: Optional[str] = None,
        assignee: Optional[str] = None,
        labels: Optional[List[str]] = None,
    ) -> List[Issue]:
        """List issues of the project, optionally filtered."""
        return PagureIssue.get_list(
            project=self, status=status, author=author, assignee=assignee, labels=labels
        )

    def get_issue(self, issue_id: int) -> Issue:
        """Return a single issue by its id."""
        return PagureIssue.get(project=self, id=issue_id)

    def create_issue(
        self,
        title: str,
        body: str,
        private: Optional[bool] = None,
        labels: Optional[List[str]] = None,
    ) -> Issue:
        """Create a new issue in this project."""
        return PagureIssue.create(
            project=self, title=title, body=body, labels=labels, private=private
        )

    def get_pr_list(
        self, status: PRStatus = PRStatus.open, assignee=None, author=None
    ) -> List[PullRequest]:
        """List pull requests of the project, optionally filtered."""
        return PagurePullRequest.get_list(
            project=self, status=status, assignee=assignee, author=author
        )

    def get_pr(self, pr_id: int) -> PullRequest:
        """Return a single pull request by its id."""
        return PagurePullRequest.get(project=self, id=pr_id)

    @if_readonly(return_function=GitProjectReadOnly.create_pr)
    def create_pr(
        self,
        title: str,
        body: str,
        target_branch: str,
        source_branch: str,
        fork_username: str = None,
    ) -> PullRequest:
        """Create a pull request from *source_branch* into *target_branch*.

        NOTE(review): the ``fork_username`` parameter is accepted for interface
        compatibility but is not forwarded to PagurePullRequest.create here.
        """
        return PagurePullRequest.create(
            project=self,
            title=title,
            body=body,
            target_branch=target_branch,
            source_branch=source_branch,
        )

    @if_readonly(return_function=GitProjectReadOnly.fork_create)
    def fork_create(self) -> "PagureProject":
        """Fork this project under the authenticated user and return the fork."""
        request_url = self.service.get_api_url("fork")
        self.service.call_api(
            url=request_url,
            method="POST",
            data={"repo": self.repo, "namespace": self.namespace, "wait": True},
        )
        return self._construct_fork_project()

    def _construct_fork_project(self) -> "PagureProject":
        # Build a PagureProject object pointing at the current user's fork
        # without any API call (the fork may or may not exist).
        return PagureProject(
            service=self.service,
            repo=self.repo,
            namespace=self.namespace,
            username=self._user,
            is_fork=True,
        )

    def get_fork(self, create: bool = True) -> Optional["PagureProject"]:
        """
        Provide GitProject instance of a fork of this project.

        Returns None if this is a fork.

        :param create: create a fork if it doesn't exist
        :return: instance of GitProject or None
        """
        if self.is_fork:
            raise OgrException("Cannot create fork from fork.")

        for fork in self.get_forks():
            fork_info = fork.get_project_info()
            if self._user in fork_info["user"]["name"]:
                return fork

        if not self.is_forked():
            if create:
                return self.fork_create()
            else:
                logger.info(
                    f"Fork of {self.repo}"
                    " does not exist and we were asked not to create it."
                )
                return None
        return self._construct_fork_project()

    def exists(self) -> bool:
        """Return True if the project exists on the Pagure instance."""
        response = self._call_project_api_raw()
        return response.ok

    def is_private(self) -> bool:
        """
        Is this repo private? (accessible only by users with granted access)

        :return: if yes, return True
        """
        host = urlparse(self.service.instance_url).hostname
        if host in [
            "git.centos.org",
            "git.stg.centos.org",
            "pagure.io",
            "src.fedoraproject.org",
            "src.stg.fedoraproject.org",
        ]:
            # private repositories are not allowed on generally used pagure instances
            return False
        raise NotImplementedError(
            f"is_private is not implemented for {self.service.instance_url}."
            f"Please open issue in https://github.com/packit/ogr"
        )

    def is_forked(self) -> bool:
        """
        Is this repo forked by the authenticated user?

        :return: if yes, return True
        """
        f = self._construct_fork_project()
        return bool(f.exists() and f.parent.exists())

    def get_is_fork_from_api(self) -> bool:
        """Ask the API whether this project itself is a fork."""
        return bool(self.get_project_info()["parent"])

    @property
    def is_fork(self) -> bool:
        # Local flag set at construction time; no API call.
        return self._is_fork

    @property
    def parent(self) -> Optional["PagureProject"]:
        """
        Return parent project if this project is a fork, otherwise return None
        """
        if self.get_is_fork_from_api():
            return PagureProject(
                repo=self.repo,
                namespace=self.get_project_info()["parent"]["namespace"],
                service=self.service,
            )
        return None

    def get_git_urls(self) -> Dict[str, str]:
        """Return a mapping of protocol name to git URL for this project."""
        return_value = self._call_project_api("git", "urls")
        return return_value["urls"]

    def add_user(self, user: str, access_level: AccessLevel) -> None:
        """
        AccessLevel.pull => ticket
        AccessLevel.triage => ticket
        AccessLevel.push => commit
        AccessLevel.admin => commit
        AccessLevel.maintain => admin
        """
        self.add_user_or_group(user, access_level, "user")

    def add_group(self, group: str, access_level: AccessLevel):
        """
        AccessLevel.pull => ticket
        AccessLevel.triage => ticket
        AccessLevel.push => commit
        AccessLevel.admin => commit
        AccessLevel.maintain => admin
        """
        self.add_user_or_group(group, access_level, "group")

    def add_user_or_group(
        self, user: str, access_level: AccessLevel, user_type
    ) -> None:
        """Grant *user* (or group) the mapped Pagure ACL on this project."""
        # Translation from ogr's generic access levels to Pagure ACL names.
        access_dict = {
            AccessLevel.pull: "ticket",
            AccessLevel.triage: "ticket",
            AccessLevel.push: "commit",
            AccessLevel.admin: "commit",
            AccessLevel.maintain: "admin",
        }
        response = self._call_project_api_raw(
            "git",
            "modifyacls",
            method="POST",
            data={
                "user_type": user_type,
                "name": user,
                "acl": access_dict[access_level],
            },
        )

        if response.status_code == 401:
            raise PagureAPIException("You are not allowed to modify ACL's")

    def change_token(self, new_token: str) -> None:
        """
        Change an API token.

        Only for this instance.
        """
        self.service.change_token(new_token)

    def get_file_content(self, path: str, ref="master") -> str:
        """Return the content of *path* at *ref*; raise FileNotFoundError otherwise."""
        try:
            result = self._call_project_api_raw(
                "raw", ref, "f", path, add_api_endpoint_part=False
            )

            if not result or result.reason == "NOT FOUND":
                raise FileNotFoundError(f"File '{path}' on {ref} not found")
            return result.content.decode()
        except OurPagureRawRequest as ex:
            raise FileNotFoundError(f"Problem with getting file '{path}' on {ref}", ex)

    def get_sha_from_tag(self, tag_name: str) -> str:
        """Return the commit sha the git tag *tag_name* points to."""
        tags_dict = self.get_tags_dict()
        if tag_name not in tags_dict:
            raise PagureAPIException(f"Tag '{tag_name}' not found.")

        return tags_dict[tag_name].commit_sha

    def commit_comment(
        self, commit: str, body: str, filename: str = None, row: int = None
    ) -> CommitComment:
        # Pagure's API has no commit-comment endpoint.
        raise OperationNotSupported("Commit comments are not supported on Pagure.")

    @if_readonly(return_function=GitProjectReadOnly.set_commit_status)
    def set_commit_status(
        self,
        commit: str,
        state: CommitStatus,
        target_url: str,
        description: str,
        context: str,
        percent: int = None,
        uid: str = None,
        trim: bool = False,
    ) -> "CommitFlag":
        """Set a commit flag (CI status) on *commit*."""
        return PagureCommitFlag.set(
            project=self,
            commit=commit,
            state=state,
            target_url=target_url,
            description=description,
            context=context,
            percent=percent,
            trim=trim,
            uid=uid,
        )

    def get_commit_statuses(self, commit: str) -> List[CommitFlag]:
        """Return all commit flags attached to *commit*."""
        return PagureCommitFlag.get(project=self, commit=commit)

    def get_tags(self) -> List[GitTag]:
        """Return all git tags of the project with their commit shas."""
        response = self._call_project_api("git", "tags", params={"with_commits": True})
        return [GitTag(name=n, commit_sha=c) for n, c in response["tags"].items()]

    def get_tags_dict(self) -> Dict[str, GitTag]:
        """Return git tags as a mapping from tag name to GitTag."""
        response = self._call_project_api("git", "tags", params={"with_commits": True})
        return {n: GitTag(name=n, commit_sha=c) for n, c in response["tags"].items()}

    def get_releases(self) -> List[Release]:
        # git tag for Pagure is shown as Release in Pagure UI
        git_tags = self.get_tags()
        return [self._release_from_git_tag(git_tag) for git_tag in git_tags]

    def _release_from_git_tag(self, git_tag: GitTag) -> PagureRelease:
        # Pagure has no release metadata beyond the tag itself, hence the
        # empty url/date/tarball fields.
        return PagureRelease(
            tag_name=git_tag.name,
            url="",
            created_at="",
            tarball_url="",
            git_tag=git_tag,
            project=self,
        )

    def get_forks(self) -> List["PagureProject"]:
        """
        Get forks of the project.

        :return: [PagureProject]
        """
        forks_url = self.service.get_api_url("projects")
        projects_response = self.service.call_api(
            url=forks_url, params={"fork": True, "pattern": self.repo}
        )
        return [
            PagureProject(
                repo=fork["name"],
                namespace=fork["namespace"],
                service=self.service,
                username=fork["user"]["name"],
                is_fork=True,
            )
            for fork in projects_response["projects"]
        ]

    def get_web_url(self) -> str:
        """
        Get web URL of the project.

        :return: str
        """
        return f'{self.service.instance_url}/{self.get_project_info()["url_path"]}'

    @property
    def full_repo_name(self) -> str:
        """
        Get repo name with namespace
        e.g. 'rpms/python-docker-py'

        :return: str
        """
        fork = f"fork/{self._user}/" if self.is_fork else ""
        namespace = f"{self.namespace}/" if self.namespace else ""
        return f"{fork}{namespace}{self.repo}"
| 32.84803 | 100 | 0.602582 |
96706fdb7deee4b00dcf76e329fc06982041e33c | 7,806 | py | Python | neutron/debug/debug_agent.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | neutron/debug/debug_agent.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | neutron/debug/debug_agent.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.dhcp_agent import DictModel
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe'
class NeutronDebugAgent():
    """Agent that creates, lists and removes network "probe" ports.

    A probe is a Neutron port plugged into a (namespaced) Linux interface,
    used to run diagnostic commands (e.g. ping) from inside a network.
    """

    OPTS = [
        # Needed for drivers
        cfg.BoolOpt('use_namespaces', default=True,
                    help=_("Use Linux network namespaces")),
        cfg.StrOpt('interface_driver',
                   help=_("The driver used to manage the virtual "
                          "interface.")),
        cfg.StrOpt('external_network_bridge', default='br-ex',
                   help=_("Name of bridge used for external network "
                          "traffic.")),
    ]

    def __init__(self, conf, client, driver):
        self.conf = conf
        self.root_helper = config.get_root_helper(conf)
        self.client = client
        self.driver = driver

    def _get_namespace(self, port):
        # Network namespace name derived from the probe port id.
        return "qprobe-%s" % port.id

    def create_probe(self, network_id, device_owner='network'):
        """Create a probe port on *network_id*, plug it and configure its IPs."""
        network = self._get_network(network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge

        port = self._create_port(network, device_owner)
        port.network = network
        interface_name = self.driver.get_device_name(port)
        namespace = None
        if self.conf.use_namespaces:
            namespace = self._get_namespace(port)

        if ip_lib.device_exists(interface_name, self.root_helper, namespace):
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             bridge=bridge,
                             namespace=namespace)
        # Assign every fixed IP of the port (as CIDR) to the interface.
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
        return port

    def _get_subnet(self, subnet_id):
        # Fetch subnet details and wrap them for attribute-style access.
        subnet_dict = self.client.show_subnet(subnet_id)['subnet']
        return DictModel(subnet_dict)

    def _get_network(self, network_id):
        # Fetch network details; resolve its subnets into DictModel objects.
        network_dict = self.client.show_network(network_id)['network']
        network = DictModel(network_dict)
        network.external = network_dict.get('router:external')
        obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
        network.subnets = obj_subnet
        return network

    def clear_probe(self):
        """Delete every probe port created from this host."""
        ports = self.client.list_ports(
            device_id=socket.gethostname(),
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            self.delete_probe(port['id'])

    def delete_probe(self, port_id):
        """Unplug the probe interface, remove its namespace and delete the port."""
        port = DictModel(self.client.show_port(port_id)['port'])
        network = self._get_network(port.network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge
        ip = ip_lib.IPWrapper(self.root_helper)
        namespace = self._get_namespace(port)
        if self.conf.use_namespaces and ip.netns.exists(namespace):
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge,
                               namespace=namespace)
            try:
                ip.netns.delete(namespace)
            except Exception:
                # Best effort: the port is deleted below even if the
                # namespace could not be removed.
                LOG.warn(_('Failed to delete namespace %s'), namespace)
        else:
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge)
        self.client.delete_port(port.id)

    def list_probes(self):
        """Return all probe ports, each annotated with its local device name."""
        ports = self.client.list_ports(
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            port['device_name'] = self.driver.get_device_name(DictModel(port))
        return info

    def exec_command(self, port_id, command=None):
        """Execute *command* inside the probe's namespace.

        With no command and namespaces enabled, return the shell prefix
        that would enter the namespace instead.
        """
        port = DictModel(self.client.show_port(port_id)['port'])
        ip = ip_lib.IPWrapper(self.root_helper)
        namespace = self._get_namespace(port)
        if self.conf.use_namespaces:
            if not command:
                return "sudo ip netns exec %s" % self._get_namespace(port)
            namespace = ip.ensure_namespace(namespace)
            return namespace.netns.execute(shlex.split(command))
        else:
            return utils.execute(shlex.split(command))

    def ensure_probe(self, network_id):
        """Return this host's existing probe on *network_id*, creating one if needed."""
        ports = self.client.list_ports(network_id=network_id,
                                       device_id=socket.gethostname(),
                                       device_owner=DEVICE_OWNER_NETWORK_PROBE)
        info = ports.get('ports', [])
        if info:
            return DictModel(info[0])
        else:
            return self.create_probe(network_id)

    def ping_all(self, network_id=None, timeout=1):
        """Ping every fixed IP of every (non-probe) port, from a probe on its network.

        :param network_id: restrict to one network; all networks when None
        :param timeout: per-ping deadline in seconds (-w option)
        :return: concatenated ping outputs
        """
        if network_id:
            ports = self.client.list_ports(network_id=network_id)['ports']
        else:
            ports = self.client.list_ports()['ports']
        result = ""
        for port in ports:
            probe = self.ensure_probe(port['network_id'])
            if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
                continue
            for fixed_ip in port['fixed_ips']:
                address = fixed_ip['ip_address']
                subnet = self._get_subnet(fixed_ip['subnet_id'])
                # Choose the IPv4 or IPv6 ping binary based on the subnet.
                if subnet.ip_version == 4:
                    ping_command = 'ping'
                else:
                    ping_command = 'ping6'
                result += self.exec_command(probe.id,
                                            '%s -c 1 -w %s %s' % (ping_command,
                                                                  timeout,
                                                                  address))
        return result

    def _create_port(self, network, device_owner):
        # Create the Neutron port for a probe, requesting one fixed IP per
        # subnet of the network; the device_id ties it to this host.
        body = dict(port=dict(
            admin_state_up=True,
            network_id=network.id,
            device_id='%s' % socket.gethostname(),
            device_owner='%s:probe' % device_owner,
            tenant_id=network.tenant_id,
            fixed_ips=[dict(subnet_id=s.id) for s in network.subnets]))
        port_dict = self.client.create_port(body)['port']
        port = DictModel(port_dict)
        port.network = network
        for fixed_ip in port.fixed_ips:
            fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
        return port
| 38.643564 | 79 | 0.593005 |
05e64c864731e330dd6f465041d2bc1d20971d0e | 11,352 | py | Python | category_encoders/m_estimate.py | RoyalTS/category_encoders | a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402 | [
"BSD-3-Clause"
] | 1 | 2021-07-09T08:14:31.000Z | 2021-07-09T08:14:31.000Z | category_encoders/m_estimate.py | RoyalTS/category_encoders | a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402 | [
"BSD-3-Clause"
] | null | null | null | category_encoders/m_estimate.py | RoyalTS/category_encoders | a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402 | [
"BSD-3-Clause"
] | null | null | null | """M-probability estimate"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class MEstimateEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""M-probability estimate of likelihood.
Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.
This is a simplified version of target encoder, which goes under names like m-probability estimate or
additive smoothing with known incidence rates. In comparison to target encoder, m-probability estimate
has only one tunable parameter (`m`), while target encoder has two tunable parameters (`min_samples_leaf`
and `smoothing`).
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop encoded columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
handle_unknown: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
randomized: bool,
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
sigma: float
standard deviation (spread or "width") of the normal distribution.
m: float
this is the "m" in the m-probability estimate. Higher value of m results into stronger shrinking.
M is non-negative.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target > 22.5
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = MEstimateEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, equation 7, from
https://dl.acm.org/citation.cfm?id=507538
.. [2] On estimating probabilities in tree pruning, equation 1, from
https://link.springer.com/chapter/10.1007/BFb0017010
.. [3] Additive smoothing, from
https://en.wikipedia.org/wiki/Additive_smoothing#Generalized_to_the_case_of_known_incidence_rates
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
             handle_unknown='value', handle_missing='value', random_state=None, randomized=False, sigma=0.05, m=1.0):
    """Store configuration only; all fitting state is computed in ``fit``."""
    self.verbose = verbose
    self.return_df = return_df
    self.drop_invariant = drop_invariant
    self.drop_cols = []  # columns with ~zero variance, populated in fit() when drop_invariant
    self.cols = cols  # None means "encode every object column" (resolved in fit())
    self.ordinal_encoder = None  # fitted OrdinalEncoder, set in fit()
    self._dim = None  # number of input columns seen during fit()
    self.mapping = None  # {col: Series mapping ordinal code -> estimate}, set in fit()
    self.handle_unknown = handle_unknown
    self.handle_missing = handle_missing
    self._sum = None  # sum of the target over training data, set in _train()
    self._count = None  # number of training samples, set in _train()
    self.random_state = random_state
    self.randomized = randomized
    self.sigma = sigma
    self.m = m
    self.feature_names = None  # output column names, set in fit()
# noinspection PyUnusedLocal
def fit(self, X, y, **kwargs):
    """Fit encoder according to X and binary y.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like, shape = [n_samples]
        Binary target values.
    Returns
    -------
    self : encoder
        Returns self.
    """
    # Unite parameters into pandas types
    X = util.convert_input(X)
    y = util.convert_input_vector(y, X.index).astype(float)
    # The lengths must be equal
    if X.shape[0] != y.shape[0]:
        raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
    self._dim = X.shape[1]
    # If columns aren't passed, just use every string column
    if self.cols is None:
        self.cols = util.get_obj_cols(X)
    else:
        self.cols = util.convert_cols_to_list(self.cols)
    if self.handle_missing == 'error':
        if X[self.cols].isnull().any().any():
            raise ValueError('Columns to be encoded can not contain null')
    # Encode categories as ordinals first; _train() then maps each ordinal
    # code to its m-probability estimate.
    self.ordinal_encoder = OrdinalEncoder(
        verbose=self.verbose,
        cols=self.cols,
        handle_unknown='value',
        handle_missing='value'
    )
    self.ordinal_encoder = self.ordinal_encoder.fit(X)
    X_ordinal = self.ordinal_encoder.transform(X)
    # Training
    self.mapping = self._train(X_ordinal, y)
    X_temp = self.transform(X, override_return_df=True)
    self.feature_names = X_temp.columns.tolist()
    # Store column names with approximately constant variance on the training data
    if self.drop_invariant:
        generated_cols = util.get_generated_cols(X, X_temp, self.cols)
        self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
        try:
            for x in self.drop_cols:
                self.feature_names.remove(x)
        # BUG FIX: list.remove() raises ValueError, not KeyError, so the
        # original ``except KeyError`` could never trigger and a missing
        # name would crash fit() instead of being reported.
        except ValueError as e:
            if self.verbose > 0:
                print("Could not remove column from feature names."
                      "Not found in generated cols.\n{}".format(e))
    return self
def transform(self, X, y=None, override_return_df=False):
    """Perform the transformation to new categorical data.
    When the data are used for model training, it is important to also pass the target in order to apply leave one out.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
    y : array-like, shape = [n_samples] when transform by leave one out
        None, when transform without target information (such as transform test set)
    Returns
    -------
    p : array, shape = [n_samples, n_numeric + N]
        Transformed values with encoding applied.
    """
    if self.handle_missing == 'error':
        if X[self.cols].isnull().any().any():
            raise ValueError('Columns to be encoded can not contain null')
    if self._dim is None:
        raise ValueError('Must train encoder before it can be used to transform data.')
    # Unite the input into pandas types
    X = util.convert_input(X)
    # Then make sure that it is the right size
    if X.shape[1] != self._dim:
        raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
    # If we are encoding the training data, we have to check the target
    if y is not None:
        y = util.convert_input_vector(y, X.index).astype(float)
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
    if not list(self.cols):
        return X
    # Do not modify the input argument
    X = X.copy(deep=True)
    X = self.ordinal_encoder.transform(X)
    if self.handle_unknown == 'error':
        if X[self.cols].isin([-1]).any().any():
            raise ValueError('Unexpected categories found in dataframe')
    # Loop over the columns and replace the nominal values with the numbers
    X = self._score(X, y)
    # Postprocessing
    # Note: We should not even convert these columns.
    if self.drop_invariant:
        for col in self.drop_cols:
            # BUG FIX: ``X.drop(col, 1, inplace=True)`` relied on the
            # positional ``axis`` argument, which was deprecated and then
            # removed in pandas 2.0; the keyword form is equivalent.
            X.drop(columns=col, inplace=True)
    if self.return_df or override_return_df:
        return X
    else:
        return X.values
def _train(self, X, y):
    """Build the per-column mapping from ordinal code to m-probability estimate.

    ``X`` holds ordinal-encoded features; ``y`` is the float binary target.
    Returns a dict {column name: pandas Series indexed by ordinal code}.
    """
    # Initialize the output
    mapping = {}
    # Calculate global statistics
    self._sum = y.sum()
    self._count = y.count()
    prior = self._sum/self._count
    for switch in self.ordinal_encoder.category_mapping:
        col = switch.get('col')
        values = switch.get('mapping')
        # Calculate sum and count of the target for each unique value in the feature col
        stats = y.groupby(X[col]).agg(['sum', 'count'])  # Count of x_{i,+} and x_i
        # Calculate the m-probability estimate
        estimate = (stats['sum'] + prior * self.m) / (stats['count'] + self.m)
        # Ignore unique columns. This helps to prevent overfitting on id-like columns
        if len(stats['count']) == self._count:
            estimate[:] = prior
        # Code -1 is what the ordinal encoder assigns to unseen categories.
        if self.handle_unknown == 'return_nan':
            estimate.loc[-1] = np.nan
        elif self.handle_unknown == 'value':
            estimate.loc[-1] = prior
        # NOTE(review): -2 / values.loc[np.nan] appear to be the ordinal
        # codes for missing values -- confirm against OrdinalEncoder.
        if self.handle_missing == 'return_nan':
            estimate.loc[values.loc[np.nan]] = np.nan
        elif self.handle_missing == 'value':
            estimate.loc[-2] = prior
        # Store the m-probability estimate for transform() function
        mapping[col] = estimate
    return mapping
def _score(self, X, y):
for col in self.cols:
# Score the column
X[col] = X[col].map(self.mapping[col])
# Randomization is meaningful only for training data -> we do it only if y is present
if self.randomized and y is not None:
random_state_generator = check_random_state(self.random_state)
X[col] = (X[col] * random_state_generator.normal(1., self.sigma, X[col].shape[0]))
return X
def get_feature_names(self):
    """
    Returns the names of all transformed / added columns.
    Returns
    -------
    feature_names: list
        A list with all feature names transformed or added.
        Note: potentially dropped features are not included!
    """
    # ``feature_names`` is only a list after a successful fit().
    if isinstance(self.feature_names, list):
        return self.feature_names
    raise ValueError("Estimator has to be fitted to return feature names.")
| 36.619355 | 137 | 0.614341 |
281f00ba0126c47931a04c1c44347ecde284f8c4 | 554 | py | Python | day1/src/puzzle2.py | DirkdenHoedt/AdventOfCode2020 | 98e85a12ca3630dcf698f21124fb89b66418c263 | [
"MIT"
] | null | null | null | day1/src/puzzle2.py | DirkdenHoedt/AdventOfCode2020 | 98e85a12ca3630dcf698f21124fb89b66418c263 | [
"MIT"
] | null | null | null | day1/src/puzzle2.py | DirkdenHoedt/AdventOfCode2020 | 98e85a12ca3630dcf698f21124fb89b66418c263 | [
"MIT"
] | null | null | null | #!/bin/env python3
from itertools import combinations


def puzzle2():
    """Find three distinct entries in input.txt that sum to 2020.

    Prints the sum and product of the triple and returns the product,
    or None if no such triple exists.
    """
    # BUG FIX: the numbers list used to be a module-level global that was
    # appended to on every call, so calling puzzle2() twice duplicated all
    # entries.  Keep it local instead.  Also avoid shadowing builtin input().
    with open('input.txt', 'r') as infile:
        numbers = [int(line) for line in infile]
    # BUG FIX: the original triple nested loop could pick the *same* entry
    # for x, y and z; combinations() enumerates each distinct triple once.
    for x, y, z in combinations(numbers, 3):
        if x + y + z == 2020:
            print('{} + {} + {} = {}'.format(x, y, z, x + y + z))
            print('{} * {} * {} = {}'.format(x, y, z, x * y * z))
            return x * y * z
    return None


if __name__ == "__main__":
    puzzle2()
| 26.380952 | 69 | 0.433213 |
02d4b225fa34e310d206a592b485b8ff707cdd17 | 670 | py | Python | chemprop/web/wsgi.py | shomikverma/chemprop | ddaa874fe1f6ef4fbdf6b980c3157bed2ae85faa | [
"MIT"
] | 689 | 2020-02-14T20:22:33.000Z | 2022-03-31T13:45:09.000Z | chemprop/web/wsgi.py | shomikverma/chemprop | ddaa874fe1f6ef4fbdf6b980c3157bed2ae85faa | [
"MIT"
] | 214 | 2020-02-23T19:54:15.000Z | 2022-03-30T21:47:06.000Z | chemprop/web/wsgi.py | shomikverma/chemprop | ddaa874fe1f6ef4fbdf6b980c3157bed2ae85faa | [
"MIT"
] | 296 | 2020-02-14T15:39:13.000Z | 2022-03-28T16:27:17.000Z | """
Runs the web interface version of Chemprop.
Designed to be used for production only, along with Gunicorn.
"""
from chemprop.web.app import app, db
from chemprop.web.utils import clear_temp_folder, set_root_folder
def build_app(*args, **kwargs):
    """Configure the Flask app for production (Gunicorn) use and return it.

    Recognized keyword arguments: ``root_folder`` (base directory for app
    data), ``init_db`` (if present, initialize the database), ``demo``
    (stored in app.config['DEMO'], default False).
    """
    # Root folder and its subfolders must exist before serving anything.
    root = kwargs.get('root_folder', None)
    set_root_folder(app=app, root_folder=root, create_folders=True)
    clear_temp_folder(app=app)
    db.init_app(app)
    # Optionally (re-)initialize the database schema.
    if 'init_db' in kwargs:
        with app.app_context():
            db.init_db()
            print("-- INITIALIZED DATABASE --")
    app.config['DEMO'] = kwargs.get('demo', False)
    return app
cd63b4372793f477ba5ac658c7d721167f372a1d | 25,304 | py | Python | astropy/io/registry.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | astropy/io/registry.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | astropy/io/registry.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import pathlib
import re
import sys
import inspect
import os
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ['register_reader', 'register_writer', 'register_identifier',
'identify_format', 'get_reader', 'get_writer', 'read', 'write',
'get_formats', 'IORegistryError', 'delay_doc_updates',
'UnifiedReadWriteMethod', 'UnifiedReadWrite']
__doctest_skip__ = ['register_identifier']
# Global registries keyed by (format name, data class).  OrderedDict keeps
# insertion order, so formats are iterated in registration order.
_readers = OrderedDict()
_writers = OrderedDict()
_identifiers = OrderedDict()
# Types accepted as filesystem paths by read()/write().
PATH_TYPES = (str, pathlib.Path)
class IORegistryError(Exception):
    """Custom error raised for clashes and failed lookups in the I/O registry."""
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarly delayed are added
# to this set.
_delayed_docs_classes = set()
@contextlib.contextmanager
def delay_doc_updates(cls):
    """Contextmanager to disable documentation updates when registering
    reader and writer. The documentation is only built once when the
    contextmanager exits.
    .. versionadded:: 1.3
    Parameters
    ----------
    cls : class
        Class for which the documentation updates should be delayed.
    Notes
    -----
    Registering multiple readers and writers can cause significant overhead
    because the documentation of the corresponding ``read`` and ``write``
    methods are build every time.
    .. warning::
        This contextmanager is experimental and may be replaced by a more
        general approach.
    Examples
    --------
    see for example the source code of ``astropy.table.__init__``.
    """
    _delayed_docs_classes.add(cls)
    try:
        yield
    finally:
        # BUG FIX: without try/finally, an exception inside the context left
        # ``cls`` stuck in _delayed_docs_classes forever, so its read/write
        # docstrings would never be refreshed again.
        _delayed_docs_classes.discard(cls)
        _update__doc__(cls, 'read')
        _update__doc__(cls, 'write')
def get_formats(data_class=None, readwrite=None):
    """
    Get the list of registered I/O formats as a Table.
    Parameters
    ----------
    data_class : classobj, optional
        Filter readers/writer to match data class (default = all classes).
    readwrite : str or None, optional
        Search only for readers (``"Read"``) or writers (``"Write"``). If None
        search for both. Default is None.
        .. versionadded:: 1.3
    Returns
    -------
    format_table : Table
        Table of available I/O formats.
    """
    from astropy.table import Table
    format_classes = sorted(set(_readers) | set(_writers), key=itemgetter(0))
    rows = []
    for format_class in format_classes:
        if (data_class is not None and not _is_best_match(
                data_class, format_class[1], format_classes)):
            continue
        has_read = 'Yes' if format_class in _readers else 'No'
        has_write = 'Yes' if format_class in _writers else 'No'
        has_identify = 'Yes' if format_class in _identifiers else 'No'
        # Check if this is a short name (e.g. 'rdb') which is deprecated in
        # favor of the full 'ascii.rdb'.
        ascii_format_class = ('ascii.' + format_class[0], format_class[1])
        deprecated = 'Yes' if ascii_format_class in format_classes else ''
        rows.append((format_class[1].__name__, format_class[0], has_read,
                     has_write, has_identify, deprecated))
    if readwrite is not None:
        if readwrite == 'Read':
            rows = [row for row in rows if row[2] == 'Yes']
        elif readwrite == 'Write':
            rows = [row for row in rows if row[3] == 'Yes']
        else:
            # BUG FIX: the '{0}' placeholder was never substituted because
            # the .format() call was missing; the message showed a literal
            # '{0}' instead of the offending value.
            raise ValueError('unrecognized value for "readwrite": {0}.\n'
                             'Allowed are "Read" and "Write" and None.'
                             .format(readwrite))
    # Sorting the list of tuples is much faster than sorting it after the table
    # is created. (#5262)
    if rows:
        # Indices represent "Data Class", "Deprecated" and "Format".
        data = list(zip(*sorted(rows, key=itemgetter(0, 5, 1))))
    else:
        data = None
    format_table = Table(data, names=('Data class', 'Format', 'Read', 'Write',
                                      'Auto-identify', 'Deprecated'))
    if not np.any(format_table['Deprecated'] == 'Yes'):
        format_table.remove_column('Deprecated')
    return format_table
def _update__doc__(data_class, readwrite):
    """
    Update the docstring to include all the available readers / writers for the
    ``data_class.read`` or ``data_class.write`` functions (respectively).
    """
    FORMATS_TEXT = 'The available built-in formats are:'
    # Get the existing read or write method and its docstring
    class_readwrite_func = getattr(data_class, readwrite)
    if not isinstance(class_readwrite_func.__doc__, str):
        # No docstring--could just be test code, or possibly code compiled
        # without docstrings
        return
    lines = class_readwrite_func.__doc__.splitlines()
    # Find the location of the existing formats table if it exists
    sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
    if sep_indices:
        # Chop off the existing formats table, including the initial blank line
        chop_index = sep_indices[0]
        lines = lines[:chop_index]
    # Find the minimum indent, skipping the first line because it might be odd
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    left_indent = ' ' * min(match.start() for match in matches if match)
    # Get the available unified I/O formats for this class
    # Include only formats that have a reader, and drop the 'Data class' column
    format_table = get_formats(data_class, readwrite.capitalize())
    format_table.remove_column('Data class')
    # Get the available formats as a table, then munge the output of pformat()
    # a bit and put it into the docstring.
    new_lines = format_table.pformat(max_lines=-1, max_width=80)
    # Turn the dashed separator row into '=' rules (reST table framing).
    table_rst_sep = re.sub('-', '=', new_lines[1])
    new_lines[1] = table_rst_sep
    new_lines.insert(0, table_rst_sep)
    new_lines.append(table_rst_sep)
    # Check for deprecated names and include a warning at the end.
    if 'Deprecated' in format_table.colnames:
        new_lines.extend(['',
                          'Deprecated format names like ``aastex`` will be '
                          'removed in a future version. Use the full ',
                          'name (e.g. ``ascii.aastex``) instead.'])
    new_lines = [FORMATS_TEXT, ''] + new_lines
    lines.extend([left_indent + line for line in new_lines])
    # Depending on Python version and whether class_readwrite_func is
    # an instancemethod or classmethod, one of the following will work.
    if isinstance(class_readwrite_func, UnifiedReadWrite):
        class_readwrite_func.__class__.__doc__ = '\n'.join(lines)
    else:
        try:
            class_readwrite_func.__doc__ = '\n'.join(lines)
        except AttributeError:
            class_readwrite_func.__func__.__doc__ = '\n'.join(lines)
def register_reader(data_format, data_class, function, force=False):
    """
    Register a reader function.
    Parameters
    ----------
    data_format : str
        The data format identifier. This is the string that will be used to
        specify the data type when reading.
    data_class : classobj
        The class of the object that the reader produces.
    function : function
        The function to read in a data object.
    force : bool, optional
        Whether to override any existing function if already present.
        Default is ``False``.
    """
    # Idiom fix: ``x not in d`` instead of ``not x in d``.
    if (data_format, data_class) not in _readers or force:
        _readers[(data_format, data_class)] = function
    else:
        raise IORegistryError("Reader for format '{}' and class '{}' is "
                              'already defined'
                              ''.format(data_format, data_class.__name__))
    # Rebuild the class docs unless updates are being delayed.
    if data_class not in _delayed_docs_classes:
        _update__doc__(data_class, 'read')
def unregister_reader(data_format, data_class):
    """
    Unregister a reader function
    Parameters
    ----------
    data_format : str
        The data format identifier.
    data_class : classobj
        The class of the object that the reader produces.
    """
    key = (data_format, data_class)
    if key not in _readers:
        raise IORegistryError("No reader defined for format '{}' and class '{}'"
                              ''.format(data_format, data_class.__name__))
    del _readers[key]
    # Rebuild the class docs unless updates are being delayed.
    if data_class not in _delayed_docs_classes:
        _update__doc__(data_class, 'read')
def register_writer(data_format, data_class, function, force=False):
    """
    Register a table writer function.
    Parameters
    ----------
    data_format : str
        The data format identifier. This is the string that will be used to
        specify the data type when writing.
    data_class : classobj
        The class of the object that can be written.
    function : function
        The function to write out a data object.
    force : bool, optional
        Whether to override any existing function if already present.
        Default is ``False``.
    """
    key = (data_format, data_class)
    if key in _writers and not force:
        raise IORegistryError("Writer for format '{}' and class '{}' is "
                              'already defined'
                              ''.format(data_format, data_class.__name__))
    _writers[key] = function
    # Rebuild the class docs unless updates are being delayed.
    if data_class not in _delayed_docs_classes:
        _update__doc__(data_class, 'write')
def unregister_writer(data_format, data_class):
    """
    Unregister a writer function
    Parameters
    ----------
    data_format : str
        The data format identifier.
    data_class : classobj
        The class of the object that can be written.
    """
    key = (data_format, data_class)
    if key not in _writers:
        raise IORegistryError("No writer defined for format '{}' and class '{}'"
                              ''.format(data_format, data_class.__name__))
    del _writers[key]
    # Rebuild the class docs unless updates are being delayed.
    if data_class not in _delayed_docs_classes:
        _update__doc__(data_class, 'write')
def register_identifier(data_format, data_class, identifier, force=False):
    """
    Associate an identifier function with a specific data type.
    Parameters
    ----------
    data_format : str
        The data format identifier. This is the string that is used to
        specify the data type when reading/writing.
    data_class : classobj
        The class of the object that can be written.
    identifier : function
        A function that checks the argument specified to `read` or `write` to
        determine whether the input can be interpreted as a table of type
        ``data_format``. This function should take the following arguments:
        - ``origin``: A string ``"read"`` or ``"write"`` identifying whether
          the file is to be opened for reading or writing.
        - ``path``: The path to the file.
        - ``fileobj``: An open file object to read the file's contents, or
          `None` if the file could not be opened.
        - ``*args``: Positional arguments for the `read` or `write`
          function.
        - ``**kwargs``: Keyword arguments for the `read` or `write`
          function.
        One or both of ``path`` or ``fileobj`` may be `None`.  If they are
        both `None`, the identifier will need to work from ``args[0]``.
        The function should return True if the input can be identified
        as being of format ``data_format``, and False otherwise.
    force : bool, optional
        Whether to override any existing function if already present.
        Default is ``False``.
    Examples
    --------
    To set the identifier based on extensions, for formats that take a
    filename as a first argument, you can do for example::
        >>> def my_identifier(*args, **kwargs):
        ...     return isinstance(args[0], str) and args[0].endswith('.tbl')
        >>> register_identifier('ipac', Table, my_identifier)
    """
    key = (data_format, data_class)
    if key in _identifiers and not force:
        raise IORegistryError("Identifier for format '{}' and class '{}' is "
                              'already defined'.format(data_format,
                                                       data_class.__name__))
    _identifiers[key] = identifier
def unregister_identifier(data_format, data_class):
    """
    Unregister an identifier function
    Parameters
    ----------
    data_format : str
        The data format identifier.
    data_class : classobj
        The class of the object that can be read/written.
    """
    key = (data_format, data_class)
    if key not in _identifiers:
        raise IORegistryError("No identifier defined for format '{}' and class"
                              " '{}'".format(data_format, data_class.__name__))
    del _identifiers[key]
def identify_format(origin, data_class_required, path, fileobj, args, kwargs):
    """Loop through identifiers to see which formats match.
    Parameters
    ----------
    origin : str
        A string ``"read`` or ``"write"`` identifying whether the file is to be
        opened for reading or writing.
    data_class_required : object
        The specified class for the result of `read` or the class that is to be
        written.
    path : str, other path object or None
        The path to the file or None.
    fileobj : File object or None.
        An open file object to read the file's contents, or ``None`` if the
        file could not be opened.
    args : sequence
        Positional arguments for the `read` or `write` function. Note that
        these must be provided as sequence.
    kwargs : dict-like
        Keyword arguments for the `read` or `write` function. Note that this
        parameter must be `dict`-like.
    Returns
    -------
    valid_formats : list
        List of matching formats.
    """
    valid_formats = []
    for (fmt, klass), id_func in _identifiers.items():
        # Only consult identifiers registered for the closest matching class.
        if not _is_best_match(data_class_required, klass, _identifiers):
            continue
        if id_func(origin, path, fileobj, *args, **kwargs):
            valid_formats.append(fmt)
    return valid_formats
def _get_format_table_str(data_class, readwrite):
    # Render the available-formats table for error messages; the class column
    # is redundant because the caller already knows the class.
    table = get_formats(data_class, readwrite=readwrite)
    table.remove_column('Data class')
    return '\n'.join(table.pformat(max_lines=-1))
def get_reader(data_format, data_class):
    """Get reader for ``data_format``.
    Parameters
    ----------
    data_format : str
        The data format identifier. This is the string that is used to
        specify the data type when reading/writing.
    data_class : classobj
        The class of the object that can be written.
    Returns
    -------
    reader : callable
        The registered reader function for this format and class.
    """
    matching = [(fmt, cls) for fmt, cls in _readers if fmt == data_format]
    for fmt, cls in matching:
        if _is_best_match(data_class, cls, matching):
            return _readers[(fmt, cls)]
    # Nothing compatible: raise with a listing of what IS available.
    format_table_str = _get_format_table_str(data_class, 'Read')
    raise IORegistryError(
        "No reader defined for format '{}' and class '{}'.\n\nThe "
        "available formats are:\n\n{}".format(
            data_format, data_class.__name__, format_table_str))
def get_writer(data_format, data_class):
    """Get writer for ``data_format``.
    Parameters
    ----------
    data_format : str
        The data format identifier. This is the string that is used to
        specify the data type when reading/writing.
    data_class : classobj
        The class of the object that can be written.
    Returns
    -------
    writer : callable
        The registered writer function for this format and class.
    """
    matching = [(fmt, cls) for fmt, cls in _writers if fmt == data_format]
    for fmt, cls in matching:
        if _is_best_match(data_class, cls, matching):
            return _writers[(fmt, cls)]
    # Nothing compatible: raise with a listing of what IS available.
    format_table_str = _get_format_table_str(data_class, 'Write')
    raise IORegistryError(
        "No writer defined for format '{}' and class '{}'.\n\nThe "
        "available formats are:\n\n{}".format(
            data_format, data_class.__name__, format_table_str))
def read(cls, *args, format=None, **kwargs):
    """
    Read in data.
    The arguments passed to this method depend on the format.
    """
    # ``ctx`` holds the get_readable_fileobj context manager so the opened
    # file can be closed in the finally clause, whatever happens below.
    ctx = None
    try:
        if format is None:
            path = None
            fileobj = None
            if len(args):
                if isinstance(args[0], PATH_TYPES):
                    from astropy.utils.data import get_readable_fileobj
                    # path might be a pathlib.Path object
                    if isinstance(args[0], pathlib.Path):
                        args = (str(args[0]),) + args[1:]
                    path = args[0]
                    try:
                        ctx = get_readable_fileobj(args[0], encoding='binary')
                        fileobj = ctx.__enter__()
                    except OSError:
                        # A missing/unreadable file is a real error for the caller.
                        raise
                    except Exception:
                        # Any other failure: fall back to identifying by path only.
                        fileobj = None
                    else:
                        args = [fileobj] + list(args[1:])
                elif hasattr(args[0], 'read'):
                    path = None
                    fileobj = args[0]
            format = _get_valid_format(
                'read', cls, path, fileobj, args, kwargs)
        reader = get_reader(format, cls)
        data = reader(*args, **kwargs)
        if not isinstance(data, cls):
            # User has read with a subclass where only the parent class is
            # registered.  This returns the parent class, so try coercing
            # to desired subclass.
            try:
                data = cls(data)
            except Exception:
                raise TypeError('could not convert reader output to {} '
                                'class.'.format(cls.__name__))
    finally:
        if ctx is not None:
            ctx.__exit__(*sys.exc_info())
    return data
def write(data, *args, format=None, **kwargs):
    """
    Write out data.
    The arguments passed to this method depend on the format.
    """
    if format is None:
        path = None
        fileobj = None
        if args:
            first = args[0]
            if isinstance(first, PATH_TYPES):
                # path might be a pathlib.Path object
                if isinstance(first, pathlib.Path):
                    args = (str(first),) + args[1:]
                path = args[0]
            elif hasattr(first, 'read'):
                fileobj = first
        format = _get_valid_format(
            'write', data.__class__, path, fileobj, args, kwargs)
    writer = get_writer(format, data.__class__)
    writer(data, *args, **kwargs)
def _is_best_match(class1, class2, format_classes):
"""
Determine if class2 is the "best" match for class1 in the list
of classes. It is assumed that (class2 in classes) is True.
class2 is the the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
if parent in classes: # class2 was superceded
return False
return False
def _get_valid_format(mode, cls, path, fileobj, args, kwargs):
    """
    Returns the first valid format that can be used to read/write the data in
    question. Mode can be either 'read' or 'write'.
    """
    valid_formats = identify_format(mode, cls, path, fileobj, args, kwargs)
    if not valid_formats:
        format_table_str = _get_format_table_str(cls, mode.capitalize())
        raise IORegistryError("Format could not be identified based on the"
                              " file name or contents, please provide a"
                              " 'format' argument.\n"
                              "The available formats are:\n"
                              "{}".format(format_table_str))
    if len(valid_formats) > 1:
        raise IORegistryError(
            "Format is ambiguous - options are: {}".format(
                ', '.join(sorted(valid_formats, key=itemgetter(0)))))
    return valid_formats[0]
class UnifiedReadWrite:
    """Base class for the worker object used in unified read() or write() methods.
    This lightweight object is created for each `read()` or `write()` call
    via ``read`` / ``write`` descriptors on the data object class.  The key
    driver is to allow complete format-specific documentation of available
    method options via a ``help()`` method, e.g. ``Table.read.help('fits')``.
    Subclasses must define a ``__call__`` method which is what actually gets
    called when the data object ``read()`` or ``write()`` method is called.
    For the canonical example see the `~astropy.table.Table` class
    implementation (in particular the ``connect.py`` module there).
    Parameters
    ----------
    instance : object
        Descriptor calling instance or None if no instance
    cls : type
        Descriptor calling class (either owner class or instance class)
    method_name : str
        Method name, either 'read' or 'write'
    """
    def __init__(self, instance, cls, method_name):
        self._instance = instance
        self._cls = cls
        self._method_name = method_name  # 'read' or 'write'
    def help(self, format=None, out=None):
        """Output help documentation for the specified unified I/O ``format``.
        By default the help output is printed to the console via ``pydoc.pager``.
        Instead one can supplied a file handle object as ``out`` and the output
        will be written to that handle.
        Parameters
        ----------
        format : str
            Unified I/O format name, e.g. 'fits' or 'ascii.ecsv'
        out : None or file handle object
            Output destination (default is stdout via a pager)
        """
        cls = self._cls
        method_name = self._method_name
        # Get reader or writer function
        get_func = get_reader if method_name == 'read' else get_writer
        try:
            # Look up the registered function only for a specific format;
            # a failed lookup is reported as text rather than raised.
            if format:
                read_write_func = get_func(format, cls)
        except IORegistryError as err:
            reader_doc = 'ERROR: ' + str(err)
        else:
            if format:
                # Format-specific
                header = ("{}.{}(format='{}') documentation\n"
                          .format(cls.__name__, method_name, format))
                doc = read_write_func.__doc__
            else:
                # General docs
                header = ('{}.{} general documentation\n'
                          .format(cls.__name__, method_name))
                doc = getattr(cls, method_name).__doc__
            # Frame the header between '=' rules (reST-style title).
            reader_doc = re.sub('.', '=', header)
            reader_doc += header
            reader_doc += re.sub('.', '=', header)
            reader_doc += os.linesep
            reader_doc += inspect.cleandoc(doc)
        if out is None:
            import pydoc
            pydoc.pager(reader_doc)
        else:
            out.write(reader_doc)
    def list_formats(self, out=None):
        """Print a list of available formats to console (or ``out`` filehandle)
        out : None or file handle object
            Output destination (default is stdout via a pager)
        """
        tbl = get_formats(self._cls, self._method_name.capitalize())
        del tbl['Data class']
        if out is None:
            tbl.pprint(max_lines=-1, max_width=-1)
        else:
            out.write('\n'.join(tbl.pformat(max_lines=-1, max_width=-1)))
        # NOTE(review): returns ``out`` as given, i.e. None when printing.
        return out
class UnifiedReadWriteMethod:
    """Descriptor class for creating read() and write() methods in unified I/O.
    The canonical example is in the ``Table`` class, where the ``connect.py``
    module creates subclasses of the ``UnifiedReadWrite`` class.  These have
    custom ``__call__`` methods that do the setup work related to calling the
    registry read() or write() functions.  With this, the ``Table`` class
    defines read and write methods as follows::
        read = UnifiedReadWriteMethod(TableRead)
        write = UnifiedReadWriteMethod(TableWrite)
    Parameters
    ----------
    func : `~astropy.io.registry.UnifiedReadWrite` subclass
        Class that defines read or write functionality
    """
    def __init__(self, func):
        # ``func`` is the worker class; it is instantiated anew on every
        # attribute access (see __get__).
        self.func = func
    def __get__(self, obj, objtype):
        # Attribute access on the class (obj is None) or on an instance
        # returns a fresh worker bound to the accessor.
        return self.func(obj, objtype)
| 34.758242 | 82 | 0.616701 |
b188328364586521b798f7c607a85ab44d684920 | 13,714 | py | Python | bin/metawrap-scripts/binning_refiner.py | qi-lee/metaWRAP | f5b4737e748af69681a604ed584fafd7b4eb664d | [
"MIT"
] | 251 | 2018-02-17T22:11:47.000Z | 2022-03-30T07:16:29.000Z | bin/metawrap-scripts/binning_refiner.py | qi-lee/metaWRAP | f5b4737e748af69681a604ed584fafd7b4eb664d | [
"MIT"
] | 419 | 2018-02-14T18:09:17.000Z | 2022-03-30T22:42:02.000Z | bin/metawrap-scripts/binning_refiner.py | qi-lee/metaWRAP | f5b4737e748af69681a604ed584fafd7b4eb664d | [
"MIT"
] | 152 | 2018-03-23T03:31:36.000Z | 2022-03-13T06:08:49.000Z | #!/usr/bin/env python2.7
# Copyright (C) 2017, Weizhi Song, Torsten Thomas.
# songwz03@gmail.com
# t.thomas@unsw.edu.au
# Binning_refiner is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Binning_refiner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# metaWRAP author notes:
# I thank the original creator of this script! This is a great idea! To make
# this script more usable as part of the metaWRAP binning pipeline, I
# removed unnecessary visual aaspects of the original Bin_refiner script
# and made it python2 compatible.
# Check out the original program: https://github.com/songweizhi/Binning_refiner
# And the publication: https://www.ncbi.nlm.nih.gov/pubmed/28186226
import os
import glob
import shutil
import argparse
from time import sleep
from sys import stdout
from Bio import SeqIO
##################################################### CONFIGURATION ####################################################
parser = argparse.ArgumentParser()
parser.add_argument('-1',
required=True,
help='first bin folder name')
parser.add_argument('-2',
required=True,
help='second bin folder name')
parser.add_argument('-3',
required=False,
help='third bin folder name')
parser.add_argument('-o',
required=True,
help='output folder name')
parser.add_argument('-ms',
required=False,
default=524288,
type=int,
help='(optional) minimum size for refined bins, default = 524288 (0.5Mbp)')
args = vars(parser.parse_args())
output_dir = args['o']
if output_dir[-1]=='/':
output_dir=output_dir[:-1]
input_bin_folder_1 = args['1']
if input_bin_folder_1[-1] == '/':
input_bin_folder_1 = input_bin_folder_1[:-1]
input_bin_folder_2 = args['2']
if input_bin_folder_2[-1] == '/':
input_bin_folder_2 = input_bin_folder_2[:-1]
if args['3'] != None:
input_bin_folder_3 = args['3']
if input_bin_folder_3[-1] == '/':
input_bin_folder_3 = input_bin_folder_3[:-1]
bin_size_cutoff = args['ms']
bin_size_cutoff_MB = float("{0:.2f}".format(bin_size_cutoff / (1024 * 1024)))
# get input bin folder list
input_bin_folder_list = []
if args['3'] == None:
print('Specified 2 input bin sets: -1 %s -2 %s' % (input_bin_folder_1, input_bin_folder_2))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2]
else:
print('Specified 3 input bin sets: -1 %s -2 %s -3 %s' % (input_bin_folder_1, input_bin_folder_2, input_bin_folder_3))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2, input_bin_folder_3]
################################################ Define folder/file name ###############################################
wd = os.getcwd()
output_folder = output_dir
pwd_output_folder = '%s/%s' % (wd, output_folder)
########################################################################################################################
# get bin name list
bin_folder_1_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_1)
bin_folder_2_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_2)
# check input files
folder_bins_dict = {}
all_input_bins_list = []
all_input_bins_number_list = []
for bin_folder in input_bin_folder_list:
bins_files = '%s/%s/*.fa*' % (wd, bin_folder)
bin_folder_bins = [os.path.basename(file_name) for file_name in glob.glob(bins_files)]
all_input_bins_list.append(bin_folder_bins)
all_input_bins_number_list.append(len(bin_folder_bins))
folder_bins_dict[bin_folder] = bin_folder_bins
if len(bin_folder_bins) == 0:
print('No input bin detected from %s folder, please double-check!' % (bin_folder))
exit()
bin_folder_bins_ext_list = []
for bin in bin_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(bin)
bin_folder_bins_ext_list.append(bin_file_ext)
bin_folder_bins_ext_list_uniq = []
for each in bin_folder_bins_ext_list:
if each not in bin_folder_bins_ext_list_uniq:
bin_folder_bins_ext_list_uniq.append(each)
else:
pass
# check whether bins in the same folder have same extension, exit if not
if len(bin_folder_bins_ext_list_uniq) > 1:
print('Different file extensions were found from %s bins, please use same extension (fa, fas or fasta) '
'for all bins in the same folder.' % (bin_folder))
exit()
else:
pass
# create output folder
if os.path.isdir(output_folder):
shutil.rmtree(output_folder)
os.mkdir(output_folder)
else:
os.mkdir(output_folder)
# create folder to hold bins with renamed contig name
combined_all_bins_file = '%s/%s/combined_all_bins.fasta' % (wd, output_folder)
separator = '__'
for each_folder in input_bin_folder_list:
sleep(1)
print('Add folder/bin name to contig name for %s bins' % each_folder)
os.mkdir('%s/%s/%s_new' % (wd, output_folder, each_folder))
# add binning program and bin id to metabat_bin's contig name
each_folder_bins = folder_bins_dict[each_folder]
for each_bin in each_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(each_bin)
each_bin_content = SeqIO.parse('%s/%s/%s' % (wd, each_folder, each_bin), 'fasta')
new = open('%s/%s/%s_new/%s_%s.fasta' % (wd, output_folder, each_folder, each_folder, bin_file_name), 'w')
for each_contig in each_bin_content:
each_contig_new_id = '%s%s%s%s%s' % (each_folder, separator, bin_file_name, separator, each_contig.id)
each_contig.id = each_contig_new_id
each_contig.description = ''
SeqIO.write(each_contig, new, 'fasta')
new.close()
# Combine all new bins
os.system('cat %s/%s/%s_new/*.fasta > %s/%s/combined_%s_bins.fa' % (wd, output_folder, each_folder, wd, output_folder, each_folder))
os.system('rm -r %s/%s/%s_new' % (wd, output_folder, each_folder))
# combine all modified bins together
sleep(1)
print('Combine all bins together')
if len(input_bin_folder_list) == 2:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
os.system('cat %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, combined_all_bins_file))
if len(input_bin_folder_list) == 3:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
pwd_combined_folder_3_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_3)
os.system('cat %s %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, pwd_combined_folder_3_bins, combined_all_bins_file))
combined_all_bins = SeqIO.parse(combined_all_bins_file, 'fasta')
contig_bin_dict = {}
contig_length_dict = {}
for each in combined_all_bins:
each_id_split = each.id.split(separator)
folder_name = each_id_split[0]
bin_name = each_id_split[1]
contig_id = each_id_split[2]
length = len(each.seq)
if contig_id not in contig_bin_dict:
contig_bin_dict[contig_id] = ['%s%s%s' % (folder_name, separator, bin_name)]
contig_length_dict[contig_id] = length
elif contig_id in contig_bin_dict:
contig_bin_dict[contig_id].append('%s%s%s' % (folder_name, separator, bin_name))
contig_assignments_file = '%s/%s/contig_assignments.txt' % (wd, output_folder)
contig_assignments = open(contig_assignments_file, 'w')
for each in contig_bin_dict:
if len(contig_bin_dict[each]) == len(input_bin_folder_list):
contig_assignments.write('%s\t%s\t%s\n' % ('\t'.join(contig_bin_dict[each]), each, contig_length_dict[each]))
contig_assignments.close()
contig_assignments_file_sorted = '%s/%s/contig_assignments_sorted.txt' % (wd, output_folder)
contig_assignments_file_sorted_one_line = '%s/%s/contig_assignments_sorted_one_line.txt' % (wd, output_folder)
os.system('cat %s | sort > %s' % (contig_assignments_file, contig_assignments_file_sorted))
contig_assignments_sorted = open(contig_assignments_file_sorted)
contig_assignments_sorted_one_line = open(contig_assignments_file_sorted_one_line, 'w')
# State for a single pass over the sorted assignment lines: consecutive lines
# that share the exact same combination of source bins form one group, and
# each group whose total contig length reaches the cutoff becomes a refined bin.
current_match = ''
current_match_contigs = []
current_length_total = 0
n = 1
for each in contig_assignments_sorted:
    each_split = each.strip().split('\t')
    # Line layout: <source bin names...> <contig id> <contig length>.
    current_contig = each_split[-2]
    current_length = int(each_split[-1])
    matched_bins = '\t'.join(each_split[:-2])
    if current_match == '':
        # Very first line: start the first group.
        current_match = matched_bins
        current_match_contigs.append(current_contig)
        current_length_total += current_length
    elif current_match == matched_bins:
        # Same bin combination as the previous line: extend the group.
        current_match_contigs.append(current_contig)
        current_length_total += current_length
    elif current_match != matched_bins:
        # New bin combination: flush the finished group (if it is large
        # enough to keep), then start a new group with this contig.
        refined_bin_name = 'refined_bin%s' % n
        if current_length_total >= bin_size_cutoff:
            contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
            n += 1
        current_match = matched_bins
        current_match_contigs = []
        current_match_contigs.append(current_contig)
        current_length_total = 0
        current_length_total += current_length
# Flush the final group after the loop.  If it falls below the cutoff,
# decrement ``n`` so it ends up equal to the number of bins actually written.
if current_length_total >= bin_size_cutoff:
    contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
else:
    n -= 1
contig_assignments_sorted_one_line.close()
refined_bin_number = n
sleep(1)
print('The number of refined bins: %s' % refined_bin_number)
# Export refined bins and prepare input for GoogleVis
sleep(1)
print('Exporting refined bins...')
separated_1 = '%s/%s/Refined_bins_sources_and_length.txt' % (wd, output_folder)
separated_2 = '%s/%s/Refined_bins_contigs.txt' % (wd, output_folder)
googlevis_input_file = '%s/%s/GoogleVis_Sankey_%sMbp.csv' % (wd, output_folder, bin_size_cutoff_MB)
os.mkdir('%s/%s/Refined' % (wd, output_folder))
refined_bins = open(contig_assignments_file_sorted_one_line)
googlevis_input_handle = open(googlevis_input_file, 'w')
separated_1_handle = open(separated_1, 'w')
separated_2_handle = open(separated_2, 'w')
googlevis_input_handle.write('C1,C2,Length (Mbp)\n')
for each_refined_bin in refined_bins:
each_refined_bin_split = each_refined_bin.strip().split('\t')
each_refined_bin_name = each_refined_bin_split[0]
each_refined_bin_length = 0
each_refined_bin_contig = []
if len(input_bin_folder_list) == 2:
each_refined_bin_source = each_refined_bin_split[1:3]
each_refined_bin_length = int(each_refined_bin_split[3][:-2])
each_refined_bin_contig = each_refined_bin_split[4:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
if len(input_bin_folder_list) == 3:
each_refined_bin_source = each_refined_bin_split[1:4]
each_refined_bin_length = int(each_refined_bin_split[4][:-2])
each_refined_bin_contig = each_refined_bin_split[5:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
each_refined_bin_length_mbp = float("{0:.2f}".format(each_refined_bin_length / (1024 * 1024)))
m = 0
while m < len(each_refined_bin_source)-1:
googlevis_input_handle.write('%s,%s,%s\n' % (each_refined_bin_source[m], each_refined_bin_source[m+1], each_refined_bin_length_mbp))
m += 1
stdout.write('\rExtracting refined bin: %s.fasta' % each_refined_bin_name)
refined_bin_file = '%s/%s/Refined/%s.fasta' % (wd, output_folder, each_refined_bin_name)
refined_bin_handle = open(refined_bin_file, 'w')
input_contigs_file = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
input_contigs = SeqIO.parse(input_contigs_file, 'fasta')
for each_input_contig in input_contigs:
each_input_contig_id = each_input_contig.id.split(separator)[-1]
if each_input_contig_id in each_refined_bin_contig:
each_input_contig.id = each_input_contig_id
each_input_contig.description = ''
SeqIO.write(each_input_contig, refined_bin_handle, 'fasta')
refined_bin_handle.close()
googlevis_input_handle.close()
separated_1_handle.close()
separated_2_handle.close()
# remove temporary files
sleep(1)
print('\nDeleting temporary files')
os.system('rm %s' % contig_assignments_file)
os.system('rm %s' % (combined_all_bins_file))
os.system('rm %s/%s/*.fa' % (wd, output_folder))
os.system('rm %s' % (contig_assignments_file_sorted))
os.system('rm %s' % (contig_assignments_file_sorted_one_line))
sleep(1)
print('\nAll done!')
| 42.590062 | 158 | 0.700817 |
e9402b582313b1a6d95766ae71c3037c639b7830 | 4,610 | py | Python | dkt/trainer.py | ysb06/boostcamp-p4-dkt | 1ab3cf4d060dcb77f23180d1abe0554d559961cf | [
"MIT"
] | null | null | null | dkt/trainer.py | ysb06/boostcamp-p4-dkt | 1ab3cf4d060dcb77f23180d1abe0554d559961cf | [
"MIT"
] | null | null | null | dkt/trainer.py | ysb06/boostcamp-p4-dkt | 1ab3cf4d060dcb77f23180d1abe0554d559961cf | [
"MIT"
] | null | null | null | import os
import torch
import numpy as np
import torch.nn as nn
from sklearn.metrics import roc_auc_score, accuracy_score
def train(
    train_loader, model, optimizer,
    clip_grad: int,
    logging_step: int,
    device: torch.device,
):
    """Run one training epoch.

    Iterates over ``train_loader``, scores only the last position of each
    sequence, updates the model parameters after every batch and
    accumulates predictions for epoch-level metrics.

    Args:
        train_loader: iterable of batches understood by ``process_batch``.
        model: model to train; invoked as ``model(input)``.
        optimizer: optimizer updating ``model``'s parameters.
        clip_grad: max gradient norm forwarded to ``update_params``.
        logging_step: print the loss every ``logging_step`` batches.
        device: device the batch tensors are moved to.

    Returns:
        Tuple ``(auc, acc, loss_avg)`` for the epoch; ``loss_avg`` is a
        plain float.
    """
    model.train()

    total_preds = []
    total_targets = []
    losses = []
    for step, batch in enumerate(train_loader):
        input = process_batch(batch, device)
        preds = model(input)
        targets = input[3]  # correct

        loss = compute_loss(preds, targets)
        update_params(loss, model, optimizer, clip_grad)

        if step % logging_step == 0:
            print(f"Training steps: {step} Loss: {str(loss.item())}")

        # Only the last position of each sequence is evaluated.
        preds = preds[:, -1]
        targets = targets[:, -1]

        if device.type == 'cuda':
            preds = preds.to('cpu').detach().numpy()
            targets = targets.to('cpu').detach().numpy()
        else:  # cpu
            preds = preds.detach().numpy()
            targets = targets.detach().numpy()

        total_preds.append(preds)
        total_targets.append(targets)
        # Store the scalar value, not the tensor: keeping the loss tensor
        # would hold a reference into each batch's autograd graph for the
        # whole epoch and grow memory unnecessarily.
        losses.append(loss.item())

    total_preds = np.concatenate(total_preds)
    total_targets = np.concatenate(total_targets)

    # Train AUC / ACC
    auc, acc = get_metric(total_targets, total_preds)
    loss_avg = sum(losses) / len(losses)
    print(f'TRAIN AUC : {auc} ACC : {acc}')
    return auc, acc, loss_avg
def validate(valid_loader, model, device: torch.device):
    """Evaluate ``model`` on ``valid_loader``.

    Returns:
        Tuple ``(auc, acc, preds, targets)`` where ``preds``/``targets``
        are the concatenated last-position scores and labels.
    """
    model.eval()

    all_scores = []
    all_labels = []
    for batch in valid_loader:
        model_input = process_batch(batch, device)
        scores = model(model_input)
        labels = model_input[3]  # correct

        # Only the final position of every sequence is scored.
        scores, labels = scores[:, -1], labels[:, -1]

        if device.type == 'cuda':
            scores = scores.to('cpu')
            labels = labels.to('cpu')
        scores = scores.detach().numpy()
        labels = labels.detach().numpy()

        all_scores.append(scores)
        all_labels.append(labels)

    all_scores = np.concatenate(all_scores)
    all_labels = np.concatenate(all_labels)

    # Validation AUC / ACC
    auc, acc = get_metric(all_labels, all_scores)
    print(f'VALID AUC : {auc} ACC : {acc}\n')

    return auc, acc, all_scores, all_labels
# Batch pre-processing.
def process_batch(batch, device: torch.device):
    """Convert a raw dataloader batch into model-ready tensors.

    Categorical ids are shifted up by one so that 0 can serve as the
    padding index, the SAINT-style ``interaction`` (decoder input) is
    built by shifting ``correct`` one step right, and everything is
    moved to ``device``.

    Returns:
        Tuple ``(test, question, tag, correct, mask, interaction,
        gather_index)``; ``gather_index`` holds, per row, the index of
        the last non-padded position.
    """

    test, question, tag, correct, mask = batch

    # change to float (note: ``.type(torch.FloatTensor)`` also forces the
    # tensors onto the CPU, which the numpy call below relies on)
    mask = mask.type(torch.FloatTensor)
    correct = correct.type(torch.FloatTensor)

    # ``interaction`` is ``correct`` shifted one step to the right;
    # for SAINT-style models this is the decoder input.
    interaction = correct + 1  # add 1 to correct so 0 is free for padding
    interaction = interaction.roll(shifts=1, dims=1)
    interaction[:, 0] = 0  # set padding index to the first sequence
    interaction = (interaction * mask).to(torch.int64)
    # print(interaction)
    # exit()
    # test_id, question_id, tag
    test = ((test + 1) * mask).to(torch.int64)
    question = ((question + 1) * mask).to(torch.int64)
    tag = ((tag + 1) * mask).to(torch.int64)

    # gather index
    # index used to pick only the last valid element of each sequence
    gather_index = torch.tensor(np.count_nonzero(mask, axis=1))
    gather_index = gather_index.view(-1, 1) - 1

    # move everything to device memory
    test = test.to(device)
    question = question.to(device)
    tag = tag.to(device)
    correct = correct.to(device)
    mask = mask.to(device)
    interaction = interaction.to(device)
    gather_index = gather_index.to(device)

    return (test, question,
            tag, correct, mask,
            interaction, gather_index)
# Loss computation.
def compute_loss(preds, targets):
    """Binary cross-entropy on the final sequence position only.

    Args:
        preds : (batch_size, max_seq_len) predicted probabilities
        targets : (batch_size, max_seq_len) ground-truth labels

    Returns:
        Scalar tensor: mean BCE over the batch at the last position.
    """
    criterion = nn.BCELoss(reduction="none")
    elementwise = criterion(preds, targets)

    # Score only the final timestep of every sequence.
    last_step = elementwise[:, -1]
    return torch.mean(last_step)
def get_metric(targets, preds):
    """Return ``(roc_auc, accuracy)`` for probabilistic predictions.

    Accuracy is computed with a fixed 0.5 decision threshold.
    """
    hard_preds = (preds >= 0.5).astype(int)
    return roc_auc_score(targets, preds), accuracy_score(targets, hard_preds)
def update_params(loss, model, optimizer, clip_grad):
    """Backpropagate ``loss`` and take one optimizer step.

    Gradients are clipped to a global norm of ``clip_grad`` before the
    step, then zeroed for the next iteration.  The call order
    (backward -> clip -> step -> zero_grad) is significant.
    """
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
    optimizer.step()
    optimizer.zero_grad()
def save_checkpoint(state, model_dir, model_filename):
    """Persist a training ``state`` object under ``model_dir``.

    Args:
        state: picklable object (e.g. dict of model/optimizer state dicts).
        model_dir: directory to write into; created if missing.
        model_filename: checkpoint file name inside ``model_dir``.
    """
    print('saving model ...')
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(model_dir, exist_ok=True)
    torch.save(state, os.path.join(model_dir, model_filename))
| 26.494253 | 69 | 0.62321 |
f0e9313378e3d2f69d3d78c56db2b026d7ce085e | 1,363 | py | Python | 14.py | jeslinmx/advent2020 | 3b4ff9860af18be1b5dbf2a406f6cfd445ecc7da | [
"MIT"
] | 1 | 2020-12-23T20:15:12.000Z | 2020-12-23T20:15:12.000Z | 14.py | jeslinmx/advent2020 | 3b4ff9860af18be1b5dbf2a406f6cfd445ecc7da | [
"MIT"
] | null | null | null | 14.py | jeslinmx/advent2020 | 3b4ff9860af18be1b5dbf2a406f6cfd445ecc7da | [
"MIT"
] | 1 | 2020-12-23T16:09:50.000Z | 2020-12-23T16:09:50.000Z | # Problem: https://adventofcode.com/2020/day/14
# Input
from sys import stdin
program = list(stdin)
# Part 1
from collections import defaultdict
mem = defaultdict(int)
for line in program:
if line.startswith("mask = "):
mask = line.strip()[-36:]
ones_mask = int(mask.replace("X", "0"), base=2)
zeroes_mask = int(mask.replace("X", "1"), base=2)
else:
address, value = line[4:].split("] = ")
# override ones using OR, and override zeroes using AND
mem[int(address)] = (int(value) | ones_mask) & zeroes_mask
print(sum(mem.values()))
# Part 2
from itertools import product
mem = defaultdict(int)
for line in program:
if line.startswith("mask = "):
mask = line.strip()[-36:]
ones_mask = int(mask.replace("X", "0"), base=2)
floating_mask = int(mask.replace("0", "1").replace("X", "0"), base=2)
floating_values = [
(0, 1 << position)
for position, char in enumerate(reversed(mask))
if char == "X"
]
else:
address, value = line[4:].split("] = ")
# override ones, and override floating bits to zeroes
base_address = (int(address) | ones_mask) & floating_mask
for floating_value in product(*floating_values):
mem[base_address + sum(floating_value)] = int(value)
print(sum(mem.values())) | 34.075 | 77 | 0.603815 |
58a30d989233d96a254075c965fb24a6445476b7 | 3,991 | py | Python | dffml/source/dir.py | SGeetansh/dffml | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | [
"MIT"
] | 3 | 2021-03-08T18:41:21.000Z | 2021-06-05T20:15:14.000Z | dffml/source/dir.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 24 | 2020-05-20T23:29:57.000Z | 2021-04-14T04:18:21.000Z | dffml/source/dir.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 1 | 2021-04-19T23:58:26.000Z | 2021-04-19T23:58:26.000Z | """
Loads files from a directory
"""
import os
import glob
import pathlib
from typing import List
from ..record import Record
from ..base import config, field
from .memory import MemorySource
from ..util.entrypoint import entrypoint
from ..source.source import BaseSource
from ..configloader.configloader import ConfigLoaders
from ..high_level import save
class FolderNotFoundError(Exception):
    """
    Raised when the configured folder does not exist.
    """
@config
class DirectorySourceConfig:
    """Configuration for :class:`DirectorySource`."""

    # Directory whose files will be loaded as records.
    foldername: str
    feature: str = field("Name of the feature the data will be referenced as")
    labels: List[str] = field(
        "Image labels", default_factory=lambda: ["unlabelled"]
    )
    # Optional source the loaded records are written to on close.
    save: BaseSource = None
@entrypoint("dir")
class DirectorySource(MemorySource):
    """
    Source to read files in a folder.

    Each file under ``config.foldername`` is loaded with the registered
    config loaders and exposed as an in-memory :class:`Record` whose
    ``config.feature`` feature holds the file's contents.  When labels
    are configured, each label is expected to be a sub-folder of
    ``config.foldername`` containing that label's files.
    """

    CONFIG = DirectorySourceConfig
    CONFIG_LOADER = ConfigLoaders()

    def __init__(self, config):
        super().__init__(config)
        # Accept plain strings for foldername and normalize to pathlib.Path.
        if isinstance(getattr(self.config, "foldername", None), str):
            self.config.foldername = pathlib.Path(self.config.foldername)

    async def __aenter__(self) -> "BaseSourceContext":
        # All records are loaded eagerly when the context is entered.
        await self._open()
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self._close()

    async def _open(self):
        """Validate the folder/label configuration, then load all records."""
        if not os.path.exists(self.config.foldername) and not os.path.isdir(
            self.config.foldername
        ):
            raise FolderNotFoundError(f"Folder path: {self.config.foldername}")

        if (
            self.config.labels != ["unlabelled"]
            and len(self.config.labels) == 1
        ):
            # A single non-default label naming an existing file is treated
            # as a comma-separated list of labels to read from that file.
            if os.path.isfile(self.config.labels[0]):
                # Update labels with list read from the file
                self.config.labels = pathlib.Path.read_text(
                    pathlib.Path(self.config.labels[0])
                ).split(",")
        elif self.config.labels != ["unlabelled"]:
            label_folders = [
                labels
                for labels in os.listdir(self.config.foldername)
                if os.path.isdir(os.path.join(self.config.foldername, labels))
            ]
            # Check if all existing label folders are given to `labels` list
            if set(label_folders) > set(self.config.labels):
                self.logger.warning(
                    "All labels not specified. Folders present: %s \nLabels entered: %s",
                    label_folders,
                    self.config.labels,
                )
        await self.load_fd()

    async def _close(self):
        """If a save source was configured, persist the loaded records to it."""
        if self.config.save:
            await save(self.config.save, self.mem)

    async def load_fd(self):
        """Read every matching file into ``self.mem`` as a Record."""
        self.mem = {}
        # Iterate over the labels list
        for label in self.config.labels:
            if self.config.labels == ["unlabelled"]:
                folders = self.config.foldername
            else:
                folders = self.config.foldername.joinpath(label)
            # Go through all image files and read them using pngconfigloader
            for file_name in map(
                os.path.basename, glob.glob(str(folders) + "/*")
            ):
                image_filename = folders.joinpath(file_name)
                async with self.CONFIG_LOADER as cfgl:
                    _, feature_data = await cfgl.load_file(image_filename)
                if self.config.labels != ["unlabelled"]:
                    # Prefix the record key with its label so keys remain
                    # unique across label sub-folders.
                    file_name = label + "/" + file_name
                self.mem[file_name] = Record(
                    file_name,
                    data={
                        "features": {
                            self.config.feature: feature_data,
                            "label": label,
                        }
                    },
                )
                if self.config.labels == ["unlabelled"]:
                    # Unlabelled data must not carry the placeholder label.
                    del self.mem[file_name].features()["label"]
        self.logger.debug("%r loaded %d records", self, len(self.mem))
443a6ba099886f5c437e951e657f65e3cc23430c | 999 | py | Python | kAFL-Fuzzer/kafl_debug.py | SafeBreach-Labs/hAFL2 | f607d2b4973f1b2ca689dbe8e467dbd7dbac0881 | [
"BSD-3-Clause"
] | 102 | 2021-08-05T16:50:26.000Z | 2022-03-08T19:30:17.000Z | kAFL-Fuzzer/kafl_debug.py | SafeBreach-Labs/hAFL2 | f607d2b4973f1b2ca689dbe8e467dbd7dbac0881 | [
"BSD-3-Clause"
] | 1 | 2022-02-24T09:13:14.000Z | 2022-02-24T09:13:14.000Z | kAFL-Fuzzer/kafl_debug.py | SB-GC-Labs/hAFL1 | 2f4c49bff11163f25e282b12acbd021a13cdb00d | [
"BSD-3-Clause"
] | 18 | 2021-08-04T22:45:39.000Z | 2022-03-25T06:38:56.000Z | #!/usr/bin/env python3
#
# Copyright (C) 2017-2019 Sergej Schumilo, Cornelius Aschermann, Tim Blazytko
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Execute a given kAFL target with individual test inputs for purpose of debug/inspection.
"""
import os
import sys
import common.color
from common.self_check import self_check
from common.config import DebugConfiguration
KAFL_ROOT = os.path.dirname(os.path.realpath(__file__)) + "/"
KAFL_BANNER = KAFL_ROOT + "banner.txt"
KAFL_CONFIG = KAFL_ROOT + "kafl.ini"
def main():
    """Print the banner, run the environment self-check and start the debugger.

    Returns:
        Process exit status: 1 if the self-check fails, otherwise the
        result of ``debug.core.start``.
    """
    with open(KAFL_BANNER) as f:
        for line in f:
            print(line.replace("\n", ""))

    print("<< " + common.color.BOLD + common.color.OKGREEN +
          sys.argv[0] + ": kAFL Debugger " + common.color.ENDC + ">>\n")

    if not self_check(KAFL_ROOT):
        return 1

    import debug.core
    cfg = DebugConfiguration(KAFL_CONFIG)
    return debug.core.start(cfg)


if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code; previously
    # a failed self-check still exited with status 0.
    sys.exit(main())
| 23.785714 | 88 | 0.683684 |
1449bf0d4f5a75df4ebca2888cc8e2b28b1c7b04 | 321 | py | Python | app/endpoint/routes.py | vladimirze/Gimme-JSON-backend | 37fba97fcc99a2e631606f97fc48e6c280775ed0 | [
"MIT"
] | null | null | null | app/endpoint/routes.py | vladimirze/Gimme-JSON-backend | 37fba97fcc99a2e631606f97fc48e6c280775ed0 | [
"MIT"
] | null | null | null | app/endpoint/routes.py | vladimirze/Gimme-JSON-backend | 37fba97fcc99a2e631606f97fc48e6c280775ed0 | [
"MIT"
] | null | null | null | from app.endpoint import api
from flask import Blueprint
# Flask blueprint exposing the endpoint REST resources; both routes are
# backed by MethodView classes defined in ``app.endpoint.api``.
blueprint = Blueprint('endpoint', __name__)
blueprint.add_url_rule('/endpoint/', view_func=api.EndpointCollection.as_view('endpoint_collection'))
blueprint.add_url_rule('/endpoint/<string:endpoint_id>/', view_func=api.EndpointEntity.as_view('endpoint_entity'))
| 35.666667 | 114 | 0.813084 |
4675602a002364d12f00ae1279376f7a3d612348 | 783 | py | Python | runway/_cli/commands/_takeoff.py | avosper-intellaegis/runway | 757d4e7db269ec16479b044ac82a69f25fa2a450 | [
"Apache-2.0"
] | 134 | 2018-02-26T21:35:23.000Z | 2022-03-03T00:30:27.000Z | runway/_cli/commands/_takeoff.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | [
"Apache-2.0"
] | 937 | 2018-03-08T22:04:35.000Z | 2022-03-30T12:21:47.000Z | runway/_cli/commands/_takeoff.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | [
"Apache-2.0"
] | 70 | 2018-02-26T23:48:11.000Z | 2022-03-02T18:44:30.000Z | """``runway takeoff`` command."""
# docs: file://./../../../docs/source/commands.rst
import logging
from typing import TYPE_CHECKING, Any, cast
import click
from .. import options
from ._deploy import deploy
if TYPE_CHECKING:
from ..._logging import RunwayLogger
LOGGER = cast("RunwayLogger", logging.getLogger(__name__.replace("._", ".")))
@click.command("takeoff", short_help="alias of deploy")
@options.ci
@options.debug
@options.deploy_environment
@options.no_color
@options.tags
@options.verbose
@click.pass_context
def takeoff(ctx: click.Context, **kwargs: Any) -> None:
"""Alias of "runway deploy".
For more information, refer to the output of "runway deploy --help".
"""
LOGGER.verbose("forwarding to deploy...")
ctx.forward(deploy, **kwargs)
| 23.727273 | 77 | 0.708812 |
aee45c5b8bb1b7ec425d184d701e7be58af48bc4 | 4,445 | py | Python | nanopb_helpers/__init__.py | wheeler-microfluidics/nanopb-helpers | 29d2e265905005113817303d8da9af0a5efb7110 | [
"BSD-3-Clause"
] | null | null | null | nanopb_helpers/__init__.py | wheeler-microfluidics/nanopb-helpers | 29d2e265905005113817303d8da9af0a5efb7110 | [
"BSD-3-Clause"
] | null | null | null | nanopb_helpers/__init__.py | wheeler-microfluidics/nanopb-helpers | 29d2e265905005113817303d8da9af0a5efb7110 | [
"BSD-3-Clause"
] | null | null | null | '''
Provide an API for cross-platform compiling Protocol Buffer definitions for the
following targets:
- `nanopb` C
- Google C++
- Python
__NB__ The compilation is performed using bundled [`nanopb`][1]
[binary distributions][2].
`nanopb` is Copyright (c) 2011 Petteri Aimonen <jpa at nanopb.mail.kapsi.fi>
See [license][3] for more info.
[1]: http://koti.kapsi.fi/~jpa/nanopb
[2]: http://koti.kapsi.fi/~jpa/nanopb/download/
[3]: https://code.google.com/p/nanopb/source/browse/LICENSE.txt
'''
from __future__ import absolute_import
import os
import platform
import sys
import tempfile
from path_helpers import path
from subprocess import check_call
#: .. versionadded:: 0.8
import conda_helpers as ch
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def get_base_path():
    """Return the absolute path of the directory containing this module."""
    here = path(__file__).parent
    return here.abspath()
def package_path():
    """Return this package's directory as a ``path`` object."""
    module_file = path(__file__)
    return module_file.parent
def get_lib_directory():
    """
    Return directory containing the Arduino library headers.
    """
    return package_path().joinpath('Arduino').joinpath('library')
def get_exe_postfix():
    """
    Return the file extension for executable files.

    Returns:
        str: ``''`` on Linux/macOS, ``'.exe'`` on Windows.

    Raises:
        RuntimeError: if the platform is not supported.
    """
    if platform.system() in ('Linux', 'Darwin'):
        return ''
    elif platform.system() == 'Windows':
        return '.exe'
    # ``raise '<string>'`` is invalid in Python 3 (it raises a TypeError
    # instead of the intended message); raise a proper exception type.
    raise RuntimeError('Unsupported platform: %s' % platform.system())
def get_script_postfix():
    """
    Return the file extension for script files.

    Returns:
        str: ``''`` on Linux/macOS, ``'.bat'`` on Windows.

    Raises:
        RuntimeError: if the platform is not supported.
    """
    if platform.system() in ('Linux', 'Darwin'):
        return ''
    elif platform.system() == 'Windows':
        return '.bat'
    # ``raise '<string>'`` is invalid in Python 3 (it raises a TypeError
    # instead of the intended message); raise a proper exception type.
    raise RuntimeError('Unsupported platform: %s' % platform.system())
def get_nanopb_root():
    """
    Return the directory of the bundled nanopb Arduino headers inside the
    active Conda environment.

    .. versionchanged:: 0.8
        Use :func:`conda_helpers.conda_prefix` function.

    Raises:
        RuntimeError: if the platform is not supported.
    """
    if platform.system() in ('Linux', 'Darwin'):
        return ch.conda_prefix().joinpath('include', 'Arduino', 'nanopb')
    elif platform.system() == 'Windows':
        return ch.conda_prefix().joinpath('Library', 'include', 'Arduino',
                                          'nanopb')
    # ``raise '<string>'`` is invalid in Python 3 (it raises a TypeError
    # instead of the intended message); raise a proper exception type.
    raise RuntimeError('Unsupported platform: %s' % platform.system())
def get_sources():
    """Return the nanopb C/C++ source files bundled with the package."""
    nanopb_root = get_nanopb_root()
    return nanopb_root.files('*.c*')
def get_includes():
    """Return include directories for compiling against the bundled headers."""
    include_dir = get_base_path().joinpath('include')
    return [include_dir]
def compile_nanopb(proto_path, options_file=None):
    '''
    Compile specified Protocol Buffer file to `Nanopb
    <https://code.google.com/p/nanopb>`_ "plain-``C``" code.

    Parameters
    ----------
    proto_path : str
        Path to the ``.proto`` definition to compile.
    options_file : str, optional
        Path to a nanopb ``.options`` file passed to the generator.

    Returns
    -------
    dict
        Keys ``'header'`` and ``'source'`` map to the generated C code.
        In the source text, the generated header include is replaced with
        the ``{{ header_path }}`` template placeholder.

    .. versionchanged:: 0.9.2
        Fix Python 3 unicode support. Use :meth:`path_helpers.path.text`
        method instead of :meth:`path_helpers.path.bytes` method.
    '''
    proto_path = path(proto_path)
    tempdir = path(tempfile.mkdtemp(prefix='nanopb'))
    cwd = os.getcwd()

    try:
        # Run protoc and the nanopb generator from inside the temporary
        # directory so all intermediate files land there.
        os.chdir(tempdir)
        protoc = 'protoc' + get_exe_postfix()
        check_call([protoc, '-I%s' % proto_path.parent, proto_path,
                    '-o%s' % (tempdir.joinpath(proto_path.namebase + '.pb'))])
        nanopb_gen_cmd = [sys.executable, '-m', 'nanopb_generator',
                          tempdir.joinpath(proto_path.namebase + '.pb')]
        if options_file is not None:
            nanopb_gen_cmd += ['-f%s' % options_file]
        check_call(nanopb_gen_cmd)
        header = tempdir.files('*.h')[0].text()
        source = tempdir.files('*.c')[0].text()
        # Replace the concrete header include with a template placeholder
        # so callers can control the final header path when rendering.
        source = source.replace(proto_path.namebase + '.pb.h',
                                '{{ header_path }}')
    finally:
        # Always restore the working directory and remove temporary files,
        # even when one of the subprocess calls fails.
        os.chdir(cwd)
        tempdir.rmtree()
    return {'header': header, 'source': source}
def compile_pb(proto_path):
    '''
    Compile specified Protocol Buffer file to Google `Protocol Buffers
    <https://code.google.com/p/protobuf>`_ `C++` and Python code.

    Parameters
    ----------
    proto_path : str
        Path to the ``.proto`` definition to compile.

    Returns
    -------
    dict
        ``'python'`` maps to the generated Python module source;
        ``'cpp'`` maps to a dict with ``'header'`` and ``'source'`` keys.

    .. versionchanged:: 0.9.2
        Fix Python 3 unicode support. Use :meth:`path_helpers.path.text`
        method instead of :meth:`path_helpers.path.bytes` method.
    '''
    proto_path = path(proto_path)
    tempdir = path(tempfile.mkdtemp(prefix='nanopb'))
    result = {}

    try:
        protoc = 'protoc' + get_exe_postfix()
        check_call([protoc, '-I%s' % proto_path.parent, proto_path,
                    '--python_out=%s' % tempdir, '--cpp_out=%s' % tempdir])
        result['python'] = tempdir.files('*.py')[0].text()
        result['cpp'] = {'header': tempdir.files('*.h*')[0].text(),
                         'source': tempdir.files('*.c*')[0].text()}
    finally:
        # Remove the temporary build directory even if protoc fails.
        tempdir.rmtree()
    return result
| 29.832215 | 79 | 0.624297 |
3c29fededf16df0784cfa8c3c23fa318126655ec | 1,067 | bzl | Python | tools/workspace/raspberrypi-firmware/repository.bzl | kabrezi/pi3hat | 583595c5895240a5dc2b75f5981aef6d7671f94b | [
"Apache-2.0"
] | 64 | 2017-01-18T15:12:05.000Z | 2022-02-16T08:28:11.000Z | tools/workspace/raspberrypi-firmware/repository.bzl | kabrezi/pi3hat | 583595c5895240a5dc2b75f5981aef6d7671f94b | [
"Apache-2.0"
] | 7 | 2021-02-18T14:51:37.000Z | 2022-03-28T20:58:56.000Z | tools/workspace/raspberrypi-firmware/repository.bzl | kabrezi/pi3hat | 583595c5895240a5dc2b75f5981aef6d7671f94b | [
"Apache-2.0"
] | 14 | 2021-01-11T09:48:34.000Z | 2021-12-16T16:20:35.000Z | # -*- python -*-
# Copyright 2018 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def raspberrypi_firmware_repository(name):
    """Registers the pinned Raspberry Pi firmware release as an http_archive.

    Args:
      name: repository name to register the archive under.
    """
    http_archive(
        name = name,
        url = "https://github.com/raspberrypi/firmware/archive/1.20200601.tar.gz",
        sha256 = "d826cdfdcf5931b5ccdcf89b206a83983bea8c94ec349552eeccdd20666430c0",
        strip_prefix = "firmware-1.20200601",
        build_file = Label("//tools/workspace/raspberrypi-firmware:package.BUILD"),
    )
| 38.107143 | 84 | 0.733833 |
9ab649cab2ed92f18bdeaa17a0d65eea86ae8d23 | 2,157 | py | Python | tools/giws/datatypes/shortDataGiws.py | sguazt/dcsxx-testbed | e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0 | [
"Apache-2.0"
] | null | null | null | tools/giws/datatypes/shortDataGiws.py | sguazt/dcsxx-testbed | e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0 | [
"Apache-2.0"
] | null | null | null | tools/giws/datatypes/shortDataGiws.py | sguazt/dcsxx-testbed | e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <sylvestre.ledru@inria.fr> <sylvestre@ledru.info>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from dataGiws import dataGiws
class shortDataGiws(dataGiws):
    """Giws datatype mapping for the Java ``short`` (signed 16-bit) type.

    Provides the JNI type name, the native C++ type, the JNI call methods
    used to invoke Java methods returning ``short``, and the JNI signature.
    """

    # JNI-level / native-level names and the JNIEnv call entry points.
    type = "jshort"
    nativeType = "short"
    callMethod = "CallShortMethod"
    callStaticMethod = "CallStaticShortMethod"

    def getTypeSignature(self):
        """Return the JNI type signature character for short."""
        return "S"

    def getRealJavaType(self):
        """Return the Java-side name of this type."""
        return "short"

    def getDescription(self):
        """Return a human-readable description of the type."""
        return "signed 16 bits"
if __name__ == '__main__':
    # Smoke test: print the generated C++ return-type syntax.
    # Parenthesized so the statement is valid under both Python 2 and 3
    # (the original used the Python-2-only `print x` statement form).
    print(shortDataGiws().getReturnTypeSyntax())
| 37.189655 | 79 | 0.762633 |
c2ec19ba32936e2522af8fb83460894b6a209da3 | 2,030 | py | Python | packages/syft/src/syft/proto/core/pointer/pointer_pb2.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 8,428 | 2017-08-10T09:17:49.000Z | 2022-03-31T08:20:14.000Z | packages/syft/src/syft/proto/core/pointer/pointer_pb2.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 4,779 | 2017-08-09T23:19:00.000Z | 2022-03-29T11:49:36.000Z | packages/syft/src/syft/proto/core/pointer/pointer_pb2.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 2,307 | 2017-08-10T08:52:12.000Z | 2022-03-30T05:36:07.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/pointer/pointer.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database

# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# syft absolute
from syft.proto.core.common import (
    common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2

# Serialized FileDescriptorProto for pointer.proto, emitted by protoc;
# registering it makes the Pointer message descriptor available.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n proto/core/pointer/pointer.proto\x12\x11syft.core.pointer\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\x97\x02\n\x07Pointer\x12"\n\x1apoints_to_object_with_path\x18\x01 \x01(\t\x12\x14\n\x0cpointer_name\x18\x02 \x01(\t\x12-\n\x0eid_at_location\x18\x03 \x01(\x0b\x32\x15.syft.core.common.UID\x12\'\n\x08location\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0c\n\x04tags\x18\x05 \x03(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\x13\n\x0bobject_type\x18\x07 \x01(\t\x12\x16\n\x0e\x61ttribute_name\x18\x08 \x01(\t\x12\x19\n\x0cpublic_shape\x18\t \x01(\x0cH\x00\x88\x01\x01\x42\x0f\n\r_public_shapeb\x06proto3'
)

_POINTER = DESCRIPTOR.message_types_by_name["Pointer"]
# Concrete Python message class generated from the Pointer descriptor.
Pointer = _reflection.GeneratedProtocolMessageType(
    "Pointer",
    (_message.Message,),
    {
        "DESCRIPTOR": _POINTER,
        "__module__": "proto.core.pointer.pointer_pb2"
        # @@protoc_insertion_point(class_scope:syft.core.pointer.Pointer)
    },
)
_sym_db.RegisterMessage(Pointer)

if _descriptor._USE_C_DESCRIPTORS == False:
    DESCRIPTOR._options = None
    # Byte offsets of the Pointer message inside the serialized file above.
    _POINTER._serialized_start = 124
    _POINTER._serialized_end = 403
# @@protoc_insertion_point(module_scope)
| 44.130435 | 664 | 0.780296 |
ca34a91d75261d0721fde818ae2320a6d75abc38 | 755 | py | Python | src/third_party/skia/gn/dehydrate_sksl.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 54 | 2016-04-05T17:45:19.000Z | 2022-01-31T06:27:33.000Z | src/third_party/skia/gn/dehydrate_sksl.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 25 | 2016-03-18T04:01:06.000Z | 2020-06-27T15:39:35.000Z | src/third_party/skia/gn/dehydrate_sksl.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 50 | 2016-03-03T20:31:58.000Z | 2022-03-31T18:26:13.000Z | #!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
# Usage: dehydrate_sksl.py <skslc> <targetDir> <include files...>
# Recompiles each .sksl include into a dehydrated form next to targetDir.
skslc = sys.argv[1]
targetDir = sys.argv[2]
includes = sys.argv[3:]

# Create the output directory once, up front. The original re-checked
# isdir()/mkdir() on every loop iteration, which is both redundant and a
# check-then-act race.
if not os.path.isdir(targetDir):
    os.mkdir(targetDir)

for inc in includes:
    print("Recompiling " + inc + "...")
    try:
        noExt, _ = os.path.splitext(inc)
        tail = os.path.basename(noExt)
        target = os.path.join(targetDir, tail)
        subprocess.check_output([skslc, inc, target + ".dehydrated.sksl"])
    except subprocess.CalledProcessError as err:
        print("### Error compiling " + inc + ":")
        # check_output captures bytes; decode for a readable error message.
        print(err.output.decode(errors="replace"))
        exit(1)
| 26.034483 | 74 | 0.633113 |
155f50966c56421999b6e02a9f1ff4fece286e96 | 3,836 | py | Python | flopy/mt3d/mttob.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2022-03-30T14:48:22.000Z | 2022-03-30T14:48:22.000Z | flopy/mt3d/mttob.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/mt3d/mttob.py | codacy-badger/flopy | de874b02661f59ef4e99f18272883a13a4d55f16 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | from ..pakbase import Package
class Mt3dTob(Package):
    """
    Transport Observation package class

    Writes the MT3DMS Transport Observation (TOB) input file for the parent
    model.  Only flux observations are emitted by :meth:`write_file`;
    concentration observations are stubbed out (``inConcObs = 0``).
    """

    def __init__(self, model, outnam='tob_output', CScale=1.0, FluxGroups=[],
                 FScale=1.0, iOutFlux=0, extension='tob', unitnumber=None,
                 filenames=None):
        # model: parent model object this package attaches to.
        # outnam: root name for TOB output files.
        # FluxGroups: list of [flux-time observations, cells, iSSType]
        #             triples consumed by write_file.
        # NOTE(review): FluxGroups uses a mutable default argument ([]);
        # instances sharing the default would share the same list.
        if unitnumber is None:
            unitnumber = Mt3dTob.defaultunit()
        elif unitnumber == 0:
            unitnumber = Mt3dTob.reservedunit()

        # set filenames
        if filenames is None:
            filenames = [None]
        elif isinstance(filenames, str):
            filenames = [filenames]

        # Fill namefile items
        name = [Mt3dTob.ftype()]
        units = [unitnumber]
        extra = ['']

        # set package name
        fname = [filenames[0]]

        # Call ancestor's init to set self.parent, extension, name and unit number
        Package.__init__(self, model, extension=extension, name=name,
                         unit_number=units, extra=extra, filenames=fname)

        self.heading = '# TOB for MT3DMS, generated by Flopy.'
        self.outnam = outnam
        self.CScale = CScale
        self.FluxGroups = FluxGroups
        self.FScale = FScale
        self.iOutFlux = iOutFlux
        # Register this package with the parent model.
        self.parent.add_package(self)
        return

    def __repr__(self):
        return 'Transport Observation package class'

    def write_file(self):
        """
        Write the package file

        Returns
        -------
        None

        """
        # Open file for writing
        f_tob = open(self.fn_path, 'w')
        f_tob.write('%s\n' % (self.heading))

        # Record 1 counters; concentration observations are not supported
        # here, so MaxConcObs stays 0.
        MaxConcObs = 0
        MaxFluxObs = 0
        MaxFluxCells = 0

        # Package option flags / unit numbers written to record 2.
        inConcObs = 0
        inFluxObs = 88
        inSaveObs = 89

        if (inFluxObs):
            # Count the flux observations and the total number of cells.
            for FluxGroup in self.FluxGroups:
                MaxFluxCells = MaxFluxCells + len(FluxGroup[1])
                MaxFluxObs = MaxFluxObs + 1
        f_tob.write('%10d%10d%10d\n' % (MaxConcObs, MaxFluxObs, MaxFluxCells))
        f_tob.write('%s%10d%10d%10d\n' % (self.outnam, inConcObs, inFluxObs,
                                          inSaveObs))

        # if (inConcObs):
        #
        if (inFluxObs):
            nFluxGroup = len(self.FluxGroups)
            f_tob.write('%10d%10f%10d\n' % (nFluxGroup, self.FScale,
                                            self.iOutFlux))

            # Each FluxGroup is [flux-time observations, cells, iSSType].
            for FluxGroup in self.FluxGroups:
                nFluxTimeObs, FluxTimeObs = (
                    self.assign_layer_row_column_data(FluxGroup[0], 5,
                                                      zerobase=False))  # misuse of function - zerobase set to False
                nCells, Cells = self.assign_layer_row_column_data(FluxGroup[1],
                                                                  4,
                                                                  zerobase=False)  # misuse of function - zerobase set to False
                # NOTE(review): the computed nCells is overwritten with the
                # hard-coded value 4 here — confirm this is intentional.
                nCells = 4
                iSSType = FluxGroup[2]
                f_tob.write('%10d%10d%10d\n' % (nFluxTimeObs, nCells, iSSType))

                for fto in FluxTimeObs:
                    fto = fto[0]  # Still to fix this!
                    f_tob.write('%12s%10s%10s%10s%10s\n' % (fto[0], fto[1],
                                                            fto[2], fto[3],
                                                            fto[4]))

                for c in Cells:
                    c = c[0]  # Still to fix this!
                    f_tob.write('%10d%10d%10d%10f\n' % (c[0], c[1], c[2],
                                                        c[3]))
        f_tob.close()
        return

    @staticmethod
    def ftype():
        # Namefile package type string.
        return 'TOB'

    @staticmethod
    def defaultunit():
        # Default unit number assigned when none is given.
        return 37

    @staticmethod
    def reservedunit():
        # Unit number used when unitnumber == 0 is requested.
        return 12
| 33.946903 | 127 | 0.48488 |
17189708d5c5782c3a0eda6bfba8378af7f73c84 | 384 | py | Python | project_plantware/warehouse/migrations/0008_auto_20200629_2114.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | project_plantware/warehouse/migrations/0008_auto_20200629_2114.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | project_plantware/warehouse/migrations/0008_auto_20200629_2114.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-29 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('warehouse', '0007_auto_20200628_1420'),
]
operations = [
migrations.AlterField(
model_name='order',
name='date_ordered',
field=models.DateTimeField(),
),
]
| 20.210526 | 49 | 0.598958 |
82768baec8bb33e473288b5e757951c756c0f198 | 1,374 | py | Python | setup.py | briang1/django-filebrowser-no-grappelli | 61363cb37c03d3f98008b55691ed4c423c416c4a | [
"BSD-3-Clause"
] | null | null | null | setup.py | briang1/django-filebrowser-no-grappelli | 61363cb37c03d3f98008b55691ed4c423c416c4a | [
"BSD-3-Clause"
] | null | null | null | setup.py | briang1/django-filebrowser-no-grappelli | 61363cb37c03d3f98008b55691ed4c423c416c4a | [
"BSD-3-Clause"
] | 1 | 2019-12-03T06:17:59.000Z | 2019-12-03T06:17:59.000Z | import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-filebrowser-no-grappelli',
version='3.7.8',
description='Media-Management no Grappelli',
long_description=read('README.rst'),
url='https://github.com/smacker/django-filebrowser-no-grappelli',
download_url='',
author='Patrick Kranzlmueller, Axel Swoboda (vonautomatisch)',
author_email='office@vonautomatisch.at',
maintainer='Maxim Sukharev',
maintainer_email='max@smacker.ru',
license='BSD',
packages=find_packages(exclude=['tests']),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
zip_safe=False,
)
| 34.35 | 70 | 0.640466 |
e662d65002493925065b9f75a08ec56fc051bf3c | 8,134 | py | Python | source/lambda/jobresultprocessor/lambda_function.py | jfathi/document-understanding-solution | 81da1cce082b8f814ee09825bff4984086f67cab | [
"Apache-2.0"
] | 138 | 2020-04-01T02:15:14.000Z | 2022-03-29T17:54:27.000Z | source/lambda/jobresultprocessor/lambda_function.py | jfathi/document-understanding-solution | 81da1cce082b8f814ee09825bff4984086f67cab | [
"Apache-2.0"
] | 59 | 2020-05-04T15:39:34.000Z | 2022-02-22T17:54:53.000Z | source/lambda/jobresultprocessor/lambda_function.py | jfathi/document-understanding-solution | 81da1cce082b8f814ee09825bff4984086f67cab | [
"Apache-2.0"
] | 68 | 2020-04-02T01:42:53.000Z | 2022-03-11T17:37:36.000Z |
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the License). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
#####################################################################################################################
import json
import os
import boto3
import time
from helper import AwsHelper
from og import OutputGenerator, KVPAIRS, DOCTEXT ,SERVICE_OUTPUT_PATH_S3_PREFIX,COMPREHEND_PATH_S3_PREFIX,TEXTRACT_PATH_S3_PREFIX,PUBLIC_PATH_S3_PREFIX
import datastore
from comprehendHelper import ComprehendHelper
from kendraHelper import KendraHelper
def generatePdf(documentId, bucketName, objectName, responseBucketName, outputPath):
    """Invoke the searchable-PDF Lambda for one document.

    Builds the input/output S3 locations from the Textract response path and
    calls the function named by the PDF_LAMBDA environment variable
    synchronously ('RequestResponse'); the response is not inspected.
    """
    responseDocumentName = "{}{}response.json".format(outputPath, TEXTRACT_PATH_S3_PREFIX)
    fileName = os.path.basename(objectName).split(".")[0]
    outputDocumentName = "{}{}-searchable.pdf".format(outputPath, fileName)

    payload = {
        "bucketName": bucketName,
        "documentName": objectName,
        "responseBucketName": responseBucketName,
        "responseDocumentName": responseDocumentName,
        "outputBucketName": responseBucketName,
        "outputDocumentName": outputDocumentName,
    }

    boto3.client('lambda').invoke(
        FunctionName=os.environ['PDF_LAMBDA'],
        InvocationType='RequestResponse',
        LogType='Tail',
        Payload=json.dumps(payload),
    )
def getJobResults(api, jobId):
    """Collect all result pages of an asynchronous Textract job.

    Parameters
    ----------
    api : str
        The API that started the job.  "StartDocumentTextDetection" jobs are
        paged with get_document_text_detection; everything else with
        get_document_analysis.  (The original fetched the *first* page with
        get_document_analysis unconditionally, which is wrong for
        text-detection jobs.)
    jobId : str
        The Textract job id.

    Returns
    -------
    list
        Raw response dicts, one per result page.
    """
    client = AwsHelper().getClient('textract')

    def _fetch_page(nextToken):
        # One paged call with the API matching how the job was started.
        kwargs = {'JobId': jobId}
        if nextToken is not None:
            kwargs['NextToken'] = nextToken
        if api == "StartDocumentTextDetection":
            return client.get_document_text_detection(**kwargs)
        return client.get_document_analysis(**kwargs)

    pages = []
    nextToken = None
    while True:
        try:
            response = _fetch_page(nextToken)
        except Exception as e:
            if e.__class__.__name__ == 'ProvisionedThroughputExceededException':
                # Throttled: back off briefly, then retry the same page.
                print("ProvisionedThroughputExceededException.")
                print("Waiting for few seconds...")
                time.sleep(5)
                print("Waking up...")
                continue
            # The original swallowed every other exception and could loop
            # forever on the same token; re-raise unexpected errors instead.
            raise
        pages.append(response)
        print("Resultset page recieved: {}".format(len(pages)))
        nextToken = response.get('NextToken')
        if nextToken is None:
            return pages
        print("Next token: {}".format(nextToken))
def processRequest(request):
    """Post-process a completed Textract job.

    Fetches all Textract result pages, generates the derived outputs
    (structured output via OutputGenerator, a searchable PDF, Comprehend /
    ComprehendMedical entities, and an optional Kendra index entry),
    indexes the document, and marks it complete in the document store.

    ``request`` is a dict of job metadata and environment-derived
    bucket/table/domain names (see ``lambda_handler``).  Returns a
    Lambda-style ``{'statusCode': 200, 'body': ...}`` dict.
    """

    output = ""

    print("Request : {}".format(request))

    # Unpack the request; jobStatus is currently informational only.
    jobId = request['jobId']
    documentId = request['jobTag']
    jobStatus = request['jobStatus']
    jobAPI = request['jobAPI']
    bucketName = request['bucketName']
    outputBucketName = request['outputBucketName']
    objectName = request['objectName']
    outputTable = request["outputTable"]
    documentsTable = request["documentsTable"]
    elasticsearchDomain = request["elasticsearchDomain"]

    pages = getJobResults(jobAPI, jobId)

    print("Result pages recieved: {}".format(len(pages)))

    dynamodb = AwsHelper().getResource("dynamodb")
    ddb = dynamodb.Table(outputTable)

    # Forms/tables are only extracted for full analysis jobs.
    detectForms = False
    detectTables = False
    if(jobAPI == "StartDocumentAnalysis"):
        detectForms = True
        detectTables = True

    # NOTE(review): dynamodb/ddb are re-created here, duplicating the two
    # assignments a few lines above.
    dynamodb = AwsHelper().getResource('dynamodb')
    ddb = dynamodb.Table(outputTable)

    outputPath = '{}{}/{}'.format(PUBLIC_PATH_S3_PREFIX,documentId,SERVICE_OUTPUT_PATH_S3_PREFIX)
    print("Generating output for DocumentId: {} and storing in {}".format(documentId,outputPath))

    opg = OutputGenerator(documentId, pages, outputBucketName, objectName, detectForms, detectTables, ddb,outputPath, elasticsearchDomain)
    opg_output = opg.run()

    generatePdf(documentId, bucketName, objectName, outputBucketName,outputPath)

    # generate Comprehend and ComprehendMedical entities
    comprehendOutputPath = "{}{}".format(outputPath,COMPREHEND_PATH_S3_PREFIX)
    print("Comprehend output path: " + comprehendOutputPath)
    maxPages = 100
    comprehendClient = ComprehendHelper()
    responseDocumentName = "{}{}response.json".format(outputPath,TEXTRACT_PATH_S3_PREFIX)
    comprehendAndMedicalEntities = comprehendClient.processComprehend(outputBucketName, responseDocumentName, comprehendOutputPath, maxPages)

    # if Kendra is available then let it index the document
    if 'KENDRA_INDEX_ID' in os.environ:
        kendraClient = KendraHelper()
        # NOTE(review): split(".")[1] assumes exactly one dot in the object
        # name; names with extra dots would yield a wrong extension.
        fileName = os.path.basename(objectName).split(".")[0]
        fileExtension = os.path.basename(objectName).split(".")[1]
        outputDocumentName = "{}{}-searchable.pdf".format(outputPath, fileName)
        kendraClient.indexDocument(os.environ['KENDRA_INDEX_ID'],
                                   os.environ['KENDRA_ROLE_ARN'],
                                   bucketName,
                                   outputDocumentName,
                                   documentId,
                                   fileExtension)

    print("DocumentId: {}".format(documentId))
    print("Processed Comprehend data: {}".format(comprehendAndMedicalEntities))

    # index document once the comprehend entities and KVPairs have been extracted
    for key, val in opg_output[KVPAIRS].items():
        if key not in comprehendAndMedicalEntities:
            comprehendAndMedicalEntities[key] = val
        else:
            comprehendAndMedicalEntities[key].add(val)
    opg.indexDocument(opg_output[DOCTEXT], comprehendAndMedicalEntities)

    ds = datastore.DocumentStore(documentsTable, outputTable)
    ds.markDocumentComplete(documentId)

    output = "Processed -> Document: {}, Object: {}/{} processed.".format(documentId, bucketName, objectName)

    return {
        'statusCode': 200,
        'body': output
    }
def lambda_handler(event, context):
    """SQS entry point: unwrap the SNS message carried in the SQS record,
    merge in environment configuration, and delegate to processRequest."""
    print("event: {}".format(event))

    body = json.loads(event['Records'][0]['body'])
    message = json.loads(body['Message'])
    print("Message: {}".format(message))

    request = {
        "jobId": message['JobId'],
        "jobTag": message['JobTag'],
        "jobStatus": message['Status'],
        "jobAPI": message['API'],
        "bucketName": message['DocumentLocation']['S3Bucket'],
        "objectName": message['DocumentLocation']['S3ObjectName'],
        "outputBucketName": os.environ['OUTPUT_BUCKET'],
        "elasticsearchDomain": os.environ['ES_DOMAIN'],
        "outputTable": os.environ['OUTPUT_TABLE'],
        "documentsTable": os.environ['DOCUMENTS_TABLE'],
    }

    return processRequest(request)
def lambda_handler_local(event, context):
    """Local/debug entry point: the event is already a request dict, so it
    is passed straight to processRequest without SQS/SNS unwrapping."""
    print("Event: {}".format(event))
    return processRequest(event)
| 41.28934 | 151 | 0.610032 |
24bdee255f7dafab2868373a0c344ea5439b3c16 | 2,637 | py | Python | vmtkScripts/vmtksurfacebooleanoperation.py | Clemson-MSE/vmtk | de358d4316ed441e20a39d46128a6a6c47af20cb | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtksurfacebooleanoperation.py | Clemson-MSE/vmtk | de358d4316ed441e20a39d46128a6a6c47af20cb | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtksurfacebooleanoperation.py | Clemson-MSE/vmtk | de358d4316ed441e20a39d46128a6a6c47af20cb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfacebooleanOperation.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.7 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
from vmtk import vtkvmtk
import sys
from vmtk import pypes
class vmtkSurfaceBooleanOperation(pypes.pypeScript):
    """Pype script that performs a boolean operation (union, intersection
    or difference) between two input surfaces using VTK's
    vtkBooleanOperationPolyDataFilter."""

    def __init__(self):

        pypes.pypeScript.__init__(self)

        # Script state: the two operand surfaces, the coincidence tolerance
        # and which boolean operation to apply.
        self.Surface = None
        self.Surface2 = None
        self.Tolerance = 1E-6
        self.Operation = 'union'

        self.SetScriptName('vmtksurfacebooleanoperation')
        self.SetScriptDoc('perform a boolean operation between two surfaces')
        self.SetInputMembers([
            ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
            ['Surface2','i2','vtkPolyData',1,'','the second input surface','vmtksurfacereader'],
            ['Tolerance','tolerance','float',1,'(0.0,)','tolerance for considering two points coincident'],
            ['Operation','operation','str',1,'["union","intersection","difference"]','the boolean operation to be performed']
            ])
        self.SetOutputMembers([
            ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']
            ])

    def Execute(self):
        """Run the boolean filter on Surface/Surface2 and store the result
        back in Surface."""
        # PEP 8: compare against None with `is`, not `==` (the original's
        # `== None` invokes __eq__, which can misbehave on wrapped objects).
        if self.Surface is None:
            self.PrintError('Error: No Surface.')
        if self.Surface2 is None:
            self.PrintError('Error: No Surface2.')

        booleanOperationFilter = vtk.vtkBooleanOperationPolyDataFilter()
        booleanOperationFilter.SetInputData(0, self.Surface)
        booleanOperationFilter.SetInputData(1, self.Surface2)
        if self.Operation == 'union':
            booleanOperationFilter.SetOperationToUnion()
        elif self.Operation == 'intersection':
            booleanOperationFilter.SetOperationToIntersection()
        elif self.Operation == 'difference':
            booleanOperationFilter.SetOperationToDifference()
        booleanOperationFilter.SetTolerance(self.Tolerance)
        booleanOperationFilter.Update()

        self.Surface = booleanOperationFilter.GetOutput()
if __name__=='__main__':
    # Standard vmtk script entry point: hand argv to the pype runner.
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
| 35.16 | 125 | 0.667425 |
52defc9936ac19817e42359fb246e0a12b31e825 | 600 | py | Python | hazedumper/__init__.py | cxldxice/hazedumper | 2c5aea17165cc931d8b027671150c01eb13abc4e | [
"MIT"
] | null | null | null | hazedumper/__init__.py | cxldxice/hazedumper | 2c5aea17165cc931d8b027671150c01eb13abc4e | [
"MIT"
] | null | null | null | hazedumper/__init__.py | cxldxice/hazedumper | 2c5aea17165cc931d8b027671150c01eb13abc4e | [
"MIT"
] | null | null | null | #name: hazedumper
#desc: pkg for auto update offsets
#source: https://github.com/frk1/hazedumper (fetches the published offset dump)
#author: by @fxcvd
import requests
from .default import *
HAZEDUMPER_URL = "https://raw.githubusercontent.com/frk1/hazedumper/master/csgo.min.json"
LoadError = Exception("LoadError")
def load():
    """Fetch the latest hazedumper offsets and publish them as module globals.

    Downloads csgo.min.json, merges its "signatures" and "netvars" maps and
    assigns each offset name as an attribute of this module.

    Raises
    ------
    Exception
        ``LoadError`` when the HTTP request does not return status 200.
    """
    # Bound the request: load() runs at import time (see the guard below),
    # so a stalled connection must not hang the importing process forever.
    res = requests.get(HAZEDUMPER_URL, timeout=10)
    if res.status_code != 200:
        raise LoadError
    data = res.json()
    # Deterministic merge (the original's set-union of .items() picked an
    # arbitrary winner on duplicate keys); netvars override signatures.
    offsets = dict(data["signatures"])
    offsets.update(data["netvars"])
    for name, value in offsets.items():
        globals()[name] = value
if __name__ != "__main__":
    # Eagerly fetch the offsets whenever this package is imported.
    load()
5c9842a8435c4046002497b359c6b53001afe32c | 71,311 | py | Python | src/sage/rings/multi_power_series_ring_element.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | 2 | 2019-06-02T03:16:47.000Z | 2019-06-15T10:17:19.000Z | src/sage/rings/multi_power_series_ring_element.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | null | null | null | src/sage/rings/multi_power_series_ring_element.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | 1 | 2019-06-02T03:16:55.000Z | 2019-06-02T03:16:55.000Z | r"""
Multivariate Power Series
Construct and manipulate multivariate power series (in finitely many
variables) over a given commutative ring. Multivariate power series
are implemented with total-degree precision.
EXAMPLES:
Power series arithmetic, tracking precision::
sage: R.<s,t> = PowerSeriesRing(ZZ); R
Multivariate Power Series Ring in s, t over Integer Ring
sage: f = 1 + s + 3*s^2; f
1 + s + 3*s^2
sage: g = t^2*s + 3*t^2*s^2 + R.O(5); g
s*t^2 + 3*s^2*t^2 + O(s, t)^5
sage: g = t^2*s + 3*t^2*s^2 + O(s, t)^5; g
s*t^2 + 3*s^2*t^2 + O(s, t)^5
sage: f = f.O(7); f
1 + s + 3*s^2 + O(s, t)^7
sage: f += s; f
1 + 2*s + 3*s^2 + O(s, t)^7
sage: f*g
s*t^2 + 5*s^2*t^2 + O(s, t)^5
sage: (f-1)*g
2*s^2*t^2 + 9*s^3*t^2 + O(s, t)^6
sage: f*g - g
2*s^2*t^2 + O(s, t)^5
sage: f*=s; f
s + 2*s^2 + 3*s^3 + O(s, t)^8
sage: f%2
s + s^3 + O(s, t)^8
sage: (f%2).parent()
Multivariate Power Series Ring in s, t over Ring of integers modulo 2
As with univariate power series, comparison of `f` and `g` is
done up to the minimum precision of `f` and `g`::
sage: f = 1 + t + s + s*t + R.O(3); f
1 + s + t + s*t + O(s, t)^3
sage: g = s^2 + 2*s^4 - s^5 + s^2*t^3 + R.O(6); g
s^2 + 2*s^4 - s^5 + s^2*t^3 + O(s, t)^6
sage: f == g
False
sage: g == g.add_bigoh(3)
True
sage: f < g
False
sage: f > g
True
Calling::
sage: f = s^2 + s*t + s^3 + s^2*t + 3*s^4 + 3*s^3*t + R.O(5); f
s^2 + s*t + s^3 + s^2*t + 3*s^4 + 3*s^3*t + O(s, t)^5
sage: f(t,s)
s*t + t^2 + s*t^2 + t^3 + 3*s*t^3 + 3*t^4 + O(s, t)^5
sage: f(t^2,s^2)
s^2*t^2 + t^4 + s^2*t^4 + t^6 + 3*s^2*t^6 + 3*t^8 + O(s, t)^10
Substitution is defined only for elements of positive valuation, unless `f`
has infinite precision::
sage: f(t^2,s^2+1)
Traceback (most recent call last):
...
TypeError: Substitution defined only for elements of positive valuation,
unless self has infinite precision.
sage: g = f.truncate()
sage: g(t^2,s^2+1)
t^2 + s^2*t^2 + 2*t^4 + s^2*t^4 + 4*t^6 + 3*s^2*t^6 + 3*t^8
sage: g(t^2,(s^2+1).O(3))
t^2 + s^2*t^2 + 2*t^4 + O(s, t)^5
0 has valuation ``+Infinity``::
sage: f(t^2,0)
t^4 + t^6 + 3*t^8 + O(s, t)^10
sage: f(t^2,s^2+s)
s*t^2 + s^2*t^2 + t^4 + O(s, t)^5
Substitution of power series with finite precision works too::
sage: f(s.O(2),t)
s^2 + s*t + O(s, t)^3
sage: f(f,f)
2*s^4 + 4*s^3*t + 2*s^2*t^2 + 4*s^5 + 8*s^4*t + 4*s^3*t^2 + 16*s^6 +
34*s^5*t + 20*s^4*t^2 + 2*s^3*t^3 + O(s, t)^7
sage: t(f,f)
s^2 + s*t + s^3 + s^2*t + 3*s^4 + 3*s^3*t + O(s, t)^5
sage: t(0,f) == s(f,0)
True
The ``subs`` syntax works as expected::
sage: r0 = -t^2 - s*t^3 - 2*t^6 + s^7 + s^5*t^2 + R.O(10)
sage: r1 = s^4 - s*t^4 + s^6*t - 4*s^2*t^5 - 6*s^3*t^5 + R.O(10)
sage: r2 = 2*s^3*t^2 - 2*s*t^4 - 2*s^3*t^4 + s*t^7 + R.O(10)
sage: r0.subs({t:r2,s:r1})
-4*s^6*t^4 + 8*s^4*t^6 - 4*s^2*t^8 + 8*s^6*t^6 - 8*s^4*t^8 - 4*s^4*t^9
+ 4*s^2*t^11 - 4*s^6*t^8 + O(s, t)^15
sage: r0.subs({t:r2,s:r1}) == r0(r1,r2)
True
Construct ring homomorphisms from one power series ring to another::
sage: A.<a,b> = PowerSeriesRing(QQ)
sage: X.<x,y> = PowerSeriesRing(QQ)
sage: phi = Hom(A,X)([x,2*y]); phi
Ring morphism:
From: Multivariate Power Series Ring in a, b over Rational Field
To: Multivariate Power Series Ring in x, y over Rational Field
Defn: a |--> x
b |--> 2*y
sage: phi(a+b+3*a*b^2 + A.O(5))
x + 2*y + 12*x*y^2 + O(x, y)^5
Multiplicative inversion of power series::
sage: h = 1 + s + t + s*t + s^2*t^2 + 3*s^4 + 3*s^3*t + R.O(5)
sage: k = h^-1; k
1 - s - t + s^2 + s*t + t^2 - s^3 - s^2*t - s*t^2 - t^3 - 2*s^4 -
2*s^3*t + s*t^3 + t^4 + O(s, t)^5
sage: h*k
1 + O(s, t)^5
sage: f = 1 - 5*s^29 - 5*s^28*t + 4*s^18*t^35 + \
....: 4*s^17*t^36 - s^45*t^25 - s^44*t^26 + s^7*t^83 + \
....: s^6*t^84 + R.O(101)
sage: h = ~f; h
1 + 5*s^29 + 5*s^28*t - 4*s^18*t^35 - 4*s^17*t^36 + 25*s^58 + 50*s^57*t
+ 25*s^56*t^2 + s^45*t^25 + s^44*t^26 - 40*s^47*t^35 - 80*s^46*t^36
- 40*s^45*t^37 + 125*s^87 + 375*s^86*t + 375*s^85*t^2 + 125*s^84*t^3
- s^7*t^83 - s^6*t^84 + 10*s^74*t^25 + 20*s^73*t^26 + 10*s^72*t^27
+ O(s, t)^101
sage: h*f
1 + O(s, t)^101
AUTHORS:
- Niles Johnson (07/2010): initial code
- Simon King (08/2012): Use category and coercion framework, :trac:`13412`
"""
# ****************************************************************************
# Copyright (C) 2010 Niles Johnson <nilesj@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from six import iteritems, integer_types
from sage.structure.richcmp import richcmp
from sage.rings.finite_rings.integer_mod_ring import Zmod
from sage.rings.infinity import infinity, is_Infinite
from sage.rings.integer import Integer
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.power_series_ring import is_PowerSeriesRing
from sage.rings.power_series_ring_element import PowerSeries
def is_MPowerSeries(f):
    """
    Return ``True`` if ``f`` is a multivariate power series.

    TESTS::

        sage: from sage.rings.power_series_ring_element import is_PowerSeries
        sage: from sage.rings.multi_power_series_ring_element import is_MPowerSeries
        sage: M = PowerSeriesRing(ZZ,4,'v')
        sage: is_PowerSeries(M.random_element(10))
        True
        sage: is_MPowerSeries(M.random_element(10))
        True
        sage: T.<v> = PowerSeriesRing(RR)
        sage: is_MPowerSeries(1 - v + v^2 +O(v^3))
        False
        sage: is_PowerSeries(1 - v + v^2 +O(v^3))
        True
    """
    # Plain type check against the MPowerSeries class defined below.
    return isinstance(f, MPowerSeries)
class MPowerSeries(PowerSeries):
### methods from PowerSeries that we *don't* override:
#
# __hash__ : works just fine
#
# __reduce__ : don't really understand this
#
# is_sparse : works just fine
#
# is_dense : works just fine
#
# is_gen : works just fine
#
# base_extend : works just fine
#
# change_ring : works just fine
#
# _cmp_ : don't understand this
#
# __copy__ : works just fine
#
# base_ring : works just fine
#
# common_prec : works just fine
#
# common_prec_c : seems fine
#
# _mul_prec : works just fine
#
# __bool__ : works just fine
#
"""
Multivariate power series; these are the elements of Multivariate Power
Series Rings.
INPUT:
- ``parent`` -- A multivariate power series.
- ``x`` -- The element (default: 0). This can be another
:class:`MPowerSeries` object, or an element of one of the following:
- the background univariate power series ring
- the foreground polynomial ring
- a ring that coerces to one of the above two
- ``prec`` -- (default: ``infinity``) The precision
- ``is_gen`` -- (default: ``False``) Is this element one of the generators?
- ``check`` -- (default: ``False``) Needed by univariate power series class
EXAMPLES:
Construct multivariate power series from generators::
sage: S.<s,t> = PowerSeriesRing(ZZ)
sage: f = s + 4*t + 3*s*t
sage: f in S
True
sage: f = f.add_bigoh(4); f
s + 4*t + 3*s*t + O(s, t)^4
sage: g = 1 + s + t - s*t + S.O(5); g
1 + s + t - s*t + O(s, t)^5
sage: T = PowerSeriesRing(GF(3),5,'t'); T
Multivariate Power Series Ring in t0, t1, t2, t3, t4 over Finite
Field of size 3
sage: t = T.gens()
sage: w = t[0] - 2*t[1]*t[3] + 5*t[4]^3 - t[0]^3*t[2]^2; w
t0 + t1*t3 - t4^3 - t0^3*t2^2
sage: w = w.add_bigoh(5); w
t0 + t1*t3 - t4^3 + O(t0, t1, t2, t3, t4)^5
sage: w in T
True
sage: w = t[0] - 2*t[0]*t[2] + 5*t[4]^3 - t[0]^3*t[2]^2 + T.O(6)
sage: w
t0 + t0*t2 - t4^3 - t0^3*t2^2 + O(t0, t1, t2, t3, t4)^6
Get random elements::
sage: S.random_element(4) # random
-2*t + t^2 - 12*s^3 + O(s, t)^4
sage: T.random_element(10) # random
-t1^2*t3^2*t4^2 + t1^5*t3^3*t4 + O(t0, t1, t2, t3, t4)^10
Convert elements from polynomial rings::
sage: R = PolynomialRing(ZZ,5,T.variable_names())
sage: t = R.gens()
sage: r = -t[2]*t[3] + t[3]^2 + t[4]^2
sage: T(r)
-t2*t3 + t3^2 + t4^2
sage: r.parent()
Multivariate Polynomial Ring in t0, t1, t2, t3, t4 over Integer Ring
sage: r in T
True
"""
    def __init__(self, parent, x=0, prec=infinity, is_gen=False, check=False):
        """
        Input ``x`` can be an :class:`MPowerSeries`, or an element of

            - the background univariate power series ring
            - the foreground polynomial ring
            - a ring that coerces to one of the above two

        TESTS::

            sage: S.<s,t> = PowerSeriesRing(ZZ)
            sage: f = s + 4*t + 3*s*t
            sage: f in S
            True
            sage: f = f.add_bigoh(4); f
            s + 4*t + 3*s*t + O(s, t)^4
            sage: g = 1 + s + t - s*t + S.O(5); g
            1 + s + t - s*t + O(s, t)^5

            sage: B.<s, t> = PowerSeriesRing(QQ)
            sage: C.<z> = PowerSeriesRing(QQ)
            sage: B(z)
            Traceback (most recent call last):
            ...
            TypeError: Cannot coerce input to polynomial ring.

            sage: D.<s> = PowerSeriesRing(QQ)
            sage: s.parent() is D
            True
            sage: B(s) in B
            True
            sage: d = D.random_element(20)
            sage: b = B(d) # test coercion from univariate power series ring
            sage: b in B
            True
        """
        PowerSeries.__init__(self, parent, prec, is_gen=is_gen)
        self._PowerSeries__is_gen = is_gen

        try:
            prec = min(prec, x.prec()) # use precision of input, if defined
        except AttributeError:
            pass

        # set the correct background value, depending on what type of input x is
        try:
            xparent = x.parent() # 'int' types have no parent
        except AttributeError:
            xparent = None

        # test whether x coerces to background univariate
        # power series ring of parent
        from sage.rings.multi_power_series_ring import is_MPowerSeriesRing
        if is_PowerSeriesRing(xparent) or is_MPowerSeriesRing(xparent):
            # x is either a multivariate or univariate power series
            #
            # test whether x coerces directly to designated parent
            if is_MPowerSeries(x):
                try:
                    self._bg_value = parent._bg_ps_ring(x._bg_value)
                except TypeError:
                    raise TypeError("Unable to coerce into background ring.")

            # test whether x coerces to background univariate
            # power series ring of parent
            elif xparent == parent._bg_ps_ring():
                self._bg_value = x
            elif parent._bg_ps_ring().has_coerce_map_from(xparent):
                # previous test may fail if precision or term orderings of
                # base rings do not match
                self._bg_value = parent._bg_ps_ring(x)
            else:
                # x is a univariate power series, but not from the
                # background power series ring
                #
                # convert x to a polynomial and send to background
                # ring of parent
                x = x.polynomial()
                self._bg_value = parent._send_to_bg(x).add_bigoh(prec)

        # test whether x coerces to underlying polynomial ring of parent
        elif is_PolynomialRing(xparent):
            self._bg_value = parent._send_to_bg(x).add_bigoh(prec)
        else:
            try:
                x = parent._poly_ring(x)
                #self._value = x
                self._bg_value = parent._send_to_bg(x).add_bigoh(prec)
            except (TypeError, AttributeError):
                raise TypeError("Input does not coerce to any of the "
                                "expected rings.")

        # Cache the foreground conversion map and the (possibly tightened)
        # precision of the background value.
        self._go_to_fg = parent._send_to_fg
        self._prec = self._bg_value.prec()

        # self._parent is used a lot by the class PowerSeries
        self._parent = self.parent()
def __reduce__(self):
"""
For pickling.
EXAMPLES::
sage: K.<s,t> = PowerSeriesRing(QQ)
sage: f = 1 + t - s + s*t - s*t^3 + K.O(12)
sage: loads(dumps(f)) == f
True
"""
return self.__class__, (self._parent,self._bg_value,self._prec)
    def __call__(self, *x, **kwds):
        """
        Evaluate ``self``.
        EXAMPLES::
            sage: R.<s,t> = PowerSeriesRing(ZZ); R
            Multivariate Power Series Ring in s, t over Integer Ring
            sage: f = s^2 + s*t + s^3 + s^2*t + 3*s^4 + 3*s^3*t + R.O(5); f
            s^2 + s*t + s^3 + s^2*t + 3*s^4 + 3*s^3*t + O(s, t)^5
            sage: f(t,s)
            s*t + t^2 + s*t^2 + t^3 + 3*s*t^3 + 3*t^4 + O(s, t)^5
            sage: f(t,0)
            t^2 + t^3 + 3*t^4 + O(s, t)^5
            sage: f(t,2)
            Traceback (most recent call last):
            ...
            TypeError: Substitution defined only for elements of positive
            valuation, unless self has infinite precision.
            sage: f.truncate()(t,2)
            2*t + 3*t^2 + 7*t^3 + 3*t^4
            Checking that :trac:`15059` is fixed::
            sage: M.<u,v> = PowerSeriesRing(GF(5))
            sage: s = M.hom([u, u+v])
            sage: s(M.one())
            1
        """
        # One argument per generator of the parent ring.
        if len(x) != self.parent().ngens():
            raise ValueError("Number of arguments does not match number of variables in parent.")
        sub_dict = {}
        valn_list = []
        for i in range(len(x)):
            try:
                xi = self.parent(x[i])
            except (AttributeError, TypeError):
                # Input does not coerce to parent ring of self
                # attempt formal substitution
                return self._subs_formal(*x,**kwds)
            # Substituting something of valuation 0 into a finite-precision
            # series is ill-defined: every truncated term could contribute
            # to every coefficient of the result.
            if xi.valuation() == 0 and self.prec() is not infinity:
                raise TypeError("Substitution defined only for elements of positive valuation, unless self has infinite precision.")
            elif xi.valuation() > 0:
                # Truncate the input at the highest order that can still
                # affect the result, and remember its valuation.
                sub_dict[self.parent()._poly_ring().gens()[i]] = xi.add_bigoh(xi.valuation()*self.prec())
                valn_list.append(xi.valuation())
            else:
                sub_dict[self.parent()._poly_ring().gens()[i]] = xi
        # The output precision scales by the smallest input valuation.
        if self.prec() is infinity:
            newprec = infinity
        else:
            newprec = self.prec()*min(valn_list)
        return self.parent()(self._value().subs(sub_dict)).add_bigoh(newprec)
def _subs_formal(self, *x, **kwds):
"""
Substitution of inputs as variables of ``self``. This is formal
in the sense that the inputs do not need to be elements of
same multivariate power series ring as ``self``. They can be any
objects which support addition and multiplication with
each other and with the coefficients of ``self``. If ``self`` has
finite precision, the inputs must also support an ``add_bigoh``
method.
TESTS::
sage: B.<s, t> = PowerSeriesRing(QQ)
sage: C.<z> = PowerSeriesRing(QQ)
sage: s(z,z)
z
sage: f = -2/33*s*t^2 - 1/5*t^5 - s^5*t + s^2*t^4
sage: f(z,z) #indirect doctest
-2/33*z^3 - 1/5*z^5
sage: f(z,1) #indirect doctest
-1/5 - 2/33*z + z^2 - z^5
sage: RF = RealField(10)
sage: f(z,RF(1)) #indirect doctest
-0.20 - 0.061*z + 1.0*z^2 - 0.00*z^3 - 0.00*z^4 - 1.0*z^5
sage: m = matrix(QQ,[[1,0,1],[0,2,1],[-1,0,0]])
sage: m
[ 1 0 1]
[ 0 2 1]
[-1 0 0]
sage: f(m,m) #indirect doctest
[ 2/33 0 1/5]
[ 131/55 -1136/165 -24/11]
[ -1/5 0 -23/165]
sage: f(m,m) == -2/33*m^3 - 1/5*m^5 #indirect doctest
True
sage: f = f.add_bigoh(10)
sage: f(z,z)
-2/33*z^3 - 1/5*z^5 + O(z^10)
sage: f(m,m)
Traceback (most recent call last):
...
AttributeError: 'sage.matrix.matrix_rational_dense.Matrix_rational_dense' object has no attribute 'add_bigoh'
"""
from sage.misc.misc_c import prod
if len(x) == 1 and isinstance(x[0], (list, tuple)):
x = x[0]
n = self.parent().ngens()
if len(x) != n:
raise ValueError("Input must be of correct length.")
if n == 0:
return self
y = 0
for m, c in iteritems(self.dict()):
y += c*prod([x[i]**m[i] for i in range(n) if m[i] != 0])
if self.prec() == infinity:
return y
else:
return y.add_bigoh(self.prec())
def _value(self):
"""
Return the value of ``self`` in the foreground polynomial ring.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(GF(5)); R
Multivariate Power Series Ring in a, b, c over Finite Field of
size 5
sage: f = 1 + a + b - a*b + R.O(3); f
1 + a + b - a*b + O(a, b, c)^3
sage: f._value()
1 + a + b - a*b
sage: f._value().parent()
Multivariate Polynomial Ring in a, b, c over Finite Field of size 5
"""
return self._go_to_fg(self._bg_value)
def _repr_(self):
"""
Return string representation of ``self``.
EXAMPLES::
sage: B.<s,t,v> = PowerSeriesRing(QQ)
sage: e = 1 + s - s*t + t*v/2 - 2*s*t*v/8 + B.O(4)
sage: e._repr_()
'1 + s - s*t + 1/2*t*v - 1/4*s*t*v + O(s, t, v)^4'
"""
if self._prec == infinity:
return "%s" % self._value()
return "%(val)s + O(%(gens)s)^%(prec)s" \
%{'val':self._value(),
'gens':', '.join(str(g) for g in self.parent().gens()),
'prec':self._prec}
def _latex_(self):
"""
Return latex representation of this multivariate power series.
EXAMPLES::
sage: M = PowerSeriesRing(GF(5),3,'t'); M
Multivariate Power Series Ring in t0, t1, t2 over Finite Field of size 5
sage: t = M.gens()
sage: f = -t[0]^4*t[1]^3*t[2]^4 - 2*t[0]*t[1]^4*t[2]^7 \
+ 2*t[1]*t[2]^12 + 2*t[0]^7*t[1]^5*t[2]^2 + M.O(15)
sage: f
-t0^4*t1^3*t2^4 - 2*t0*t1^4*t2^7 + 2*t1*t2^12 + 2*t0^7*t1^5*t2^2
+ O(t0, t1, t2)^15
sage: f._latex_()
'-t_{0}^{4} t_{1}^{3} t_{2}^{4} + 3 t_{0} t_{1}^{4} t_{2}^{7} +
2 t_{1} t_{2}^{12} + 2 t_{0}^{7} t_{1}^{5} t_{2}^{2}
+ O(t_{0}, t_{1}, t_{2})^{15}'
TESTS:
Check that :trac:`25156` is fixed::
sage: R.<x1,y1> = PowerSeriesRing(QQ, ('x', 'y'))
sage: element = 1 + y1^10 + x1^5
sage: element._latex_()
'1 + x_{1}^{5} + y_{1}^{10}'
"""
if self._prec == infinity:
return "%s" % self._value()._latex_()
return "%(val)s + O(%(gens)s)^{%(prec)s}" \
%{'val':self._value()._latex_(),
'gens':', '.join(g._latex_() for g in self.parent().gens()),
'prec':self._prec}
def _im_gens_(self, codomain, im_gens):
"""
Returns the image of this series under the map that sends the
generators to ``im_gens``. This is used internally for computing
homomorphisms.
EXAMPLES::
sage: A.<a,b> = PowerSeriesRing(QQ)
sage: X.<x,y> = PowerSeriesRing(QQ)
sage: phi = Hom(A,X)([x,2*y])
sage: phi = Hom(A,X)([x,2*y]); phi
Ring morphism:
From: Multivariate Power Series Ring in a, b over Rational Field
To: Multivariate Power Series Ring in x, y over Rational Field
Defn: a |--> x
b |--> 2*y
sage: phi(a+b+3*a*b^2 + A.O(5)) # indirect doctest
x + 2*y + 12*x*y^2 + O(x, y)^5
"""
return codomain(self(*im_gens))
def __getitem__(self,n):
"""
Return summand of total degree ``n``.
TESTS::
sage: R.<a,b> = PowerSeriesRing(ZZ)
sage: f = 1 + a + b - a*b + R.O(4)
sage: f[0]
1
sage: f[2]
-a*b
sage: f[3]
0
sage: f[4]
Traceback (most recent call last):
...
IndexError: Cannot return terms of total degree greater than or
equal to precision of self.
"""
if n >= self.prec():
raise IndexError("Cannot return terms of total degree greater than or equal to precision of self.")
return self.parent(self._bg_value[n])
def __invert__(self):
"""
Return multiplicative inverse of this multivariate power series.
Currently implemented only if constant coefficient is a unit in the
base ring.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f = 1 + a + b - a*b - b*c - a*c + R.O(4)
sage: ~f
1 - a - b + a^2 + 3*a*b + a*c + b^2 + b*c - a^3 - 5*a^2*b
- 2*a^2*c - 5*a*b^2 - 4*a*b*c - b^3 - 2*b^2*c + O(a, b, c)^4
"""
if self.valuation() == 0:
return self.parent(~self._bg_value)
else:
raise NotImplementedError("Multiplicative inverse of multivariate power series currently implemented only if constant coefficient is a unit.")
## comparisons
def _richcmp_(self, other, op):
"""
Compare ``self`` to ``other``.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(GF(5)); R
Multivariate Power Series Ring in a, b, c over Finite Field of size 5
sage: f = a + b + c + a^2*c
sage: f == f^2
False
sage: f = f.truncate()
sage: f == f.O(4)
True
Ordering is determined by underlying polynomial ring::
sage: a > b
True
sage: a > a^2
True
sage: b > a^2
True
sage: (f^2).O(3)
a^2 + 2*a*b + 2*a*c + b^2 + 2*b*c + c^2 + O(a, b, c)^3
sage: f < f^2
False
sage: f > f^2
True
sage: f < 2*f
True
"""
return richcmp(self._bg_value, other._bg_value, op)
## arithmetic
def _add_(left, right):
"""
Add ``left`` to ``right``.
TESTS::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f0 = -a^3*b*c^2 + a^2*b^2*c^4 - 12*a^3*b^3*c^3 + R.O(10)
sage: f1 = -6*b*c^3 - 4*a^2*b*c^2 + a^6*b^2*c - 2*a^3*b^3*c^3 + R.O(10)
sage: g = f0 + f1; g #indirect doctest
-6*b*c^3 - 4*a^2*b*c^2 - a^3*b*c^2 + a^2*b^2*c^4 + a^6*b^2*c
- 14*a^3*b^3*c^3 + O(a, b, c)^10
sage: g in R
True
sage: g.polynomial() == f0.polynomial() + f1.polynomial()
True
"""
f = left._bg_value + right._bg_value
return MPowerSeries(left.parent(), f, prec=f.prec())
def _sub_(left, right):
"""
Subtract ``right`` from ``left``.
TESTS::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f0 = -a^3*b*c^2 + a^2*b^2*c^4 - 12*a^3*b^3*c^3 + R.O(10)
sage: f1 = -6*b*c^3 - 4*a^2*b*c^2 + a^6*b^2*c - 2*a^3*b^3*c^3 + R.O(10)
sage: g = f0 - f1; g #indirect doctest
6*b*c^3 + 4*a^2*b*c^2 - a^3*b*c^2 + a^2*b^2*c^4 - a^6*b^2*c
- 10*a^3*b^3*c^3 + O(a, b, c)^10
sage: g in R
True
sage: g.polynomial() == f0.polynomial() - f1.polynomial()
True
"""
f = left._bg_value - right._bg_value
return MPowerSeries(left.parent(), f, prec=f.prec())
def _mul_(left, right):
"""
Multiply ``left`` and ``right``.
TESTS::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f0 = -a^3*b*c^2 + a^2*b^2*c^4 - 12*a^3*b^3*c^3 + R.O(10)
sage: f1 = -6*b*c^3 - 4*a^2*b*c^2 + a^6*b^2*c - 2*a^3*b^3*c^3 + R.O(10)
sage: g = f0*f1; g #indirect doctest
6*a^3*b^2*c^5 + 4*a^5*b^2*c^4 - 6*a^2*b^3*c^7 - 4*a^4*b^3*c^6
+ 72*a^3*b^4*c^6 + O(a, b, c)^14
sage: g in R
True
The power series product and polynomial product agree up to
total degree < precision of `g`::
sage: diff = g.polynomial() - f0.polynomial() * f1.polynomial()
sage: all(S >= g.prec() for S in [sum(e) for e in diff.exponents()])
True
"""
f = left._bg_value * right._bg_value
return MPowerSeries(left.parent(), f, prec=f.prec())
def _lmul_(self, c):
"""
Multiply ``self`` with ``c`` on the left.
TESTS::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f = -a^3*b*c^2 + a^2*b^2*c^4 - 12*a^3*b^3*c^3 + R.O(10)
sage: g = 3*f; g #indirect doctest
-3*a^3*b*c^2 + 3*a^2*b^2*c^4 - 36*a^3*b^3*c^3 + O(a, b, c)^10
sage: g in R
True
sage: g.polynomial() == 3 * (f.polynomial())
True
sage: g = f*5; g #indirect doctest
-5*a^3*b*c^2 + 5*a^2*b^2*c^4 - 60*a^3*b^3*c^3 + O(a, b, c)^10
sage: g in R
True
sage: g.polynomial() == (f.polynomial()) * 5
True
"""
f = c * self._bg_value
return MPowerSeries(self.parent(), f, prec=f.prec())
def trailing_monomial(self):
"""
Return the trailing monomial of ``self``.
This is defined here as the lowest term of the underlying polynomial.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f = 1 + a + b - a*b + R.O(3)
sage: f.trailing_monomial()
1
sage: f = a^2*b^3*f; f
a^2*b^3 + a^3*b^3 + a^2*b^4 - a^3*b^4 + O(a, b, c)^8
sage: f.trailing_monomial()
a^2*b^3
TESTS::
sage: (f-f).trailing_monomial()
0
"""
return self.polynomial().lt()
    def quo_rem(self, other, precision=None):
        r"""
        Return the pair of quotient and remainder for the increasing power
        division of ``self`` by ``other``.
        If `a` and `b` are two elements of a power series ring
        `R[[x_1, x_2, \cdots, x_n]]` such that the trailing term of
        `b` is invertible in `R`, then the pair of quotient and
        remainder for the increasing power division of `a` by `b` is
        the unique pair `(u, v) \in R[[x_1, x_2, \cdots, x_n]] \times
        R[x_1, x_2, \cdots, x_n]` such that `a = bu + v` and such that
        no monomial appearing in `v` divides the trailing monomial
        (:meth:`trailing_monomial`) of `b`. Note that this depends on
        the order of the variables.
        This method returns both quotient and remainder as power series,
        even though in mathematics, the remainder for the increasing
        power division of two power series is a polynomial. This is
        because Sage's power series come with a precision, and that
        precision is not always sufficient to determine the remainder
        completely. Disregarding this issue, the :meth:`polynomial`
        method can be used to recast the remainder as an actual
        polynomial.
        INPUT:
        - ``other`` -- an element of the same power series ring as
          ``self`` such that the trailing term of ``other`` is
          invertible in ``self`` (this is automatically satisfied
          if the base ring is a field, unless ``other`` is zero)
        - ``precision`` -- (default: the default precision of the
          parent of ``self``) nonnegative integer, determining the
          precision to be cast on the resulting quotient and
          remainder if both ``self`` and ``other`` have infinite
          precision (ignored otherwise); note that the resulting
          precision might be lower than this integer
        EXAMPLES::
            sage: R.<a,b,c> = PowerSeriesRing(ZZ)
            sage: f = 1 + a + b - a*b + R.O(3)
            sage: g = 1 + 2*a - 3*a*b + R.O(3)
            sage: q, r = f.quo_rem(g); q, r
            (1 - a + b + 2*a^2 + O(a, b, c)^3, 0 + O(a, b, c)^3)
            sage: f == q*g+r
            True
            sage: q, r = (a*f).quo_rem(g); q, r
            (a - a^2 + a*b + 2*a^3 + O(a, b, c)^4, 0 + O(a, b, c)^4)
            sage: a*f == q*g+r
            True
            sage: q, r = (a*f).quo_rem(a*g); q, r
            (1 - a + b + 2*a^2 + O(a, b, c)^3, 0 + O(a, b, c)^4)
            sage: a*f == q*(a*g)+r
            True
            sage: q, r = (a*f).quo_rem(b*g); q, r
            (a - 3*a^2 + O(a, b, c)^3, a + a^2 + O(a, b, c)^4)
            sage: a*f == q*(b*g)+r
            True
        Trying to divide two polynomials, we run into the issue that
        there is no natural setting for the precision of the quotient
        and remainder (and if we wouldn't set a precision, the
        algorithm would never terminate). Here, default precision
        comes to our help::
            sage: (1+a^3).quo_rem(a+a^2)
            (a^2 - a^3 + a^4 - a^5 + a^6 - a^7 + a^8 - a^9 + a^10 + O(a, b, c)^11, 1 + O(a, b, c)^12)
            sage: (1+a^3+a*b).quo_rem(b+c)
            (a + O(a, b, c)^11, 1 - a*c + a^3 + O(a, b, c)^12)
            sage: (1+a^3+a*b).quo_rem(b+c, precision=17)
            (a + O(a, b, c)^16, 1 - a*c + a^3 + O(a, b, c)^17)
            sage: (a^2+b^2+c^2).quo_rem(a+b+c)
            (a - b - c + O(a, b, c)^11, 2*b^2 + 2*b*c + 2*c^2 + O(a, b, c)^12)
            sage: (a^2+b^2+c^2).quo_rem(1/(1+a+b+c))
            (a^2 + b^2 + c^2 + a^3 + a^2*b + a^2*c + a*b^2 + a*c^2 + b^3 + b^2*c + b*c^2 + c^3 + O(a, b, c)^14,
             0)
            sage: (a^2+b^2+c^2).quo_rem(a/(1+a+b+c))
            (a + a^2 + a*b + a*c + O(a, b, c)^13, b^2 + c^2)
            sage: (1+a+a^15).quo_rem(a^2)
            (0 + O(a, b, c)^10, 1 + a + O(a, b, c)^12)
            sage: (1+a+a^15).quo_rem(a^2, precision=15)
            (0 + O(a, b, c)^13, 1 + a + O(a, b, c)^15)
            sage: (1+a+a^15).quo_rem(a^2, precision=16)
            (a^13 + O(a, b, c)^14, 1 + a + O(a, b, c)^16)
        Illustrating the dependency on the ordering of variables::
            sage: (1+a+b).quo_rem(b+c)
            (1 + O(a, b, c)^11, 1 + a - c + O(a, b, c)^12)
            sage: (1+b+c).quo_rem(c+a)
            (0 + O(a, b, c)^11, 1 + b + c + O(a, b, c)^12)
            sage: (1+c+a).quo_rem(a+b)
            (1 + O(a, b, c)^11, 1 - b + c + O(a, b, c)^12)
        TESTS::
            sage: (f).quo_rem(R.zero())
            Traceback (most recent call last):
            ...
            ZeroDivisionError
            sage: (f).quo_rem(R.zero().add_bigoh(2))
            Traceback (most recent call last):
            ...
            ZeroDivisionError
        Coercion is applied on ``other``::
            sage: (a+b).quo_rem(1)
            (a + b + O(a, b, c)^12, 0 + O(a, b, c)^12)
            sage: R.<a,b,c> = PowerSeriesRing(QQ)
            sage: R(3).quo_rem(2)
            (3/2 + O(a, b, c)^12, 0 + O(a, b, c)^12)
        """
        parent = self.parent()
        # Coerce ``other`` into the same power series ring if needed.
        if other.parent() is not parent:
            other = self.parent(other)
        other_tt = other.trailing_monomial()
        if not other_tt:
            # ``other`` is (a truncation of) zero: no invertible trailing term.
            raise ZeroDivisionError()
        self_prec = self.prec()
        if self_prec == infinity and other.prec() == infinity:
            # Two exact operands: without a cap the loop below would
            # never terminate, so truncate at the requested (or the
            # parent's default) precision.
            if precision is None:
                precision = parent.default_prec()
            self = self.add_bigoh(precision)
            self_prec = self.prec()
        rem = parent.zero().add_bigoh(self_prec)
        quo = parent.zero().add_bigoh(self_prec-other.valuation())
        while self:
            # Loop invariants:
            # ``(the original value of self) - self == quo * other + rem``
            # and
            # ``(quo * other).prec() <= self.prec().
            # (``other`` doesn't change throughout the loop.)
            # The loop terminates because:
            # (1) every step increases ``self_tt``;
            # (2) either ``self`` has finite precision, or ``self`` is a
            #     polynomial and ``other`` has infinite precision (in
            #     which case either ``self`` will run out of nonzero
            #     coefficients after sufficiently many iterations of the
            #     if-case, or ``self``'s precision gets reduced to finite
            #     in one iteration of the else-case).
            # These show that at the end we have
            # ``(the original value of self) == quo * other + rem``
            # up to the minimum of the precision of either side of this
            # equality and the precision of self.
            self_tt = self.trailing_monomial()
            #assert self_tt
            if not other_tt.divides(self_tt):
                # Undividable trailing term: it goes into the remainder.
                self -= self_tt
                rem += self_tt
            else:
                # Cancel the trailing term of ``self`` against ``other``.
                d = self_tt//other_tt
                self -= d * other
                quo += d
        # Account for the precision lost to the trailing term of ``other``.
        quo = quo.add_bigoh(self.prec()-other_tt.degree())
        return quo, rem
def _div_(self, denom_r):
r"""
Division in the ring of power series.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f = 1 + a + b - a*b + R.O(3)
sage: g = 1/f; g #indirect doctest
1 - a - b + a^2 + 3*a*b + b^2 + O(a, b, c)^3
sage: g in R
True
sage: g == ~f
True
When possible, division by non-units also works::
sage: a/(a*f)
1 - a - b + a^2 + 3*a*b + b^2 + O(a, b, c)^3
sage: a/(R.zero())
Traceback (most recent call last):
ZeroDivisionError
sage: (a*f)/f
a + O(a, b, c)^4
sage: f/(a*f)
Traceback (most recent call last):
...
ValueError: not divisible
An example where one loses precision::
sage: ((1+a)*f - f) / a*f
1 + 2*a + 2*b + O(a, b, c)^2
TESTS::
sage: ((a+b)*f) / f == (a+b)
True
sage: ((a+b)*f) / (a+b) == f
True
"""
if denom_r.is_unit(): # faster if denom_r is a unit
return self.parent(self._bg_value * ~denom_r._bg_value)
quo, rem = self.quo_rem(denom_r)
if rem:
raise ValueError("not divisible")
else:
return quo
# def _r_action_(self, c):
# # multivariate power series rings are assumed to be commutative
# return self._l_action_(c)
    def _l_action_(self, c):
        """
        Multivariate power series support multiplication by any ring for
        which there is a supported action on the base ring.
        EXAMPLES::
            sage: R.<s,t> = PowerSeriesRing(ZZ); R
            Multivariate Power Series Ring in s, t over Integer Ring
            sage: f = 1 + t + s + s*t + R.O(3)
            sage: g = f._l_action_(1/2); g
            1/2 + 1/2*s + 1/2*t + 1/2*s*t + O(s, t)^3
            sage: g.parent()
            Multivariate Power Series Ring in s, t over Rational Field
            sage: g = (1/2)*f; g
            1/2 + 1/2*s + 1/2*t + 1/2*s*t + O(s, t)^3
            sage: g.parent()
            Multivariate Power Series Ring in s, t over Rational Field
            sage: K = NumberField(x-3,'a')
            sage: g = K.random_element()*f
            sage: g.parent()
            Multivariate Power Series Ring in s, t over Number Field in a with defining polynomial x - 3
        """
        # Any failure (no action of c, or the product landing somewhere
        # unexpected) is reported uniformly as "Action not defined.".
        try:
            f = c * self._bg_value
            if f.parent() == self.parent()._bg_ps_ring():
                # The action kept us in the same background ring.
                return MPowerSeries(self.parent(), f, prec=f.prec())
            else:
                # The base ring was extended (e.g. ZZ -> QQ); build the
                # matching multivariate power series parent for the result.
                from sage.rings.all import PowerSeriesRing
                new_parent = PowerSeriesRing(f.base_ring().base_ring(), num_gens = f.base_ring().ngens(), names = f.base_ring().gens())
                return MPowerSeries(new_parent, f, prec=f.prec())
        except (TypeError, AttributeError):
            raise TypeError("Action not defined.")
def __mod__(self, other):
"""
TESTS::
sage: R.<a,b,c> = PowerSeriesRing(ZZ)
sage: f = -a^3*b*c^2 + a^2*b^2*c^4 - 12*a^3*b^3*c^3 + R.O(10)
sage: g = f % 2; g
a^3*b*c^2 + a^2*b^2*c^4 + O(a, b, c)^10
sage: g in R
False
sage: g in R.base_extend(Zmod(2))
True
sage: g.polynomial() == f.polynomial() % 2
True
"""
if isinstance(other, integer_types + (Integer,)):
return self.change_ring(Zmod(other))
raise NotImplementedError("Mod on multivariate power series ring elements not defined except modulo an integer.")
def dict(self):
"""
Return underlying dictionary with keys the exponents and values the
coefficients of this power series.
EXAMPLES::
sage: M = PowerSeriesRing(QQ,4,'t',sparse=True); M
Sparse Multivariate Power Series Ring in t0, t1, t2, t3 over
Rational Field
sage: M.inject_variables()
Defining t0, t1, t2, t3
sage: m = 2/3*t0*t1^15*t3^48 - t0^15*t1^21*t2^28*t3^5
sage: m2 = 1/2*t0^12*t1^29*t2^46*t3^6 - 1/4*t0^39*t1^5*t2^23*t3^30 + M.O(100)
sage: s = m + m2
sage: s.dict()
{(1, 15, 0, 48): 2/3,
(12, 29, 46, 6): 1/2,
(15, 21, 28, 5): -1,
(39, 5, 23, 30): -1/4}
"""
out_dict = {}
for j in self._bg_value.coefficients():
out_dict.update(j.dict())
return out_dict
def polynomial(self):
"""
Return the underlying polynomial of ``self`` as an element of
the underlying multivariate polynomial ring (the "foreground
polynomial ring").
EXAMPLES::
sage: M = PowerSeriesRing(QQ,4,'t'); M
Multivariate Power Series Ring in t0, t1, t2, t3 over Rational
Field
sage: t = M.gens()
sage: f = 1/2*t[0]^3*t[1]^3*t[2]^2 + 2/3*t[0]*t[2]^6*t[3] \
- t[0]^3*t[1]^3*t[3]^3 - 1/4*t[0]*t[1]*t[2]^7 + M.O(10)
sage: f
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3
- 1/4*t0*t1*t2^7 + O(t0, t1, t2, t3)^10
sage: f.polynomial()
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3
- 1/4*t0*t1*t2^7
sage: f.polynomial().parent()
Multivariate Polynomial Ring in t0, t1, t2, t3 over Rational Field
Contrast with :meth:`truncate`::
sage: f.truncate()
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3 - 1/4*t0*t1*t2^7
sage: f.truncate().parent()
Multivariate Power Series Ring in t0, t1, t2, t3 over Rational Field
"""
return self._value()
def variables(self):
"""
Return tuple of variables occurring in ``self``.
EXAMPLES::
sage: T = PowerSeriesRing(GF(3),5,'t'); T
Multivariate Power Series Ring in t0, t1, t2, t3, t4 over
Finite Field of size 3
sage: t = T.gens()
sage: w = t[0] - 2*t[0]*t[2] + 5*t[4]^3 - t[0]^3*t[2]^2 + T.O(6)
sage: w
t0 + t0*t2 - t4^3 - t0^3*t2^2 + O(t0, t1, t2, t3, t4)^6
sage: w.variables()
(t0, t2, t4)
"""
return tuple(self.parent(v) for v in self._value().variables())
def monomials(self):
"""
Return a list of monomials of ``self``.
These are the keys of the dict returned by :meth:`coefficients`.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(ZZ); R
Multivariate Power Series Ring in a, b, c over Integer Ring
sage: f = 1 + a + b - a*b - b*c - a*c + R.O(4)
sage: sorted(f.monomials())
[b*c, a*c, a*b, b, a, 1]
sage: f = 1 + 2*a + 7*b - 2*a*b - 4*b*c - 13*a*c + R.O(4)
sage: sorted(f.monomials())
[b*c, a*c, a*b, b, a, 1]
sage: f = R.zero()
sage: f.monomials()
[]
"""
return list(self.coefficients())
def coefficients(self):
"""
Return a dict of monomials and coefficients.
EXAMPLES::
sage: R.<s,t> = PowerSeriesRing(ZZ); R
Multivariate Power Series Ring in s, t over Integer Ring
sage: f = 1 + t + s + s*t + R.O(3)
sage: f.coefficients()
{s*t: 1, t: 1, s: 1, 1: 1}
sage: (f^2).coefficients()
{t^2: 1, s*t: 4, s^2: 1, t: 2, s: 2, 1: 1}
sage: g = f^2 + f - 2; g
3*s + 3*t + s^2 + 5*s*t + t^2 + O(s, t)^3
sage: cd = g.coefficients()
sage: g2 = sum(k*v for (k,v) in cd.items()); g2
3*s + 3*t + s^2 + 5*s*t + t^2
sage: g2 == g.truncate()
True
"""
if self.is_sparse():
return self.dict()
tmp = {}
for j in self._bg_value.coefficients():
for m in j.monomials():
tmp[self.parent(m)]=j.monomial_coefficient(self.parent()._poly_ring(m))
return tmp
def constant_coefficient(self):
"""
Return constant coefficient of ``self``.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(ZZ); R
Multivariate Power Series Ring in a, b, c over Integer Ring
sage: f = 3 + a + b - a*b - b*c - a*c + R.O(4)
sage: f.constant_coefficient()
3
sage: f.constant_coefficient().parent()
Integer Ring
"""
return self.base_ring()(self._bg_value[0])
def exponents(self):
"""
Return a list of tuples which hold the exponents of each monomial
of ``self``.
EXAMPLES::
sage: H = QQ[['x,y']]
sage: (x,y) = H.gens()
sage: h = -y^2 - x*y^3 - 6/5*y^6 - x^7 + 2*x^5*y^2 + H.O(10)
sage: h
-y^2 - x*y^3 - 6/5*y^6 - x^7 + 2*x^5*y^2 + O(x, y)^10
sage: h.exponents()
[(0, 2), (1, 3), (0, 6), (7, 0), (5, 2)]
"""
exp_list = []
for m in self._bg_value.coefficients():
exp_list += m.exponents()
return exp_list
def V(self, n):
r"""
If
.. MATH::
f = \sum a_{m_0, \ldots, m_k} x_0^{m_0} \cdots x_k^{m_k},
then this function returns
.. MATH::
\sum a_{m_0, \ldots, m_k} x_0^{n m_0} \cdots x_k^{n m_k}.
The total-degree precision of the output is ``n`` times the precision
of ``self``.
EXAMPLES::
sage: H = QQ[['x,y,z']]
sage: (x,y,z) = H.gens()
sage: h = -x*y^4*z^7 - 1/4*y*z^12 + 1/2*x^7*y^5*z^2 \
+ 2/3*y^6*z^8 + H.O(15)
sage: h.V(3)
-x^3*y^12*z^21 - 1/4*y^3*z^36 + 1/2*x^21*y^15*z^6 + 2/3*y^18*z^24 + O(x, y, z)^45
"""
cd = self.coefficients()
Vs = sum(v * k**n for k, v in iteritems(cd))
return Vs.add_bigoh(self.prec()*n)
    def prec(self):
        """
        Return precision of ``self``.

        This is the cached total-degree precision set at construction
        time; it is ``+Infinity`` for exact series.
        EXAMPLES::
            sage: R.<a,b,c> = PowerSeriesRing(ZZ); R
            Multivariate Power Series Ring in a, b, c over Integer Ring
            sage: f = 3 + a + b - a*b - b*c - a*c + R.O(4)
            sage: f.prec()
            4
            sage: f.truncate().prec()
            +Infinity
        """
        # ``_prec`` is derived from the background series in ``__init__``.
        return self._prec
def add_bigoh(self, prec):
"""
Return a multivariate power series of precision ``prec``
obtained by truncating ``self`` at precision ``prec``.
This is the same as :meth:`O`.
EXAMPLES::
sage: B.<x,y> = PowerSeriesRing(QQ); B
Multivariate Power Series Ring in x, y over Rational Field
sage: r = 1 - x*y + x^2
sage: r.add_bigoh(4)
1 + x^2 - x*y + O(x, y)^4
sage: r.add_bigoh(2)
1 + O(x, y)^2
Note that this does not change ``self``::
sage: r
1 + x^2 - x*y
"""
return self.parent(self._bg_value.add_bigoh(prec))
def O(self, prec):
"""
Return a multivariate power series of precision ``prec``
obtained by truncating ``self`` at precision ``prec``.
This is the same as :meth:`add_bigoh`.
EXAMPLES::
sage: B.<x,y> = PowerSeriesRing(QQ); B
Multivariate Power Series Ring in x, y over Rational Field
sage: r = 1 - x*y + x^2
sage: r.O(4)
1 + x^2 - x*y + O(x, y)^4
sage: r.O(2)
1 + O(x, y)^2
Note that this does not change ``self``::
sage: r
1 + x^2 - x*y
"""
return self.parent(self._bg_value.O(prec))
def truncate(self, prec=infinity):
"""
Return infinite precision multivariate power series formed by
truncating ``self`` at precision ``prec``.
EXAMPLES::
sage: M = PowerSeriesRing(QQ,4,'t'); M
Multivariate Power Series Ring in t0, t1, t2, t3 over Rational Field
sage: t = M.gens()
sage: f = 1/2*t[0]^3*t[1]^3*t[2]^2 + 2/3*t[0]*t[2]^6*t[3] \
- t[0]^3*t[1]^3*t[3]^3 - 1/4*t[0]*t[1]*t[2]^7 + M.O(10)
sage: f
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3
- 1/4*t0*t1*t2^7 + O(t0, t1, t2, t3)^10
sage: f.truncate()
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3
- 1/4*t0*t1*t2^7
sage: f.truncate().parent()
Multivariate Power Series Ring in t0, t1, t2, t3 over Rational Field
Contrast with polynomial::
sage: f.polynomial()
1/2*t0^3*t1^3*t2^2 + 2/3*t0*t2^6*t3 - t0^3*t1^3*t3^3 - 1/4*t0*t1*t2^7
sage: f.polynomial().parent()
Multivariate Polynomial Ring in t0, t1, t2, t3 over Rational Field
"""
return self.parent((self.O(prec))._value())
def valuation(self):
r"""
Return the valuation of ``self``.
The valuation of a power series `f` is the highest nonnegative
integer `k` less or equal to the precision of `f` and such
that the coefficient of `f` before each term of degree `< k` is
zero. (If such an integer does not exist, then the valuation is
the precision of `f` itself.)
EXAMPLES::
sage: R.<a,b> = PowerSeriesRing(GF(4949717)); R
Multivariate Power Series Ring in a, b over Finite Field of
size 4949717
sage: f = a^2 + a*b + a^3 + R.O(9)
sage: f.valuation()
2
sage: g = 1 + a + a^3
sage: g.valuation()
0
sage: R.zero().valuation()
+Infinity
"""
try:
return self._bg_value.valuation()
except (TypeError, AttributeError):
if self._bg_value == 0:
return infinity
# at this stage, self is probably a non-zero
# element of the base ring
for a in range(len(self._bg_value.list())):
if self._bg_value.list()[a] is not 0:
return a
def is_nilpotent(self):
"""
Return ``True`` if ``self`` is nilpotent. This occurs if
- ``self`` has finite precision and positive valuation, or
- ``self`` is constant and nilpotent in base ring.
Otherwise, return ``False``.
.. WARNING::
This is so far just a sufficient condition, so don't trust
a ``False`` output to be legit!
.. TODO::
What should we do about this method? Is nilpotency of a
power series even decidable (assuming a nilpotency oracle
in the base ring)? And I am not sure that returning
``True`` just because the series has finite precision and
zero constant term is a good idea.
EXAMPLES::
sage: R.<a,b,c> = PowerSeriesRing(Zmod(8)); R
Multivariate Power Series Ring in a, b, c over Ring of integers
modulo 8
sage: f = a + b + c + a^2*c
sage: f.is_nilpotent()
False
sage: f = f.O(4); f
a + b + c + a^2*c + O(a, b, c)^4
sage: f.is_nilpotent()
True
sage: g = R(2)
sage: g.is_nilpotent()
True
sage: (g.O(4)).is_nilpotent()
True
sage: S = R.change_ring(QQ)
sage: S(g).is_nilpotent()
False
sage: S(g.O(4)).is_nilpotent()
False
"""
if self.prec() < infinity and self.valuation() > 0:
return True
elif self == self.constant_coefficient() and \
self.base_ring()(self.constant_coefficient()).is_nilpotent():
return True
else:
return False
def degree(self):
"""
Return degree of underlying polynomial of ``self``.
EXAMPLES::
sage: B.<x,y> = PowerSeriesRing(QQ)
sage: B
Multivariate Power Series Ring in x, y over Rational Field
sage: r = 1 - x*y + x^2
sage: r = r.add_bigoh(4); r
1 + x^2 - x*y + O(x, y)^4
sage: r.degree()
2
"""
return self._value().degree()
def is_unit(self):
"""
A multivariate power series is a unit if and only if its constant
coefficient is a unit.
EXAMPLES::
sage: R.<a,b> = PowerSeriesRing(ZZ); R
Multivariate Power Series Ring in a, b over Integer Ring
sage: f = 2 + a^2 + a*b + a^3 + R.O(9)
sage: f.is_unit()
False
sage: f.base_extend(QQ).is_unit()
True
"""
return self._bg_value[0].is_unit()
###
### the following could be implemented, but aren't
###
    def padded_list(self):
        """
        Method from univariate power series not yet implemented.
        TESTS::
            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.padded_list()
            Traceback (most recent call last):
            ...
            NotImplementedError: padded_list
        """
        # Deliberate stub: kept so the univariate interface fails loudly.
        raise NotImplementedError("padded_list")
    def is_square(self):
        """
        Method from univariate power series not yet implemented.
        TESTS::
            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.is_square()
            Traceback (most recent call last):
            ...
            NotImplementedError: is_square
        """
        # Deliberate stub: kept so the univariate interface fails loudly.
        raise NotImplementedError("is_square")
    def square_root(self):
        """
        Method from univariate power series not yet implemented.
        Depends on square root method for multivariate polynomials.
        TESTS::
            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.square_root()
            Traceback (most recent call last):
            ...
            NotImplementedError: square_root
        """
        # Deliberate stub: kept so the univariate interface fails loudly.
        raise NotImplementedError("square_root")
    # alias mirroring the univariate power series interface
    sqrt = square_root
def derivative(self, *args):
"""
The formal derivative of this power series, with respect to
variables supplied in ``args``.
EXAMPLES::
sage: T.<a,b> = PowerSeriesRing(ZZ,2)
sage: f = a + b + a^2*b + T.O(5)
sage: f.derivative(a)
1 + 2*a*b + O(a, b)^4
sage: f.derivative(a,2)
2*b + O(a, b)^3
sage: f.derivative(a,a)
2*b + O(a, b)^3
sage: f.derivative([a,a])
2*b + O(a, b)^3
sage: f.derivative(a,5)
0 + O(a, b)^0
sage: f.derivative(a,6)
0 + O(a, b)^0
"""
from sage.misc.derivative import derivative_parse
R = self.parent()
variables = [ x.polynomial() for x in derivative_parse(args) ]
deriv = self.polynomial().derivative(variables)
new_prec = max(self.prec()-len(variables), 0)
return R(deriv) + R.O(new_prec)
def integral(self, *args):
"""
The formal integral of this multivariate power series, with respect to
variables supplied in ``args``.
The variable sequence ``args`` can contain both variables and
counts; for the syntax, see
:meth:`~sage.misc.derivative.derivative_parse`.
EXAMPLES::
sage: T.<a,b> = PowerSeriesRing(QQ,2)
sage: f = a + b + a^2*b + T.O(5)
sage: f.integral(a, 2)
1/6*a^3 + 1/2*a^2*b + 1/12*a^4*b + O(a, b)^7
sage: f.integral(a, b)
1/2*a^2*b + 1/2*a*b^2 + 1/6*a^3*b^2 + O(a, b)^7
sage: f.integral(a, 5)
1/720*a^6 + 1/120*a^5*b + 1/2520*a^7*b + O(a, b)^10
Only integration with respect to variables works::
sage: f.integral(a+b)
Traceback (most recent call last):
...
ValueError: a + b is not a variable
.. warning:: Coefficient division.
If the base ring is not a field (e.g. `ZZ`), or if it has a
non-zero characteristic, (e.g. `ZZ/3ZZ`), integration is not
always possible while staying with the same base ring. In the
first case, Sage will report that it has not been able to
coerce some coefficient to the base ring::
sage: T.<a,b> = PowerSeriesRing(ZZ,2)
sage: f = a + T.O(5)
sage: f.integral(a)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
One can get the correct result by changing the base ring first::
sage: f.change_ring(QQ).integral(a)
1/2*a^2 + O(a, b)^6
However, a correct result is returned even without base change
if the denominator cancels::
sage: f = 2*b + T.O(5)
sage: f.integral(b)
b^2 + O(a, b)^6
In non-zero characteristic, Sage will report that a zero division
occurred ::
sage: T.<a,b> = PowerSeriesRing(Zmod(3),2)
sage: (a^3).integral(a)
a^4
sage: (a^2).integral(a)
Traceback (most recent call last):
...
ZeroDivisionError: inverse of Mod(0, 3) does not exist
"""
from sage.misc.derivative import derivative_parse
res = self
for v in derivative_parse(args):
res = res._integral(v)
return res
def _integral(self, xx):
"""
Formal integral for multivariate power series.
INPUT: ``xx`` - a generator of the power series ring (the
one with respect to which to integrate)
EXAMPLES::
sage: T.<a,b> = PowerSeriesRing(QQ,2)
sage: f = a + b + a^2*b + T.O(5)
sage: f._integral(a)
1/2*a^2 + a*b + 1/3*a^3*b + O(a, b)^6
sage: f._integral(b)
a*b + 1/2*b^2 + 1/2*a^2*b^2 + O(a, b)^6
TESTS:
We try to recognize variables even if they are not recognized as
generators of the rings::
sage: T.<a,b> = PowerSeriesRing(QQ,2)
sage: a.is_gen()
True
sage: (a+0).is_gen()
False
sage: (a+b).integral(a+0)
1/2*a^2 + a*b
sage: T.<a,b> = PowerSeriesRing(ZZ,2)
sage: aa = a.change_ring(Zmod(5))
sage: aa.is_gen()
False
sage: aa.integral(aa)
3*a^2
sage: aa.integral(a)
3*a^2
"""
P = self.parent()
R = P.base_ring()
xx = P(xx)
if not xx.is_gen():
for g in P.gens(): # try to find a generator equal to xx
if g == xx:
xx = g
break
else:
raise ValueError("%s is not a variable" % xx)
xxe = xx.exponents()[0]
pos = [i for i, c in enumerate(xxe) if c != 0][0] # get the position of the variable
res = {mon.eadd(xxe): R(co / (mon[pos]+1))
for mon, co in iteritems(self.dict())}
return P( res ).add_bigoh(self.prec()+1)
    def ogf(self):
        """
        Method from univariate power series not yet implemented

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.ogf()
            Traceback (most recent call last):
            ...
            NotImplementedError: ogf
        """
        # Stub: the univariate ordinary-generating-function conversion has
        # no multivariate counterpart yet.
        raise NotImplementedError("ogf")
    def egf(self):
        """
        Method from univariate power series not yet implemented

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.egf()
            Traceback (most recent call last):
            ...
            NotImplementedError: egf
        """
        # Stub: the univariate exponential-generating-function conversion
        # has no multivariate counterpart yet.
        raise NotImplementedError("egf")
    def __pari__(self):
        """
        Method from univariate power series not yet implemented

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.__pari__()
            Traceback (most recent call last):
            ...
            NotImplementedError: __pari__
        """
        # Stub: PARI has no native multivariate power series type to
        # convert to.
        raise NotImplementedError("__pari__")
###
### the following don't make sense for multivariable power series
###
    def list(self):
        """
        Doesn't make sense for multivariate power series.
        Multivariate polynomials don't have list of coefficients either.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.list()
            Traceback (most recent call last):
            ...
            NotImplementedError: Multivariate power series do not have list
            of coefficients; use 'coefficients' to get a dict of coefficients.
        """
        # A linear coefficient list is only meaningful in one variable.
        raise NotImplementedError("Multivariate power series do not have list of coefficients; use 'coefficients' to get a dict of coefficients.")
    def variable(self):
        """
        Doesn't make sense for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.variable()
            Traceback (most recent call last):
            ...
            NotImplementedError: variable not defined for multivariate power
            series; use 'variables' instead.
        """
        # There is no single distinguished variable in several variables.
        raise NotImplementedError("variable not defined for multivariate power series; use 'variables' instead.")
    def shift(self, n):
        """
        Doesn't make sense for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.shift(3)
            Traceback (most recent call last):
            ...
            NotImplementedError: shift not defined for multivariate power series.
        """
        # Shifting by x^n is ambiguous with several generators.
        raise NotImplementedError("shift not defined for multivariate power series.")
    def __lshift__(self, n):
        """
        Doesn't make sense for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.__lshift__(3)
            Traceback (most recent call last):
            ...
            NotImplementedError: __lshift__ not defined for multivariate power series.
        """
        # Coefficient shifting (see shift()) is undefined here.
        raise NotImplementedError("__lshift__ not defined for multivariate power series.")
    def __rshift__(self, n):
        """
        Doesn't make sense for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.__rshift__(3)
            Traceback (most recent call last):
            ...
            NotImplementedError: __rshift__ not defined for multivariate power series.
        """
        # Coefficient shifting (see shift()) is undefined here.
        raise NotImplementedError("__rshift__ not defined for multivariate power series.")
    def valuation_zero_part(self):
        """
        Doesn't make sense for multivariate power series;
        valuation zero with respect to which variable?

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.valuation_zero_part()
            Traceback (most recent call last):
            ...
            NotImplementedError: valuation_zero_part not defined for multivariate
            power series; perhaps 'constant_coefficient' is what you want.
        """
        raise NotImplementedError("valuation_zero_part not defined for multivariate power series; perhaps 'constant_coefficient' is what you want.")
    def solve_linear_de(self, prec=infinity, b=None, f0=None):
        """
        Not implemented for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.solve_linear_de()
            Traceback (most recent call last):
            ...
            NotImplementedError: solve_linear_de not defined for multivariate power series.
        """
        # The univariate ODE solver does not generalize to several variables.
        raise NotImplementedError("solve_linear_de not defined for multivariate power series.")
    def exp(self, prec=infinity):
        r"""
        Exponentiate the formal power series.

        INPUT:

        - ``prec`` -- Integer or ``infinity``. The degree to truncate
          the result to.

        OUTPUT:

        The exponentiated multivariate power series as a new
        multivariate power series.

        EXAMPLES::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(3)
            sage: exp(f)
            1 + a + b + 1/2*a^2 + 2*a*b + 1/2*b^2 + O(a, b)^3
            sage: f.exp()
            1 + a + b + 1/2*a^2 + 2*a*b + 1/2*b^2 + O(a, b)^3
            sage: f.exp(prec=2)
            1 + a + b + O(a, b)^2
            sage: log(exp(f)) - f
            0 + O(a, b)^3

        If the power series has a constant coefficient `c` and
        `\exp(c)` is transcendental, then `\exp(f)` would have to be a
        power series over the :class:`~sage.symbolic.ring.SymbolicRing`. These
        are not yet implemented and therefore such cases raise an error::

            sage: g = 2+f
            sage: exp(g)
            Traceback (most recent call last):
            ...
            TypeError: unsupported operand parent(s) for *: 'Symbolic Ring' and
            'Power Series Ring in Tbg over Multivariate Polynomial Ring in a, b
            over Rational Field'

        Another workaround for this limitation is to change base ring
        to one which is closed under exponentiation, such as `\RR` or `\CC`::

            sage: exp(g.change_ring(RDF))
            7.38905609... + 7.38905609...*a + 7.38905609...*b + 3.69452804...*a^2 +
            14.7781121...*a*b + 3.69452804...*b^2 + O(a, b)^3

        If no precision is specified, the default precision is used::

            sage: T.default_prec()
            12
            sage: exp(a)
            1 + a + 1/2*a^2 + 1/6*a^3 + 1/24*a^4 + 1/120*a^5 + 1/720*a^6 + 1/5040*a^7 +
            1/40320*a^8 + 1/362880*a^9 + 1/3628800*a^10 + 1/39916800*a^11 + O(a, b)^12
            sage: a.exp(prec=5)
            1 + a + 1/2*a^2 + 1/6*a^3 + 1/24*a^4 + O(a, b)^5
            sage: exp(a + T.O(5))
            1 + a + 1/2*a^2 + 1/6*a^3 + 1/24*a^4 + O(a, b)^5

        TESTS::

            sage: exp(a^2 + T.O(5))
            1 + a^2 + 1/2*a^4 + O(a, b)^5
        """
        R = self.parent()
        Rbg = R._bg_power_series_ring  # the univariate background series ring
        from sage.functions.log import exp
        c = self.constant_coefficient()
        # exp(c) may live outside the base ring (e.g. in the Symbolic Ring);
        # in that case the final multiplication below raises a TypeError.
        exp_c = exp(c)
        x = self._bg_value - c  # constant-free part; nilpotent modulo precision
        if x.is_zero(): return exp_c
        val = x.valuation()
        assert(val >= 1)
        prec = min(prec, self.prec())
        if is_Infinite(prec):
            prec = R.default_prec()
        # Taylor expansion exp(x) = sum_n x^n / n!, truncated at degree prec;
        # only terms with n*val < prec contribute, hence the range bound.
        n_inv_factorial = R.base_ring().one()
        x_pow_n = Rbg.one()
        exp_x = Rbg.one().add_bigoh(prec)
        for n in range(1,prec//val+1):
            x_pow_n = (x_pow_n * x).add_bigoh(prec)
            n_inv_factorial /= n
            exp_x += x_pow_n * n_inv_factorial
        result_bg = exp_c*exp_x
        # Dividing by n! may have moved the coefficients into the fraction
        # field of the base ring; adjust the parent ring accordingly.
        if result_bg.base_ring() is not self.base_ring():
            R = R.change_ring(self.base_ring().fraction_field())
        return R(result_bg, prec=prec)
    def log(self, prec=infinity):
        r"""
        Return the logarithm of the formal power series.

        INPUT:

        - ``prec`` -- Integer or ``infinity``. The degree to truncate
          the result to.

        OUTPUT:

        The logarithm of the multivariate power series as a new
        multivariate power series.

        EXAMPLES::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = 1 + a + b + a*b + T.O(5)
            sage: f.log()
            a + b - 1/2*a^2 - 1/2*b^2 + 1/3*a^3 + 1/3*b^3 - 1/4*a^4 - 1/4*b^4 + O(a, b)^5
            sage: log(f)
            a + b - 1/2*a^2 - 1/2*b^2 + 1/3*a^3 + 1/3*b^3 - 1/4*a^4 - 1/4*b^4 + O(a, b)^5
            sage: exp(log(f)) - f
            0 + O(a, b)^5

        If the power series has a constant coefficient `c` and
        `\exp(c)` is transcendental, then `\exp(f)` would have to be a
        power series over the :class:`~sage.symbolic.ring.SymbolicRing`. These
        are not yet implemented and therefore such cases raise an error::

            sage: g = 2+f
            sage: log(g)
            Traceback (most recent call last):
            ...
            TypeError: unsupported operand parent(s) for -: 'Symbolic Ring' and 'Power
            Series Ring in Tbg over Multivariate Polynomial Ring in a, b over Rational Field'

        Another workaround for this limitation is to change base ring
        to one which is closed under exponentiation, such as `\RR` or `\CC`::

            sage: log(g.change_ring(RDF))
            1.09861228... + 0.333333333...*a + 0.333333333...*b - 0.0555555555...*a^2
            + 0.222222222...*a*b - 0.0555555555...*b^2 + 0.0123456790...*a^3
            - 0.0740740740...*a^2*b - 0.0740740740...*a*b^2 + 0.0123456790...*b^3
            - 0.00308641975...*a^4 + 0.0246913580...*a^3*b + 0.0246913580...*a*b^3
            - 0.00308641975...*b^4 + O(a, b)^5

        TESTS::

            sage: (1+a).log(prec=10).exp()
            1 + a + O(a, b)^10
            sage: a.exp(prec=10).log()
            a + O(a, b)^10
            sage: log(1+a)
            a - 1/2*a^2 + 1/3*a^3 - 1/4*a^4 + 1/5*a^5 - 1/6*a^6 + 1/7*a^7
            - 1/8*a^8 + 1/9*a^9 - 1/10*a^10 + 1/11*a^11 + O(a, b)^12
            sage: -log(1-a+T.O(5))
            a + 1/2*a^2 + 1/3*a^3 + 1/4*a^4 + O(a, b)^5
            sage: a.log(prec=10)
            Traceback (most recent call last):
            ...
            ValueError: Can only take formal power series for non-zero constant term.
        """
        R = self.parent()
        Rbg = R._bg_power_series_ring  # the univariate background series ring
        from sage.functions.log import log
        c = self.constant_coefficient()
        if c.is_zero():
            raise ValueError('Can only take formal power series for non-zero constant term.')
        # log(c) may live outside the base ring (e.g. in the Symbolic Ring);
        # in that case the final subtraction below raises a TypeError.
        log_c = log(c)
        x = 1 - self._bg_value/c  # write self = c*(1-x); log self = log c + log(1-x)
        if x.is_zero(): return log_c
        val = x.valuation()
        assert(val >= 1)
        prec = min(prec, self.prec())
        if is_Infinite(prec):
            prec = R.default_prec()
        # Mercator series: -log(1-x) = sum_n x^n / n, truncated at degree prec;
        # only terms with n*val < prec contribute, hence the range bound.
        x_pow_n = Rbg.one()
        log_x = Rbg.zero().add_bigoh(prec)
        for n in range(1,prec//val+1):
            x_pow_n = (x_pow_n * x).add_bigoh(prec)
            log_x += x_pow_n / n
        result_bg = log_c - log_x
        # Dividing by n may have moved the coefficients into the fraction
        # field of the base ring; adjust the parent ring accordingly.
        if result_bg.base_ring() is not self.base_ring():
            R = R.change_ring(self.base_ring().fraction_field())
        return R(result_bg, prec=prec)
    def laurent_series(self):
        """
        Not implemented for multivariate power series.

        TESTS::

            sage: T.<a,b> = PowerSeriesRing(ZZ,2)
            sage: f = a + b + a*b + T.O(5)
            sage: f.laurent_series()
            Traceback (most recent call last):
            ...
            NotImplementedError: laurent_series not defined for multivariate power series.
        """
        # There is no multivariate Laurent series ring to convert into.
        raise NotImplementedError("laurent_series not defined for multivariate power series.")
class MO(object):
    """
    Object representing a zero element with given precision.

    EXAMPLES::

        sage: R.<u,v> = QQ[[]]
        sage: m = O(u, v)
        sage: m^4
        0 + O(u, v)^4
        sage: m^1
        0 + O(u, v)^1

        sage: T.<a,b,c> = PowerSeriesRing(ZZ,3)
        sage: z = O(a, b, c)
        sage: z^1
        0 + O(a, b, c)^1
        sage: 1 + a + z^1
        1 + O(a, b, c)^1
        sage: w = 1 + a + O(a, b, c)^2; w
        1 + a + O(a, b, c)^2
        sage: w^2
        1 + 2*a + O(a, b, c)^2
    """
    def __init__(self, x):
        """
        Initialize ``self`` with the tuple of variables ``x``.

        EXAMPLES::

            sage: R.<u,v> = QQ[[]]
            sage: m = O(u, v)
        """
        self._vars = x

    def __pow__(self, prec):
        """
        Raise ``self`` to the given precision ``prec``, i.e. return the
        zero series with big-oh precision ``prec``.

        EXAMPLES::

            sage: R.<u,v> = QQ[[]]
            sage: m = O(u, v)
            sage: m^4
            0 + O(u, v)^4
        """
        parent = self._vars[0].parent()
        # Only O(all generators) defines a total-degree precision; a strict
        # subset of the variables is not supported.
        if self._vars != parent.gens():
            raise NotImplementedError
        # Reuse the parent computed above instead of recomputing it from
        # self._vars[0] as the original did.
        return parent(0, prec)
| 33.495068 | 154 | 0.497118 |
569e62e7aa67f7c8824d71dd723072f97be22237 | 10,247 | py | Python | Hw2TSP.py | Halil-ibrahim-GUNBULAK/GenericAlgortihm-TSP_solving | 89c97916d1c49aa1c8ecc17840f4035224122c66 | [
"Apache-2.0"
] | 1 | 2022-02-15T13:36:17.000Z | 2022-02-15T13:36:17.000Z | Hw2TSP.py | Halil-ibrahim-GUNBULAK/GenericAlgortihm-TSP_solving | 89c97916d1c49aa1c8ecc17840f4035224122c66 | [
"Apache-2.0"
] | null | null | null | Hw2TSP.py | Halil-ibrahim-GUNBULAK/GenericAlgortihm-TSP_solving | 89c97916d1c49aa1c8ecc17840f4035224122c66 | [
"Apache-2.0"
] | null | null | null | import random
import copy
import os
import time
import math
import matplotlib.pyplot as plt
list_of_cities = []
# inside the city class keeps the x y coordinates and name of the city also the distance between two cities is calculated here again
class City(object):
def __init__(self, num, x, y, distance=None):
self.x = x
self.y = y
self.num = num
list_of_cities.append(self) #list of cities appen global list then we use that
self.distance = {self.num: 0.0}# Creates a dictionary of the distances to all the other cities initial always 0
if distance:
self.distance = distance
def point_dist(self, x1, y1, x2, y2):# to coordinaats between calculate
return math.sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2))
def calculate_distances(self):
for city in list_of_cities:
tmp_dist = self.point_dist(self.x, self.y, city.x, city.y)
self.distance[city.num] = tmp_dist
RouteListX = []
RouteListY = []
# Route Class Created to keep route information and support graphic creation
class Route(object):
def __init__(self):
self.route = sorted(list_of_cities, key=lambda *args: random.random())
### Calculates its length route lentg hesaplanıyor
self.calc_rt()
def calc_rt(self): # calculate root lentg
self.length = 0.0 # rota özelliğindeki her şehir için:
for city in self.route:
# listedeki bir sonraki şehri işaret eden bir sonraki şehir değişkeni ayarlayın ve sonunda ata:
next_city = self.route[self.route.index(city) - len(self.route) + 1]#Bir sonraki şehre olan mesafeyi bulmak için ilk şehrin Distance_to özelliğini kullanır:
dist_to_next = city.distance[next_city.num]
# bu uzunluğu uzunluk özelliğine ekler.
self.length += dist_to_next
def printcityName_and_takeValueGraph(self, print_route=False):#Rotadaki şehirleri yazdırma ve en iyi halinde grafiği çizdirmek için kullanılır
RouteListX.clear()
RouteListY.clear()
cities_str = ''
for city in self.route:
cities_str += city.num + ','
RouteListX.append(city.x)
RouteListY.append(city.y)
cities_str = cities_str[:-1] # chops off last comma
if print_route:
print(' ' + cities_str)
# A population of Route() objects: holds a list of routes and exposes the
# fittest (shortest) one.
class RoutePop(object):
    def __init__(self, size, initialise):
        # When ``initialise`` is true the population is filled with random
        # routes and the best one is cached; otherwise it starts empty and
        # is filled by the caller (e.g. tournament selection).
        self.rt_pop = []
        self.size = size
        if initialise:
            for _ in range(0, size):
                self.rt_pop.append(Route())
            self.get_fittest()

    def get_fittest(self):
        """Cache the shortest Route in self.fittest and return it."""
        best = min(self.rt_pop, key=lambda rt: rt.length)
        self.fittest = best
        return best
# Class for bringing together all of the methods to do with the Genetic Algorithm
class GA(object):
    def crossover(self, parent1, parent2):
        '''
        Ordered crossover: a randomly chosen slice of parent1 is copied into
        the child, and the remaining slots are filled, in order, with the
        cities of parent2 (e.g. 12345678 crossed with another tour gives a
        1234....X....5678-style mix).
        '''
        child_rt = Route()  # child route; its cities are overwritten below
        for x in range(0, len(child_rt.route)):
            child_rt.route[x] = None
        # Two random integer indices of the parent1:
        start_position = random.randint(1, len(parent1.route))
        end_position = random.randint(start_position-1, len(parent1.route))
        # NOTE(review): range(end_position, start_position) is empty whenever
        # end_position >= start_position, so the slice copied from parent1 is
        # frequently empty -- confirm this is the intended behaviour.
        for i in range(end_position, start_position):
            child_rt.route[i] = parent1.route[i]  # copy the slice from parent1
        # Cycles through the parent2. And fills in the child_rt
        # cycles through length of parent2:
        for i in range(len(parent2.route)):
            if not parent2.route[i] in child_rt.route:
                # find the first still-empty slot and put the city there
                for x in range(len(child_rt.route)):
                    if child_rt.route[x] == None:
                        child_rt.route[x] = parent2.route[i]
                        break
        # Repeated until every city is in the child route; return the child
        # (of type Route()) with its length recomputed.
        child_rt.calc_rt()
        return child_rt

    def mutation(self, route_to_mut):
        '''
        Route() --> Route()
        Swaps two random indexes in route_to_mut.route. Runs k_mut_prob*100 % of the time
        '''
        # Mutate only with probability 0.39 so that some routes survive
        # unchanged, increasing randomness (cf. Hassanat & Alkafaween,
        # "Choosing Mutation and Crossover Ratios for Genetic Algorithms --
        # A Review with a New Dynamic Approach", sec. 2.1 GA PARAMETERS).
        if random.random() < 0.39:
            # two random, distinct indices:
            mut_pos1 = random.randint(0, len(route_to_mut.route) - 1)
            mut_pos2=mut_pos1
            while(mut_pos2==mut_pos1):  # redraw until the indices differ
                mut_pos2 = random.randint(0, len(route_to_mut.route) - 1)
            # swap the cities at the two positions
            city1 = route_to_mut.route[mut_pos1]
            city2 = route_to_mut.route[mut_pos2]
            route_to_mut.route[mut_pos2] = city1
            route_to_mut.route[mut_pos1] = city2
            # Recompute the route's length (updates .length)
            route_to_mut.calc_rt()
        return route_to_mut

    def tournament_select(self, population):
        # Tournament selection keeps some randomness in parent choice:
        # sample a handful of routes and return the best among them.
        # New smaller population; size = tournament size.
        tournament_pop = RoutePop(size=7, initialise=False)
        # NOTE(review): only 6 routes are appended although size=7 --
        # confirm whether the off-by-one is deliberate.
        for i in range(6):  # fills with random individuals (repeats possible)
            tournament_pop.rt_pop.append(random.choice(population.rt_pop))
        # returns the fittest:
        return tournament_pop.get_fittest()

    def evolve_population(self, init_pop):
        # makes a new population:
        descendant_pop = RoutePop(size=init_pop.size, initialise=True)
        # Elitism offset (number of Routes() carried over to the new population)
        elitismOffset = 0
        # with elitism, slot 0 of the new population is the old fittest route
        if elitism:
            descendant_pop.rt_pop[0] = init_pop.fittest
            elitismOffset = 1
        # Fill the rest of the new population with children of two tournament
        # winners taken from the previous population.
        for x in range(elitismOffset, descendant_pop.size):
            # two parents:
            tournament_parent1 = self.tournament_select(init_pop)
            tournament_parent2 = self.tournament_select(init_pop)
            # A child:
            tournament_child = self.crossover(tournament_parent1, tournament_parent2)
            # Fill the population up with children
            descendant_pop.rt_pop[x] = tournament_child
        # Mutate the routes.  NOTE(review): mutation() performs its own
        # p=0.39 check, so the effective per-route mutation probability here
        # is 0.39 * 0.39 -- confirm the double check is intentional.
        for route in descendant_pop.rt_pop:
            if random.random() < 0.39:
                self.mutation(route)
        # Refresh the cached fittest route:
        descendant_pop.get_fittest()
        return descendant_pop
counterList = []  # iteration numbers, collected for the fitness plot
FittestList = []  # best tour length per iteration, for the fitness plot


def GA_loop(n_generations, pop_size):
    # Run the GA for ``n_generations`` generations on a population of
    # ``pop_size`` random routes, tracking the best route ever seen.
    counter = 0
    # Creates the population:
    print("Creates the population:")
    the_population = RoutePop(pop_size, True)
    best_route = Route()  # placeholder; any real improvement replaces it
    for x in range(1, n_generations):  # how many generations to evolve
        # Evolve the population: children of tournament winners (plus the
        # elite route) replace the previous generation.
        the_population = GA().evolve_population(the_population)
        # If we have found a new shorter route, save it to best_route
        if the_population.fittest.length < best_route.length:
            # deepcopy because later generations mutate the Route objects
            # in place, which would corrupt a plain reference.
            best_route = copy.deepcopy(the_population.fittest)
        # for fitness graph
        counterList.append(counter)
        FittestList.append(the_population.fittest.length)
        counter += 1
    # Prints final output to terminal:
    print("best way founded")
    print('Final best distance: {0:.2f}'.format(best_route.length))
    # NOTE(review): the call below prints None; its real effect is refreshing
    # the RouteListX/Y globals used by the plotting helpers.
    print(the_population.fittest.printcityName_and_takeValueGraph())
    best_route.printcityName_and_takeValueGraph(print_route=True)
# If elitism is True, the best route of one generation is carried over
# unchanged into the next generation.
elitism = True

cities = []         # raw rows parsed from City_cord.txt
cityForGraphX = []  # x coordinates for the city-map plot
cityForGraphY = []  # y coordinates for the city-map plot


def CityAnd_GAStarter():
    """Load the city coordinates from City_cord.txt, build the City objects,
    precompute all pairwise distances, and start the GA."""
    # Bug fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open("City_cord.txt", 'r') as c:
        text = c.read()
    text = text.split("\n")
    for i in text:
        cities.append(i.split(" "))
    print(cities)
    # The input file is expected to provide exactly 101 cities
    # (name, x, y per line).
    for i in range(101):
        tmp2 = City(str(cities[i][0]), int(cities[i][1]), int(cities[i][2]))
        cityForGraphX.append(cities[i][1])
        cityForGraphY.append(cities[i][2])
    # Fill every city's distance table before the GA starts.
    for city in list_of_cities:
        city.calculate_distances()
    # create and run an application instance:
    GA_loop(n_generations=10, pop_size=100)
def geneticAlgorithmPlot(zaman, graph):
    # Plot GA convergence: fittest tour length (graph) against iteration
    # number (zaman -- Turkish for "time").
    plt.plot(zaman, graph, 'r')
    plt.axis([0, max(zaman), min(graph), max(graph)])
    plt.ylabel('Current Fittest')
    plt.xlabel('Time')
    plt.show()
def plotCities():
    """Plot the loaded city coordinates as a closed red polyline."""
    # Close the tour visually by repeating the first point at the end.
    cityForGraphX.append(cityForGraphX[0])
    cityForGraphY.append(cityForGraphY[0])
    plt.plot(cityForGraphX, cityForGraphY, 'r')
    # Bug fix: axis labels must be set *before* plt.show(); the original set
    # them afterwards, so they never appeared on the displayed figure.
    plt.ylabel('City Location')
    plt.xlabel('Time')
    plt.show()
def draw_Best():
    # Draw the best route found (coordinates were stored in RouteListX/Y by
    # printcityName_and_takeValueGraph) as a yellow polyline.
    plt.plot(RouteListX, RouteListY, 'y')
    plt.show()
# Script entry point: load the cities and run the GA, then draw the
# convergence plot, the raw city map, and the best route found.
CityAnd_GAStarter()
geneticAlgorithmPlot(counterList,FittestList)
plotCities()
print("deger=", RouteListY, RouteListX)
print()
draw_Best()
| 33.596721 | 168 | 0.662145 |
f9f1f18216ba989e964815e13cef608bba4528f2 | 18,350 | py | Python | graphTranspose-dsf-prim-bellmanFord-dijkstra.py | luckydoglou/advanced-algorithms | 0e4b5b9c550b61ac795bee0cc49a564438b0ea44 | [
"MIT"
] | 1 | 2020-07-31T02:00:09.000Z | 2020-07-31T02:00:09.000Z | graphTranspose-dsf-prim-bellmanFord-dijkstra.py | luckydoglou/advanced-algorithms | 0e4b5b9c550b61ac795bee0cc49a564438b0ea44 | [
"MIT"
] | null | null | null | graphTranspose-dsf-prim-bellmanFord-dijkstra.py | luckydoglou/advanced-algorithms | 0e4b5b9c550b61ac795bee0cc49a564438b0ea44 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Topic: Graph theory, minimal spanning tree, shortest path, greedy algorithms
Implementation: graph transpose, in & out degree, depth-first-search
prim's algorithm, bellman-ford algorithm, and dijkstra's algorithm
Name: Xiaolou Huang
Date: 11/12/2019
"""
import sys
# =============================================================================
class Graph(object):
    """Directed, weighted graph stored as an adjacency matrix, with DFS,
    Prim's MST, Bellman-Ford and Dijkstra implementations that print their
    per-iteration state."""
    user_defined_vertices = []
    dfs_timer = 0  # shared clock used by the DFS traversal

    def __init__(self, vertices, edges):
        super(Graph, self).__init__()
        n = len(vertices)
        self.matrix = [[0 for x in range(n)] for y in range(n)]
        self.vertices = vertices
        self.edges = edges
        # Each edge is (source_name, target_name, weight).
        for edge in edges:
            x = vertices.index(edge[0])
            y = vertices.index(edge[1])
            self.matrix[x][y] = edge[2]

    def display(self):
        # Print the vertex list followed by one adjacency-matrix row per vertex.
        print(self.vertices)
        for i, v in enumerate(self.vertices):
            print(v, self.matrix[i])

    def transpose(self):
        # Transpose the adjacency matrix in place by swapping across the
        # main diagonal (reverses every edge direction).
        n = len(self.vertices)  # number of vertices
        for x in range(n):
            for y in range(x, n, 1):
                temp = self.matrix[x][y]
                self.matrix[x][y] = self.matrix[y][x]
                self.matrix[y][x] = temp

    def in_degree(self):
        # NOTE(review): counts only entries equal to 1, so weighted edges
        # (weight != 1) are ignored -- confirm this is intended.
        n = len(self.vertices)  # number of vertices
        degree = [0 for i in range(n)]  # initialize 'degree' list
        for y in range(n):  # check every column (every vertex's in-degree)
            count = 0
            # check every row, if there is a 1, in-degree count + 1
            for x in range(n):
                if self.matrix[x][y] == 1:
                    count = count + 1
            degree[y] = count
        print("In degree of the graph:")
        self.print_degree(degree)

    def out_degree(self):
        # NOTE(review): same weight-1-only restriction as in_degree().
        n = len(self.vertices)  # number of vertices
        degree = [0 for i in range(n)]  # initialize 'degree' list
        for x in range(n):  # check every row (every vertex's out-degree)
            count = 0
            # check every column, if there is a 1, out-degree count + 1
            for y in range(n):
                if self.matrix[x][y] == 1:
                    count = count + 1
            degree[x] = count
        print("Out degree of the graph:")
        self.print_degree(degree)

    def dfs_on_graph(self):
        # Depth-first search over the whole graph (restarting from every
        # undiscovered vertex), printing discover/finish times at the end.
        n = len(self.vertices)  # number of vertices
        discover = [0 for i in range(n)]
        finish = [0 for i in range(n)]
        # meaning of each field in list_v:
        # [vertex_name, order_number, color, discover_time, finish_time]
        list_v = []
        for x in range(n):
            list_v.append([self.vertices[x], x, "white", 0, 0])
        # visit every vertex in the graph
        self.dfs_timer = 0
        for i in range(n):
            if list_v[i][2] == "white":
                self.dfs_visit(list_v, i)
        # put discover time and finish time into two lists,
        # and print final results
        for i in range(n):
            discover[i] = list_v[i][3]
            finish[i] = list_v[i][4]
        self.print_discover_and_finish_time(discover, finish)

    # Helper function for dfs_on_graph().
    # param list_v: bookkeeping records, one per vertex (see dfs_on_graph).
    # param i: index of the vertex to visit in list_v.
    def dfs_visit(self, list_v, i):
        n = len(self.vertices)
        self.dfs_timer = self.dfs_timer + 1
        list_v[i][3] = self.dfs_timer  # record discover time of this vertex
        list_v[i][2] = "gray"
        # Recurse into every undiscovered neighbour.
        # NOTE(review): only edges of weight exactly 1 are followed.
        for x in range(n):
            if self.matrix[list_v[i][1]][x] == 1 and list_v[x][2] == "white":
                self.dfs_visit(list_v, list_v[x][1])
        list_v[i][2] = "black"
        self.dfs_timer = self.dfs_timer + 1
        list_v[i][4] = self.dfs_timer  # record finish time of this vertex

    def prim(self, root):
        # Prim's minimum-spanning-tree algorithm starting from ``root``;
        # prints d (key) and pi (parent) for every vertex at each step.
        n = len(self.vertices)
        # prim_list stores the state reported after each iteration.
        # prim_list fields:
        # [vertex_name, vertex_number, distance/weight/key/d, parent/pi]
        iteration = 0
        prim_list = []
        for i in range(n):
            if self.vertices[i] == root:  # for root, set distance to 0
                prim_list.append([self.vertices[i], i, 0, "None"])
            else:
                prim_list.append([self.vertices[i], i, sys.maxsize, "None"])
        # queue_list holds the vertices still to be pulled into the tree.
        # queue_list fields: [vertex_name, vertex_number, distance/weight/key]
        queue_list = []
        for i in range(n):
            if self.vertices[i] == root:  # for root, set distance to 0
                queue_list.append([self.vertices[i], i, 0])
            else:
                queue_list.append([self.vertices[i], i, sys.maxsize])
        # Initial output for self.print_d_and_pi()
        count = 0
        iteration = "Initial"
        d = []
        pi = []
        for v in prim_list:
            if v[2] == sys.maxsize:
                d.append("inf")
            else:
                d.append(v[2])
            pi.append(v[3])
        self.print_d_and_pi(iteration, d, pi)
        while queue_list:
            # get the vertex list from remaining queue_list with min value
            u = self.extract_min_prim(queue_list)
            for i in range(n):  # length of self.vertices
                # check if it's an adjacency vertex
                if self.matrix[u[1]][i] != 0:
                    # If this neighbour is still in queue_list and the edge
                    # weight is positive and smaller than its current key,
                    # reassign its parent and key (in both lists).
                    for j in range(len(queue_list)):
                        if prim_list[i][0] == queue_list[j][0] \
                                and prim_list[i][2] > self.matrix[u[1]][i] > 0:
                            prim_list[i][3] = u[0]  # update parent
                            prim_list[i][2] = self.matrix[u[1]][i]  # update key
                            queue_list[j][2] = self.matrix[u[1]][i]  # update key in queue_list
            # format the output for self.print_d_and_pi()
            d = []
            pi = []
            iteration = count
            count = count + 1
            for v in prim_list:
                if v[2] == sys.maxsize:
                    d.append("inf")
                else:
                    d.append(v[2])
                pi.append(v[3])
            self.print_d_and_pi(iteration, d, pi)

    # Helper function for prim().
    # Removes and returns the vertex record with the minimum key from
    # queue_list (linear scan).
    def extract_min_prim(self, queue_list):
        u = []
        min_v = sys.maxsize
        for i in range(len(queue_list)):
            if queue_list[i][2] < min_v:
                u = queue_list[i]
                min_v = queue_list[i][2]
        queue_list.remove(u)
        return u

    def bellman_ford(self, source):
        # Bellman-Ford single-source shortest paths from ``source``; returns
        # False if a negative-weight cycle is detected, True otherwise.
        n = len(self.vertices)
        # bellman_list stores the information about vertices for output results
        # bellman_list fields:
        # {vertex_name, vertex_order, distance/key/d, parent/pi}
        bellman_list = []
        for i in range(n):
            if self.vertices[i] == source:
                bellman_list.append({"name": self.vertices[i],
                                     "order": i,
                                     "key": 0,
                                     "pi": "None"})
            else:
                bellman_list.append({"name": self.vertices[i],
                                     "order": i,
                                     "key": sys.maxsize,
                                     "pi": "None"})
        # initial formatted output for print_d_and_pi()
        count = 0
        d = []
        pi = []
        iteration = "Initial"
        for x in range(n):
            if bellman_list[x]["key"] == sys.maxsize:
                d.append("inf")
            else:
                d.append(bellman_list[x]["key"])
            pi.append(bellman_list[x]["pi"])
        self.print_d_and_pi(iteration, d, pi)
        # Edges are relaxed in this fixed order.
        # NOTE(review): relax_list is hard-coded to vertices s/t/x/y/z, so
        # this method only works for that specific example graph -- calling
        # it on a graph with other vertex names (e.g. A..E) leaves u/v as
        # empty lists and crashes below.  Consider deriving the order from
        # self.edges instead.
        relax_list = [('t', 'x'),
                      ('t', 'y'),
                      ('t', 'z'),
                      ('x', 't'),
                      ('y', 'x'),
                      ('y', 'z'),
                      ('z', 'x'),
                      ('z', 's'),
                      ('s', 't'),
                      ('s', 'y')]
        for i in range(n - 1):  # in each iteration
            for j in range(len(relax_list)):  # for each edge in relax_list
                u = []
                v = []
                w = 0
                for k in range(n):  # for each sub-list in bellman_list
                    if bellman_list[k]["name"] == relax_list[j][0]:  # find vertex u
                        u = bellman_list[k]
                    elif bellman_list[k]["name"] == relax_list[j][1]:  # find vertex v
                        v = bellman_list[k]
                w = self.matrix[u["order"]][v["order"]]  # find w(u, v)
                self.relax(u, v, w)  # relax the edge
            # format output for print_d_and_pi()
            iteration = count
            count = count + 1
            for x in range(n):
                # The slack of 99999 treats "maxsize minus a few negative
                # relaxations" as still unreachable.
                if bellman_list[x]["key"] >= sys.maxsize - 99999:
                    d[x] = "inf"
                else:
                    d[x] = bellman_list[x]["key"]
                pi[x] = bellman_list[x]["pi"]
            self.print_d_and_pi(iteration, d, pi)
        # check if there is a negative weight cycle
        for j in range(len(relax_list)):  # for each edge in relax_list
            u = []
            v = []
            w = 0
            for k in range(n):  # for each sub-list in bellman_list
                if bellman_list[k]["name"] == relax_list[j][0]:  # find vertex u
                    u = bellman_list[k]
                elif bellman_list[k]["name"] == relax_list[j][1]:  # find vertex v
                    v = bellman_list[k]
            w = self.matrix[u["order"]][v["order"]]  # find w(u, v)
            if v["key"] > u["key"] + w:
                return False
        return True

    # Helper function for bellman_ford() and dijkstra().
    # param u, v: vertex records (dicts with "key"/"pi") forming edge (u, v)
    # param w: the weight of (u, v)
    def relax(self, u, v, w):
        if v["key"] > u["key"] + w:
            v["key"] = u["key"] + w  # update d/key
            v["pi"] = u["name"]  # update parent

    def dijkstra(self, source):
        # Dijkstra single-source shortest paths from ``source``; prints the
        # d/pi state after each vertex is finalized.
        n = len(self.vertices)
        # dijkstra_list stores information to output the results
        # dijkstra_list fields:
        # {vertex_name, vertex_order, if_visited, key/distance/d, parent/pi}
        dijkstra_list = []
        for i in range(n):
            if self.vertices[i] == source:
                dijkstra_list.append(
                    {"name": self.vertices[i],
                     "order": i,
                     "visited": "F",
                     "key": 0,
                     "pi": "None"})
            else:
                dijkstra_list.append(
                    {"name": self.vertices[i],
                     "order": i,
                     "visited": "F",
                     "key": sys.maxsize,
                     "pi": "None"})
        # vertices still to iterate through
        queue_list = list(self.vertices)
        # initial formatted output results for print_d_and_pi()
        iteration = "Initial"
        count = 0
        d = []
        pi = []
        for i in range(n):
            if dijkstra_list[i]["key"] == sys.maxsize:
                d.append("inf")
            else:
                d.append(dijkstra_list[i]["key"])
            pi.append(dijkstra_list[i]["pi"])
        self.print_d_and_pi(iteration, d, pi)
        # finding the shortest path for every vertex
        while queue_list:
            u = self.extract_min_dijkstra(dijkstra_list)  # u from dijkstra_list
            queue_list.remove(u["name"])
            u["visited"] = "T"
            for i in range(n):  # check adjacency matrix for adj[u]
                w = self.matrix[u["order"]][i]  # w is the weight of (u, v)
                if w != 0:  # if there is an edge from u to v
                    v = dijkstra_list[i]
                    self.relax(u, v, w)
            # format output results for print_d_and_pi()
            iteration = count
            count = count + 1
            for i in range(n):
                if dijkstra_list[i]["key"] == sys.maxsize:
                    d[i] = "inf"
                else:
                    d[i] = dijkstra_list[i]["key"]
                pi[i] = dijkstra_list[i]["pi"]
            self.print_d_and_pi(iteration, d, pi)

    # Returns the unvisited vertex record with the minimum key (linear scan).
    def extract_min_dijkstra(self, dijkstra_list):
        u = []
        min_v = sys.maxsize
        for i in range(len(dijkstra_list)):
            if min_v > dijkstra_list[i]["key"] \
                    and dijkstra_list[i]["visited"] == "F":
                u = dijkstra_list[i]
                min_v = dijkstra_list[i]["key"]
        return u

    # Print one iteration of a shortest-path/MST run.
    # param d: per-vertex distance/key values ("inf" or sys.maxsize = unreachable)
    # param pi: per-vertex parent names
    def print_d_and_pi(self, iteration, d, pi):
        assert ((len(d) == len(self.vertices)) and
                (len(pi) == len(self.vertices)))
        print("Iteration: {0}".format(iteration))
        for i, v in enumerate(self.vertices):
            print("Vertex: {0}\td: {1}\tpi: {2}".format(v, 'inf' if d[i] == sys.maxsize else d[i], pi[i]))

    def print_discover_and_finish_time(self, discover, finish):
        # Print the DFS discover/finish timestamps per vertex.
        assert ((len(discover) == len(self.vertices)) and
                (len(finish) == len(self.vertices)))
        for i, v in enumerate(self.vertices):
            print("Vertex: {0}\tDiscovered: {1}\tFinished: {2}".format(
                v, discover[i], finish[i]))

    def print_degree(self, degree):
        # Print one degree value per vertex.
        assert ((len(degree) == len(self.vertices)))
        for i, v in enumerate(self.vertices):
            print("Vertex: {0}\tDegree: {1}".format(v, degree[i]))
def main():
    """Exercise the Graph implementation on the assignment questions:
    display/transpose/degree printing (Q1-Q2), DFS (Q3), Prim (Q4),
    Bellman-Ford (Q5 and variants) and Dijkstra (Q6).

    Edge lists for the undirected Q4 graph list each edge in both
    directions explicitly.
    """
    # Thoroughly test your program and produce useful output.
    # # Q1 and Q2
    graph = Graph(['1', '2'], [('1', '2', 1)])
    graph.display()
    graph.transpose()
    graph.display()
    graph.transpose()
    graph.display()
    graph.in_degree()
    graph.out_degree()
    graph.print_d_and_pi(1, [1, sys.maxsize], [2, None])
    graph.print_degree([1, 0])
    graph.print_discover_and_finish_time([0, 2], [1, 3])
    # # Q3 -- directed graph for depth-first search
    graph = Graph(['q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'],
                  [('q', 's', 1),
                   ('s', 'v', 1),
                   ('v', 'w', 1),
                   ('w', 's', 1),
                   ('q', 'w', 1),
                   ('q', 't', 1),
                   ('t', 'x', 1),
                   ('x', 'z', 1),
                   ('z', 'x', 1),
                   ('t', 'y', 1),
                   ('y', 'q', 1),
                   ('r', 'y', 1),
                   ('r', 'u', 1),
                   ('u', 'y', 1)])
    graph.display()
    graph.dfs_on_graph()
    # # Q4 - Prim (undirected: every edge appears in both directions)
    graph = Graph(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'],
                  [('A', 'H', 6),
                   ('H', 'A', 6),
                   ('A', 'B', 4),
                   ('B', 'A', 4),
                   ('B', 'H', 5),
                   ('H', 'B', 5),
                   ('B', 'C', 9),
                   ('C', 'B', 9),
                   ('G', 'H', 14),
                   ('H', 'G', 14),
                   ('F', 'H', 10),
                   ('H', 'F', 10),
                   ('B', 'E', 2),
                   ('E', 'B', 2),
                   ('G', 'F', 3),
                   ('F', 'G', 3),
                   ('E', 'F', 8),
                   ('F', 'E', 8),
                   ('D', 'E', 15),
                   ('E', 'D', 15)])
    graph.prim('G')
    #
    # # Q5 -- Bellman-Ford with negative edge weights
    graph = Graph(['s', 't', 'x', 'y', 'z'],
                  [('t', 'x', 5),
                   ('t', 'y', 8),
                   ('t', 'z', -4),
                   ('x', 't', -2),
                   ('y', 'x', -3),
                   ('y', 'z', 9),
                   ('z', 'x', 7),
                   ('z', 's', 2),
                   ('s', 't', 6),
                   ('s', 'y', 7)])
    graph.bellman_ford('z')
    #
    # # Q5 alternate (edge z->x reweighted to 4, different source vertex)
    graph = Graph(['s', 't', 'x', 'y', 'z'],
                  [('t', 'x', 5),
                   ('t', 'y', 8),
                   ('t', 'z', -4),
                   ('x', 't', -2),
                   ('y', 'x', -3),
                   ('y', 'z', 9),
                   ('z', 'x', 4),
                   ('z', 's', 2),
                   ('s', 't', 6),
                   ('s', 'y', 7)])
    graph.bellman_ford('s')
    #
    # # Q6 -- Dijkstra (non-negative weights only)
    graph = Graph(['s', 't', 'x', 'y', 'z'],
                  [('s', 't', 3),
                   ('s', 'y', 5),
                   ('t', 'x', 6),
                   ('t', 'y', 2),
                   ('x', 'z', 2),
                   ('y', 't', 1),
                   ('y', 'x', 4),
                   ('y', 'z', 6),
                   ('z', 's', 3),
                   ('z', 'x', 7)])
    graph.dijkstra('s')
    #
    # Extra Bellman-Ford case with negative weights (no negative cycle)
    graph = Graph(['A', 'B', 'C', 'D', 'E'],
                  [('A', 'B', -1),
                   ('A', 'C', 4),
                   ('B', 'C', 3),
                   ('B', 'D', 2),
                   ('B', 'E', 2),
                   ('D', 'B', 1),
                   ('D', 'C', 5),
                   ('E', 'D', -3)])
    graph.bellman_ford('A')


if __name__ == '__main__':
    main()
| 37.296748 | 107 | 0.424251 |
24141e3c3781cc5a23372f5443879b5c1918a482 | 360 | py | Python | studybud/urls.py | FDB09/discord-clone | f44eaef6332aefee60656ce956858c5f49e895b4 | [
"MIT"
] | null | null | null | studybud/urls.py | FDB09/discord-clone | f44eaef6332aefee60656ce956858c5f49e895b4 | [
"MIT"
] | null | null | null | studybud/urls.py | FDB09/discord-clone | f44eaef6332aefee60656ce956858c5f49e895b4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('base.urls')),
path('api/', include('base.api.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 27.692308 | 76 | 0.736111 |
3c42a8d2b4cad643e49df4981410bd4eb789ff3e | 283 | py | Python | reid/loss/__init__.py | tiancity-NJU/Person-Reid | 153e5695acca533229793ef96d0e7cb01dbc243d | [
"MIT"
] | 1 | 2019-03-26T07:49:44.000Z | 2019-03-26T07:49:44.000Z | reid/loss/__init__.py | tiancity-NJU/Person-Reid | 153e5695acca533229793ef96d0e7cb01dbc243d | [
"MIT"
] | null | null | null | reid/loss/__init__.py | tiancity-NJU/Person-Reid | 153e5695acca533229793ef96d0e7cb01dbc243d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .oim import oim, OIM, OIMLoss
from .triplet import TripletLoss
from .selftraining_triplet import SelfTraining_TripletLoss
__all__ = [
'oim',
'OIM',
'OIMLoss',
'TripletLoss',
'SelfTraining_TripletLoss',
]
| 20.214286 | 59 | 0.699647 |
4b1e8d83c902e889fa4eebdd49c8c01010ccd0a3 | 15,878 | py | Python | src/functions.py | JanSKowalski/ese440-ese441 | 90d7b7afc34aa062aad23dd23813284f66bf1f4d | [
"MIT"
] | null | null | null | src/functions.py | JanSKowalski/ese440-ese441 | 90d7b7afc34aa062aad23dd23813284f66bf1f4d | [
"MIT"
] | null | null | null | src/functions.py | JanSKowalski/ese440-ese441 | 90d7b7afc34aa062aad23dd23813284f66bf1f4d | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from sklearn import metrics
#from sklearn import decomposition
#from sklearn import manifold
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import random
import time
import PIL
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from skimage import io, transform
import matplotlib.pyplot as plt
#from utils import store_patterns, load_patterns
#from visualization import heatmap_grid
#import lrp
#from lrp.patterns import fit_patternnet, fit_patternnet_positive # PatternNet patterns
#needed for write_art_labels_to_csv()
import os
import csv
from classes import MahanArtDataset
#from main import SEED
TRAIN_TEST_RATIO = 0.8
VALIDATION_TRAIN_RATIO = 0.1
#SEED = random.randint(1, 1000)
#random.seed(SEED)
#np.random.seed(SEED)
#torch.manual_seed(SEED)
#torch.cuda.manual_seed(SEED)
#torch.backends.cudnn.deterministic = True
#From Mahan's directory of photos, produce a csv that connects image with label
#Assumes csvpath and datapath are correct, defined in main.py
def write_art_labels_to_csv(datapath, csvpath):
    """Scan an image directory tree and write an index CSV.

    ``datapath`` must contain one sub-directory per class; every file inside
    a class directory becomes one row of (filename, filepath, classification).
    Paths are joined with '/' to match the rest of the pipeline.

    :param datapath: root directory holding one sub-directory per class
    :param csvpath: destination path of the CSV index file (should live
                    outside ``datapath`` so it is not scanned itself)
    """
    size_of_data = 0
    # 'with' guarantees the file is closed even on error (the original
    # leaked the handle on exceptions); newline='' is the documented way
    # to open a file for csv.writer and avoids \r\r\n endings on Windows.
    with open(csvpath, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["filename", "filepath", "classification"])
        # NOTE: os.listdir order is arbitrary, so row order may vary
        # between runs/filesystems (same as the original behaviour).
        for directory in os.listdir(datapath):
            classpath = datapath + "/" + directory
            for filename in os.listdir(classpath):
                filepath = classpath + "/" + filename
                writer.writerow([filename, filepath, directory])
                size_of_data += 1
    print("There are " + str(size_of_data) + " data entries in this csv")
#Define transforms on the data, collect the data into torch iterators, instantiate model object
def prepare_data(csvpath, frame_size, BATCH_SIZE):
    """Build train/validation/test DataLoaders from the label CSV.

    Steps: (1) load the whole dataset once to estimate per-channel
    mean/std, (2) randomly split the dataframe into train/val/test with
    the module-level ratios, (3) attach augmentation+normalization
    transforms and wrap each split in a DataLoader.

    :param csvpath: path of the CSV produced by write_art_labels_to_csv()
    :param frame_size: images are resized to (frame_size, frame_size)
    :param BATCH_SIZE: samples per batch for every iterator
    :return: (train_iterator, valid_iterator, test_iterator, class_dictionary)
    """
    #print(f"SEED: {SEED}")
    initial_transforms = transforms.Compose([transforms.Resize((frame_size, frame_size))])
    #Read in data pointers
    origin_df = pd.read_csv(csvpath)
    origin_dataset = MahanArtDataset(origin_df, transform=initial_transforms)
    origin_iterator = data.DataLoader(origin_dataset, batch_size = BATCH_SIZE)
    #Determine which cat codes correspond to which art gallery
    class_dictionary = origin_dataset.access_categories()
    #Determine data statistics for normalization
    # NOTE(review): this averages per-batch mean/std over batches, which
    # only approximates the dataset statistics when the last batch is
    # smaller than BATCH_SIZE.
    means = torch.zeros(3)
    stds = torch.zeros(3)
    for item in origin_iterator:
        img = item["image"]
        means += torch.mean(img, dim = (0, 2, 3))
        stds += torch.std(img, dim = (0, 2, 3))
    means /= len(origin_iterator)
    stds /= len(origin_iterator)
    #Split train/val/test from pandas dataframe
    # (sampling is unseeded here, so splits differ between runs)
    #train_df = origin_df.sample(frac=TRAIN_TEST_RATIO, random_state=SEED)
    train_df = origin_df.sample(frac=TRAIN_TEST_RATIO)
    test_df = origin_df.drop(train_df.index)
    #val_df = train_df.sample(frac=VALIDATION_TRAIN_RATIO, random_state=SEED)
    val_df = train_df.sample(frac=VALIDATION_TRAIN_RATIO)
    train_df = train_df.drop(val_df.index)
    # Training data gets light augmentation; eval data only resize+normalize.
    train_transforms = transforms.Compose([
                           transforms.Resize((frame_size, frame_size)),
                           transforms.RandomRotation(5),
                           transforms.RandomHorizontalFlip(0.5),
                           transforms.Normalize(mean = means, std = stds)
                       ])
    test_transforms = transforms.Compose([
                           transforms.Resize((frame_size, frame_size)),
                           transforms.Normalize(mean = means, std = stds)
                       ])
    #Load data references into memory
    training_data = MahanArtDataset(train_df, transform=train_transforms)
    validation_data = MahanArtDataset(val_df, transform=test_transforms)
    testing_data = MahanArtDataset(test_df, transform=test_transforms)
    #Define iterators from pytorch, helps manage the data references
    train_iterator = data.DataLoader(training_data, shuffle = True, batch_size = BATCH_SIZE)
    valid_iterator = data.DataLoader(validation_data, batch_size = BATCH_SIZE)
    test_iterator = data.DataLoader(testing_data, batch_size = BATCH_SIZE)
    return train_iterator, valid_iterator, test_iterator, class_dictionary
#Written by Ben Trevett
def calculate_accuracy(y_pred, y):
    """Fraction of samples whose argmax prediction matches the label.

    :param y_pred: (batch, n_classes) score tensor
    :param y: (batch,) integer label tensor
    :return: scalar float tensor with the batch accuracy in [0, 1]
    """
    predicted_labels = y_pred.argmax(dim=1, keepdim=True)
    n_correct = predicted_labels.eq(y.view_as(predicted_labels)).sum()
    return n_correct.float() / y.shape[0]
#Written by Ben Trevett
def train(model, iterator, optimizer, criterion, device, logging_file):
    """Run one training epoch and return (mean loss, mean accuracy).

    The model's forward is expected to return (predictions, _extra).
    ``logging_file`` is accepted for interface compatibility but unused.
    """
    total_loss = 0
    total_acc = 0
    model.train()
    for batch in iterator:
        inputs = batch['image'].to(device)
        labels = batch['classification'].to(device)
        optimizer.zero_grad()
        predictions, _ = model(inputs)
        batch_loss = criterion(predictions, labels)
        batch_acc = calculate_accuracy(predictions, labels)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
        total_acc += batch_acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
#Written by Ben Trevett
def evaluate(model, iterator, criterion, device):
    """Run one no-gradient evaluation pass; return (mean loss, mean accuracy).

    The model's forward is expected to return (predictions, _extra).
    """
    total_loss = 0
    total_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            inputs = batch['image'].to(device)
            labels = batch['classification'].to(device)
            predictions, _ = model(inputs)
            batch_loss = criterion(predictions, labels)
            batch_acc = calculate_accuracy(predictions, labels)
            total_loss += batch_loss.item()
            total_acc += batch_acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
#Written by Ben Trevett
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
#High level training control -- Important
def train_model(NUM_EPOCHS, model, train_iterator, valid_iterator, output_filename, trial_num, load_previous):
    """Train ``model`` for up to NUM_EPOCHS epochs, logging per-epoch stats.

    A checkpoint (<output_filename>.pt) is written after training; when
    ``load_previous`` is truthy, training resumes from an existing
    checkpoint so already-completed epochs are not repeated.  Per-epoch
    progress is appended to <output_filename>_<trial_num>.txt.
    """
    previous_training = output_filename+'.pt'
    logging_file = open(output_filename+'_'+str(trial_num)+'.txt', 'a')
    #Look at computer hardware
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    start_epoch = 0
    if (load_previous):
        try:
            model, optimizer, start_epoch, criterion = load_checkpoint(model, optimizer, criterion, previous_training)
            # move any restored optimizer state tensors onto the active device
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device)
        except OSError:
            print("Not loading from checkpoint")
            pass
    model = model.to(device)
    criterion = criterion.to(device)
    best_valid_loss = float('inf')
    #Make sure not to repeat epochs that have already been trained
    for epoch in range(NUM_EPOCHS-start_epoch):
        start_time = time.monotonic()
        train_loss, train_acc = train(model, train_iterator, optimizer, criterion, device, logging_file)
        valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, device)
        end_time = time.monotonic()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        output = f'Epoch: {start_epoch+epoch:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\n\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%\n\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%'
        print(output)
        logging_file.write(output)
    # persist final state so a later run can resume where this one stopped
    state = {'epoch': NUM_EPOCHS, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'criterion': criterion, }
    torch.save(state, previous_training)
    logging_file.close()
#Written by Scott Hawley
#https://discuss.pytorch.org/t/loading-a-saved-model-for-continue-training/17244/2
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
def load_checkpoint(model, optimizer, criterion, filename):
    """Restore model/optimizer/criterion state from ``filename`` if it exists.

    The supplied model and optimizer must already be constructed; only
    their states are updated.  Returns (model, optimizer, start_epoch,
    criterion); when the checkpoint file is missing the inputs are
    returned untouched with start_epoch == 0.
    """
    start_epoch = 0
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return model, optimizer, start_epoch, criterion
    print("=> loading checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename)
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    criterion = checkpoint['criterion']
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(filename, checkpoint['epoch']))
    return model, optimizer, start_epoch, criterion
#Find out what the accuracy is on test data
def test_model(output_filename, model, test_iterator):
    """Evaluate ``model`` on the test iterator and return its confusion matrix.

    NOTE(review): the checkpoint path is hard-coded to
    "MLP_neural_network.pt" and ``output_filename`` is not used for
    loading; evaluation is forced onto the CPU.
    """
    #model.load_state_dict(torch.load(output_filename))
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()
    model, a, b, c = load_checkpoint(model, optimizer, criterion, "MLP_neural_network.pt")
    #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    device = 'cpu'
    model = model.to(device)
    criterion = criterion.to(device)
    test_loss, test_acc = evaluate(model, test_iterator, criterion, device)
    print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
    #output confusion matrix
    model.eval()
    with torch.no_grad():
        y_true = []
        y_choice = []
        for item in test_iterator:
            x=item['image'].to(device)
            y=item['classification'].to(device)
            y_pred, _ = model(x)
            for entry in y:
                y_true.append(entry)
            for entry in y_pred:
                # argmax over class scores -> predicted label
                y_choice.append(torch.argmax(entry).numpy())
        #print("#--------------------------------------#")
        #print("Confusion matrix of test predictions:")
        cm = confusion_matrix(y_true, y_choice)
    return cm
def write_results_to_csv(cm, output_filename, trial_num, cat_dict, frame_size, num_epochs, batch_size):
    """Append per-trial metrics derived from confusion matrix ``cm`` to
    <output_filename>.csv, writing a header block when the file is first
    created.

    Per-class accuracy, precision, sensitivity (recall) and specificity
    are computed from the confusion matrix; each metric column group
    starts with the class-mean value followed by one value per class in
    ``cat_dict``.
    """
    csv_filename = output_filename+'.csv'
    if os.path.isfile(csv_filename):
        csv_file = open(csv_filename, 'a')
        writer = csv.writer(csv_file)
    else:
        # first trial: create the file and emit the run header + column titles
        csv_file = open(csv_filename, 'w')
        writer = csv.writer(csv_file)
        row = ["Network: MLP"]
        writer.writerow(row)
        #row = [f"Seed: {SEED}", f"Frame size: {frame_size}", f"Number of epochs: {num_epochs}", f"Number of samples per batch: {batch_size}"]
        row = ["", f"Frame size: {frame_size}", f"Number of epochs: {num_epochs}", f"Number of samples per batch: {batch_size}"]
        writer.writerow(row)
        row = []
        writer.writerow(row)
        #Column titles
        row = ["Run#", "Ski-learn Accuracy", "Accuracy"]
        for i in range(len(cat_dict)):
            row.append("")
        row.append("Precision")
        for i in range(len(cat_dict)):
            row.append("")
        row.append("Sensitivity")
        for i in range(len(cat_dict)):
            row.append("")
        row.append("Specificity")
        writer.writerow(row)
        # second title row: the per-class column labels under each metric
        row = ["",""]
        for i in range(len(cat_dict)):
            row.append(cat_dict[i])
        row.append("")
        for i in range(len(cat_dict)):
            row.append(cat_dict[i])
        row.append("")
        for i in range(len(cat_dict)):
            row.append(cat_dict[i])
        row.append("")
        for i in range(len(cat_dict)):
            row.append(cat_dict[i])
        writer.writerow(row)
    #https://towardsdatascience.com/multi-class-classification-extracting-performance-metrics-from-the-confusion-matrix-b379b427a872
    #save metrics for the final results file
    FP = cm.sum(axis=0) - np.diag(cm)
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    TN = cm.sum() - (FP + FN + TP)
    FP = FP.astype(float)
    FN = FN.astype(float)
    TP = TP.astype(float)
    TN = TN.astype(float)
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP)
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Overall accuracy for each class
    ACC = (TP+TN)/(TP+FP+FN+TN)
    # NOTE(review): this compares column sums against row sums of the
    # confusion matrix via accuracy_score -- verify this is the intended
    # "ski-learn accuracy" rather than accuracy over true/predicted labels.
    ski_acc = metrics.accuracy_score(cm.sum(axis=0), cm.sum(axis=1), True)
    #Put the data in a csv friendly format
    row = [f"{trial_num}"]
    row.append(f"{ski_acc:.2f}")
    row.append(f"{np.nanmean(ACC):.2f}")
    for i in range(len(ACC)):
        row.append(f"{ACC[i]:.2f}")
    row.append(f"{np.nanmean(PPV):.2f}")
    for i in range(len(PPV)):
        row.append(f"{PPV[i]:.2f}")
    row.append(f"{np.nanmean(TPR):.2f}")
    for i in range(len(TPR)):
        row.append(f"{TPR[i]:.2f}")
    row.append(f"{np.nanmean(TNR):.2f}")
    # NOTE(review): loop bound uses len(PPV) while appending TNR values;
    # the arrays have equal length so output is unchanged, but the bound
    # looks like a copy-paste slip.
    for i in range(len(PPV)):
        row.append(f"{TNR[i]:.2f}")
    writer.writerow(row)
    csv_file.close()
'''
##########################################################################
# LRP stuff
##########################################################################
def compute_and_plot_explanation(rule, ax_, title=None, postprocess=None, pattern=None, cmap='seismic'):
# # # # For the interested reader:
# This is where the LRP magic happens.
# Reset gradient
x.grad = None
# Forward pass with rule argument to "prepare" the explanation
y_hat = model.forward(x, explain=True, rule=rule, pattern=pattern)
# Choose argmax
y_hat = y_hat[torch.arange(x.shape[0]), y_hat.max(1)[1]]
# y_hat *= 0.5 * y_hat # to use value of y_hat as starting point
y_hat = y_hat.sum()
# Backward pass (compute explanation)
y_hat.backward()
attr = x.grad
if postprocess: # Used to compute input * gradient
with torch.no_grad():
attr = postprocess(attr)
attr = heatmap_grid(attr, cmap_name=cmap)
if title is None: title = rule
plot_attribution(attr, ax_, pred, title, cmap=cmap)
#Find out what the accuracy is on test data
def test_model_lrp(output_filename, model, test_iterator):
#model.load_state_dict(torch.load(output_filename))
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
model, a, b, c = load_checkpoint(model, optimizer, criterion, "MLP_neural_network.pt")
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
model = model.to(device)
criterion = criterion.to(device)
test_loss, test_acc = evaluate(model, test_iterator, criterion, device)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
#output confusion matrix
model.eval()
with torch.no_grad():
y_true = []
y_choice = []
for item in test_iterator:
x=item['image'].to(device)
y=item['classification'].to(device)
y_pred, _ = model(x)
for entry in y:
y_true.append(entry)
for entry in y_pred:
y_choice.append(torch.argmax(entry).numpy())
print("#--------------------------------------#")
print("Confusion matrix of test predictions:")
print(confusion_matrix(y_true, y_choice))
# # # # Patterns for PatternNet and PatternAttribution
#patterns_all = fit_patternnet(model, train_loader, device=args.device)
patterns_all = fit_patternnet(model, test_iterator, device)
store_patterns("pattern_all.pkl", patterns_all)
pos_patterns_path = (base_path / 'examples' / 'patterns' / 'pattern_pos.pkl').as_posix()
if not os.path.exists(pos_patterns_path):
patterns_pos = fit_patternnet_positive(model, train_loader, device)#, max_iter=1)
store_patterns(pos_patterns_path, patterns_pos)
else:
patterns_pos = [torch.from_numpy(p).to(args.device) for p in load_patterns(pos_patterns_path)]
# # # Plotting
fig, ax = plt.subplots(2, 5, figsize=(10, 5))
with torch.no_grad():
x_plot = heatmap_grid(x*2-1, cmap_name="gray")
plot_attribution(x_plot, ax[0, 0], pred, "Input")
# compute_and_plot_explanation("gradient", ax[1, 0], title="gradient")
compute_and_plot_explanation("gradient", ax[1, 0], title="input $\\times$ gradient", postprocess = lambda attribution: attribution * x)
compute_and_plot_explanation("epsilon", ax[0, 1])
compute_and_plot_explanation("gamma+epsilon", ax[1, 1])
#
compute_and_plot_explanation("alpha1beta0", ax[0, 2])
compute_and_plot_explanation("alpha2beta1", ax[1, 2])
#
compute_and_plot_explanation("patternnet", ax[0, 3], pattern=patterns_all, title="PatternNet $S(x)$", cmap='gray')
compute_and_plot_explanation("patternnet", ax[1, 3], pattern=patterns_pos, title="PatternNet $S(x)_{+-}$", cmap='gray')
compute_and_plot_explanation("patternattribution", ax[0, 4], pattern=patterns_all, title="PatternAttribution $S(x)$")
compute_and_plot_explanation("patternattribution", ax[1, 4], pattern=patterns_pos, title="PatternAttribution $S(x)_{+-}$")
fig.tight_layout()
fig.savefig((base_path / 'examples' / 'plots' / "mnist_explanations.png").as_posix(), dpi=280)
plt.show()
return test_acc
'''
| 33.217573 | 217 | 0.711928 |
b9d6d327855355e490230e2f43b9b8faefb39cae | 897 | py | Python | ryzen/run_per_core_prof.py | akhilguliani/daemon | 5faae4fb303da563d661571d93d9c7a7e6a36fb0 | [
"Apache-2.0"
] | null | null | null | ryzen/run_per_core_prof.py | akhilguliani/daemon | 5faae4fb303da563d661571d93d9c7a7e6a36fb0 | [
"Apache-2.0"
] | 1 | 2020-06-12T06:14:14.000Z | 2020-06-12T06:14:14.000Z | ryzen/run_per_core_prof.py | akhilguliani/daemon | 5faae4fb303da563d661571d93d9c7a7e6a36fb0 | [
"Apache-2.0"
] | 1 | 2019-12-10T10:54:19.000Z | 2019-12-10T10:54:19.000Z |
import sys
from time import time
import psutil
from msr import setup_perf
from frequency import *
from launcher import *
## Setup perf registers and set frequency to max
setup_perf()
# ensure MSR module is loaded (needed to read/write model-specific registers)
psutil.Popen(args=["modprobe","msr"])
# set governor to userspace and freq to max possible
set_gov_userspace()
cur_freq = set_to_max_freq()
# workload description file is the single command-line argument
input_file = sys.argv[1]
max_cores= 8
# use every second logical CPU id (0, 2, 4, ...)
# NOTE(review): presumably one logical CPU per physical core on this
# SMT layout -- confirm against the machine's topology
core_list = [i*2 for i in range(max_cores)]
set_seq_freqs([3400000, 3000000, 2200000], max_cores)
## Read in the workloads and run
r = parse_file(input_file)
# timestamped output file name for the turbostat samples
tfile_name = input_file+str(time())
f = open("/mydata/output/percore/"+tfile_name, "w+")
f.flush()
# sample per-core counters once per second, including the retired-
# instructions MSR (0xC00000E9) as an extra raw column
tstat = psutil.Popen(args=["/home/guliani/kernels/tools/turbostat/turbostat", "--debug", "--interval=1", "--add=msr0xC00000E9,u64,cpu,sec,raw,RetI"], stdout=f)
print(tfile_name)
run_multiple_on_cores(r, cores=core_list)
# stop the sampler once the workloads finish
tstat.kill()
f.close()
| 26.382353 | 159 | 0.750279 |
e1a15eb345e26b13bf6384732af38807f7e38421 | 18,018 | py | Python | cea/optimization/slave/seasonal_storage/design_operation.py | VMarty/CityEnergyAnalyst | 5ab4385fc008f3b23f5bd5f9ba683d401cfcef38 | [
"MIT"
] | 1 | 2018-08-16T14:34:23.000Z | 2018-08-16T14:34:23.000Z | cea/optimization/slave/seasonal_storage/design_operation.py | VMarty/CityEnergyAnalyst | 5ab4385fc008f3b23f5bd5f9ba683d401cfcef38 | [
"MIT"
] | null | null | null | cea/optimization/slave/seasonal_storage/design_operation.py | VMarty/CityEnergyAnalyst | 5ab4385fc008f3b23f5bd5f9ba683d401cfcef38 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Storage Design And Operation
This File is called by "Storage_Optimizer_incl_Losses_main.py" (Optimization Routine) and
will operate the storage according to the inputs given by the main file.
The operation data is stored
"""
from __future__ import division
import pandas as pd
import os
import numpy as np
import Import_Network_Data_functions as fn
import SolarPowerHandler_incl_Losses as SPH_fn
from cea.optimization.constants import *
from cea.technologies.constants import DT_HEAT
from cea.resources.geothermal import calc_ground_temperature
from cea.utilities import epwreader
def Storage_Design(CSV_NAME, SOLCOL_TYPE, T_storage_old_K, Q_in_storage_old_W, locator,
STORAGE_SIZE_m3, STORE_DATA, context, P_HP_max_W, config):
"""
:param CSV_NAME:
:param SOLCOL_TYPE:
:param T_storage_old_K:
:param Q_in_storage_old_W:
:param locator:
:param STORAGE_SIZE_m3:
:param STORE_DATA:
:param context:
:param P_HP_max_W:
:param gV:
:type CSV_NAME:
:type SOLCOL_TYPE:
:type T_storage_old_K:
:type Q_in_storage_old_W:
:type locator:
:type STORAGE_SIZE_m3:
:type STORE_DATA:
:type context:
:type P_HP_max_W:
:type gV:
:return:
:rtype:
"""
MS_Var = context
# Import Network Data
Network_Data = pd.read_csv(locator.get_optimization_network_data_folder(CSV_NAME))
# recover Network Data:
mdot_heat_netw_total_kgpers = Network_Data['mdot_DH_netw_total_kgpers'].values
Q_DH_networkload_W = Network_Data['Q_DHNf_W'].values
T_DH_return_array_K = Network_Data['T_DHNf_re_K'].values
T_DH_supply_array_K = Network_Data['T_DHNf_sup_K'].values
Q_wasteheatServer_kWh = Network_Data['Qcdata_netw_total_kWh'].values
Solar_Data_SC = np.zeros((8760, 7))
Solar_Data_PVT = np.zeros((8760, 7))
Solar_Data_PV = np.zeros((8760, 7))
Solar_Tscr_th_SC_K = Solar_Data_SC[:,6]
Solar_E_aux_SC_req_kWh = Solar_Data_SC[:,1]
Solar_Q_th_SC_kWh = Solar_Data_SC[:,1]
Solar_Tscr_th_PVT_K = Solar_Data_PVT[:,6]
Solar_E_aux_PVT_kW = Solar_Data_PVT[:,1]
Solar_Q_th_SC_kWh = Solar_Data_PVT[:,2]
PVT_kWh = Solar_Data_PVT[:,5]
Solar_E_aux_PV_kWh = Solar_Data_PV[:,1]
PV_kWh = Solar_Data_PV[:,5]
# Import Solar Data
os.chdir(locator.get_potentials_solar_folder())
fNameArray = [MS_Var.SOLCOL_TYPE_PVT, MS_Var.SOLCOL_TYPE_SC_ET, MS_Var.SOLCOL_TYPE_SC_FP, MS_Var.SOLCOL_TYPE_PV]
#LOOP AROUND ALL SC TYPES
for solartype in range(4):
fName = fNameArray[solartype]
if MS_Var.SOLCOL_TYPE_SC_ET != "NONE" and fName == MS_Var.SOLCOL_TYPE_SC_ET:
Solar_Area_SC_ET_m2, Solar_E_aux_SC_ET_req_kWh, Solar_Q_th_SC_ET_kWh, Solar_Tscs_th_SC_ET, Solar_mcp_SC_ET_kWperC, SC_ET_kWh, Solar_Tscr_th_SC_ET_K\
= fn.import_solar_data(MS_Var.SOLCOL_TYPE_SC_ET)
if MS_Var.SOLCOL_TYPE_SC_FP != "NONE" and fName == MS_Var.SOLCOL_TYPE_SC_FP:
Solar_Area_SC_FP_m2, Solar_E_aux_SC_FP_req_kWh, Solar_Q_th_SC_FP_kWh, Solar_Tscs_th_SC_FP, Solar_mcp_SC_FP_kWperC, SC_FP_kWh, Solar_Tscr_th_SC_FP_K \
= fn.import_solar_data(MS_Var.SOLCOL_TYPE_SC_FP)
if MS_Var.SOLCOL_TYPE_PVT != "NONE" and fName == MS_Var.SOLCOL_TYPE_PVT:
Solar_Area_PVT_m2, Solar_E_aux_PVT_kW, Solar_Q_th_PVT_kW, Solar_Tscs_th_PVT, Solar_mcp_PVT_kWperC, PVT_kWh, Solar_Tscr_th_PVT_K \
= fn.import_solar_data(MS_Var.SOLCOL_TYPE_PVT)
if MS_Var.SOLCOL_TYPE_PV != "NONE" and fName == MS_Var.SOLCOL_TYPE_PV:
Solar_Area_PV_m2, Solar_E_aux_PV_kWh, Solar_Q_th_PV_kW, Solar_Tscs_th_PV, Solar_mcp_PV_kWperC, PV_kWh, Solar_Tscr_th_PV_K\
= fn.import_solar_data(MS_Var.SOLCOL_TYPE_PV)
# Recover Solar Data
Solar_E_aux_W = np.ravel(Solar_E_aux_SC_ET_req_kWh * 1000 * MS_Var.SOLAR_PART_SC_ET) + np.ravel(Solar_E_aux_SC_FP_req_kWh * 1000 * MS_Var.SOLAR_PART_SC_FP)\
+ np.ravel(Solar_E_aux_PVT_kW * 1000 * MS_Var.SOLAR_PART_PVT) + np.ravel(Solar_E_aux_PV_kWh * 1000 * MS_Var.SOLAR_PART_PV)
Q_SC_ET_gen_Wh = Solar_Q_th_SC_ET_kWh * 1000 * MS_Var.SOLAR_PART_SC_ET
Q_SC_FP_gen_Wh = Solar_Q_th_SC_FP_kWh * 1000 * MS_Var.SOLAR_PART_SC_FP
Q_PVT_gen_Wh = Solar_Q_th_PVT_kW * 1000 * MS_Var.SOLAR_PART_PVT
Q_SCandPVT_gen_Wh = np.zeros(8760)
weather_data = epwreader.epw_reader(config.weather)[['year', 'drybulb_C', 'wetbulb_C','relhum_percent',
'windspd_ms', 'skytemp_C']]
ground_temp = calc_ground_temperature(locator, weather_data['drybulb_C'], depth_m=10)
for hour in range(len(Q_SCandPVT_gen_Wh)):
Q_SCandPVT_gen_Wh[hour] = Q_SC_ET_gen_Wh[hour] + Q_SC_FP_gen_Wh[hour] + Q_PVT_gen_Wh[hour]
E_PV_Wh = PV_kWh * 1000 * MS_Var.SOLAR_PART_PV
E_PVT_Wh = PVT_kWh * 1000 * MS_Var.SOLAR_PART_PVT
HOUR = 0
Q_to_storage_avail_W = np.zeros(8760)
Q_from_storage_W = np.zeros(8760)
to_storage = np.zeros(8760)
Q_storage_content_fin_W = np.zeros(8760)
Q_server_to_directload_W = np.zeros(8760)
Q_server_to_storage_W = np.zeros(8760)
Q_compair_to_directload_W = np.zeros(8760)
Q_compair_to_storage_W = np.zeros(8760)
Q_PVT_to_directload_W = np.zeros(8760)
Q_PVT_to_storage_W = np.zeros(8760)
Q_SC_ET_to_directload_W = np.zeros(8760)
Q_SC_ET_to_storage_W = np.zeros(8760)
Q_SC_FP_to_directload_W = np.zeros(8760)
Q_SC_FP_to_storage_W = np.zeros(8760)
T_storage_fin_K = np.zeros(8760)
Q_from_storage_fin_W = np.zeros(8760)
Q_to_storage_fin_W = np.zeros(8760)
E_aux_ch_fin_W = np.zeros(8760)
E_aux_dech_fin_W = np.zeros(8760)
#E_PV_Wh_fin = np.zeros(8760)
E_aux_solar_W = np.zeros(8760)
Q_missing_fin_W = np.zeros(8760)
Q_from_storage_used_fin_W = np.zeros(8760)
Q_rejected_fin_W = np.zeros(8760)
mdot_DH_fin_kgpers = np.zeros(8760)
Q_uncontrollable_fin_Wh = np.zeros(8760)
E_aux_solar_and_heat_recovery_Wh = np.zeros(8760)
HPServerHeatDesignArray_kWh = np.zeros(8760)
HPpvt_designArray_Wh = np.zeros(8760)
HPCompAirDesignArray_kWh = np.zeros(8760)
HPScDesignArray_Wh = np.zeros(8760)
T_amb_K = 10 + 273.0 # K
T_storage_min_K = MS_Var.T_ST_MAX
Q_disc_seasonstart_W = [0]
Q_loss_tot_W = 0
while HOUR < 8760:
# Store later on this data
HPServerHeatDesign_kWh = 0
HPpvt_design_Wh = 0
HPCompAirDesign_kWh = 0
HPScDesign_Wh = 0
T_DH_sup_K = T_DH_supply_array_K[HOUR]
T_DH_return_K = T_DH_return_array_K[HOUR]
mdot_DH_kgpers = mdot_heat_netw_total_kgpers[HOUR]
if MS_Var.WasteServersHeatRecovery == 1:
Q_server_gen_kW = Q_wasteheatServer_kWh[HOUR]
else:
Q_server_gen_kW = 0
# if MS_Var.WasteCompressorHeatRecovery == 1:
# Q_compair_gen_kW= Q_wasteheatCompAir_kWh[HOUR]
# else:
# Q_compair_gen_kW = 0
Q_SC_ET_gen_W = Q_SC_ET_gen_Wh[HOUR]
Q_SC_FP_gen_W = Q_SC_FP_gen_Wh[HOUR]
Q_PVT_gen_W = Q_PVT_gen_Wh[HOUR]
# check if each source needs a heat-pump, calculate the final energy
if T_DH_sup_K > T_EL_TO_HEAT_SUP - DT_HEAT: #and checkpoint_ElToHeat == 1:
#use a heat pump to bring it to distribution temp
COP_th = T_DH_sup_K / (T_DH_sup_K - (T_EL_TO_HEAT_SUP - DT_HEAT))
COP = HP_ETA_EX * COP_th
E_aux_Server_kWh = Q_server_gen_kW * (1/COP) # assuming the losses occur after the heat pump
if E_aux_Server_kWh > 0:
HPServerHeatDesign_kWh = Q_server_gen_kW
Q_server_gen_kW += E_aux_Server_kWh
else:
E_aux_Server_kWh = 0.0
# if T_DH_sup_K > T_FROM_SERVER - DT_HEAT:# and checkpoint_QfromServer == 1:
# #use a heat pump to bring it to distribution temp
# COP_th = T_DH_sup_K / (T_DH_sup_K - (T_FROM_SERVER - DT_HEAT))
# COP = HP_ETA_EX * COP_th
# E_aux_CAH_kWh = Q_compair_gen_kW * (1/COP) # assuming the losses occur after the heat pump
# if E_aux_Server_kWh > 0:
# HPCompAirDesign_kWh = Q_compair_gen_kW
# Q_compair_gen_kW += E_aux_CAH_kWh
# else:
# E_aux_CAH_kWh = 0.0
#eliminating compressed air of the code
E_aux_CAH_kWh = 0
Q_compair_gen_kW = 0
if T_DH_sup_K > Solar_Tscr_th_PVT_K[HOUR] - DT_HEAT:# and checkpoint_PVT == 1:
#use a heat pump to bring it to distribution temp
COP_th = T_DH_sup_K / (T_DH_sup_K - (Solar_Tscr_th_PVT_K[HOUR] - DT_HEAT))
COP = HP_ETA_EX * COP_th
E_aux_PVT_Wh = Q_PVT_gen_W * (1/COP) # assuming the losses occur after the heat pump
if E_aux_PVT_Wh > 0:
HPpvt_design_Wh = Q_PVT_gen_W
Q_PVT_gen_W += E_aux_PVT_Wh
else:
E_aux_PVT_Wh = 0.0
if T_DH_sup_K > Solar_Tscr_th_SC_ET_K[HOUR] - DT_HEAT:# and checkpoint_SC == 1:
#use a heat pump to bring it to distribution temp
COP_th = T_DH_sup_K / (T_DH_sup_K - (Solar_Tscr_th_SC_ET_K[HOUR] - DT_HEAT))
COP = HP_ETA_EX * COP_th
E_aux_SC_ET_Wh = Q_SC_ET_gen_W * (1/COP) # assuming the losses occur after the heat pump
if E_aux_SC_ET_Wh > 0:
HPScDesign_Wh = Q_SC_ET_gen_W
Q_SC_ET_gen_W += E_aux_SC_ET_Wh
else:
E_aux_SC_ET_Wh = 0.0
if T_DH_sup_K > Solar_Tscr_th_SC_FP_K[HOUR] - DT_HEAT:# and checkpoint_SC == 1:
#use a heat pump to bring it to distribution temp
COP_th = T_DH_sup_K / (T_DH_sup_K - (Solar_Tscr_th_SC_FP_K[HOUR] - DT_HEAT))
COP = HP_ETA_EX * COP_th
E_aux_SC_FP_Wh = Q_SC_FP_gen_W * (1/COP) # assuming the losses occur after the heat pump
if E_aux_SC_FP_Wh > 0:
HPScDesign_Wh = Q_SC_FP_gen_W
Q_SC_FP_gen_W += E_aux_SC_FP_Wh
else:
E_aux_SC_FP_Wh = 0.0
HPServerHeatDesignArray_kWh[HOUR] = HPServerHeatDesign_kWh
HPpvt_designArray_Wh[HOUR] = HPpvt_design_Wh
HPCompAirDesignArray_kWh[HOUR] = HPCompAirDesign_kWh
HPScDesignArray_Wh[HOUR] = HPScDesign_Wh
E_aux_HP_uncontrollable_Wh = float(E_aux_SC_FP_Wh + E_aux_SC_ET_Wh + E_aux_PVT_Wh + E_aux_CAH_kWh + E_aux_Server_kWh)
# Heat Recovery has some losses, these are taken into account as "overall Losses", i.e.: from Source to DH Pipe
# hhhhhhhhhhhhhh GET VALUES
Q_server_gen_W = Q_server_gen_kW * ETA_SERVER_TO_HEAT * 1000 # converting to W
Q_compair_gen_W = Q_compair_gen_kW * ETA_EL_TO_HEAT * 1000
Q_network_demand_W = Q_DH_networkload_W[HOUR]
Storage_Data = SPH_fn.Storage_Operator(Q_PVT_gen_W, Q_SC_ET_gen_W, Q_SC_FP_gen_W, Q_server_gen_W, Q_compair_gen_W, Q_network_demand_W, T_storage_old_K, T_DH_sup_K, T_amb_K, \
Q_in_storage_old_W, T_DH_return_K, mdot_DH_kgpers, STORAGE_SIZE_m3, context, P_HP_max_W, ground_temp[HOUR])
Q_in_storage_new_W = Storage_Data[0]
T_storage_new_K = Storage_Data[1]
Q_to_storage_final_W = Storage_Data[3]
Q_from_storage_req_final_W = Storage_Data[2]
E_aux_ch_W = Storage_Data[4]
E_aux_dech_W = Storage_Data[5]
Q_missing_W = Storage_Data[6]
Q_from_storage_used_fin_W[HOUR] = Storage_Data[7]
Q_loss_tot_W += Storage_Data[8]
mdot_DH_afterSto_kgpers = Storage_Data[9]
Q_server_to_directload_W[HOUR] = Storage_Data[10]
Q_server_to_storage_W[HOUR] = Storage_Data[11]
Q_compair_to_directload_W[HOUR] = Storage_Data[12]
Q_compair_to_storage_W[HOUR] = Storage_Data[13]
Q_PVT_to_directload_W[HOUR] = Storage_Data[14]
Q_PVT_to_storage_W[HOUR] = Storage_Data[15]
Q_SC_ET_to_directload_W[HOUR] = Storage_Data[16]
Q_SC_ET_to_storage_W[HOUR] = Storage_Data[17]
Q_SC_FP_to_directload_W[HOUR] = Storage_Data[18]
Q_SC_FP_to_storage_W[HOUR] = Storage_Data[19]
if Q_in_storage_new_W < 0.0001:
Q_in_storage_new_W = 0
if T_storage_new_K >= MS_Var.T_ST_MAX-0.001: # no more charging possible - reject energy
Q_in_storage_new_W = min(Q_in_storage_old_W, Storage_Data[0])
Q_to_storage_final_W = max(Q_in_storage_new_W - Q_in_storage_old_W, 0)
Q_rejected_fin_W[HOUR] = Q_PVT_gen_W + Q_SC_ET_gen_W + Q_SC_FP_gen_W + Q_compair_gen_W + Q_server_gen_W - Storage_Data[3]
T_storage_new_K = min(T_storage_old_K, T_storage_new_K)
E_aux_ch_W = 0
Q_storage_content_fin_W[HOUR] = Q_in_storage_new_W
Q_in_storage_old_W = Q_in_storage_new_W
T_storage_fin_K[HOUR] = T_storage_new_K
T_storage_old_K = T_storage_new_K
if T_storage_old_K < T_amb_K-1: # chatch an error if the storage temperature is too low
# print "ERROR!"
break
Q_from_storage_fin_W[HOUR] = Q_from_storage_req_final_W
Q_to_storage_fin_W[HOUR] = Q_to_storage_final_W
E_aux_ch_fin_W[HOUR] = E_aux_ch_W
E_aux_dech_fin_W[HOUR] = E_aux_dech_W
E_aux_solar_W[HOUR] = Solar_E_aux_W[HOUR]
Q_uncontrollable_fin_Wh[HOUR] = Q_PVT_to_directload_W[HOUR] + Q_SC_ET_to_directload_W[HOUR] + Q_SC_FP_to_directload_W[HOUR] + Q_compair_to_directload_W[HOUR] + Q_server_to_directload_W[HOUR]
Q_missing_fin_W[HOUR] = Q_network_demand_W - Q_uncontrollable_fin_Wh[HOUR] - Q_from_storage_used_fin_W[HOUR]
E_aux_solar_and_heat_recovery_Wh[HOUR] = float(E_aux_HP_uncontrollable_Wh)
mdot_DH_fin_kgpers[HOUR] = mdot_DH_afterSto_kgpers
# Q_from_storage_fin_W[HOUR] = Q_DH_networkload_W[HOUR] - Q_missing_W
if T_storage_new_K <= T_storage_min_K:
T_storage_min_K = T_storage_new_K
Q_disc_seasonstart_W[0] += Q_from_storage_req_final_W
HOUR += 1
""" STORE DATA """
E_aux_solar_and_heat_recovery_flat_Wh = E_aux_solar_and_heat_recovery_Wh.flatten()
# Calculate imported and exported Electricity Arrays:
E_produced_total_W = np.zeros(8760)
E_consumed_for_storage_solar_and_heat_recovery_W = np.zeros(8760)
for hour in range(8760):
E_produced_total_W[hour] = E_PV_Wh[hour] + E_PVT_Wh[hour]
E_consumed_for_storage_solar_and_heat_recovery_W[hour] = E_aux_ch_fin_W[hour] + E_aux_dech_fin_W[hour] + E_aux_solar_and_heat_recovery_Wh[hour]
if STORE_DATA == "yes":
date = Network_Data.DATE.values
results = pd.DataFrame(
{"DATE": date,
"Q_storage_content_W":Q_storage_content_fin_W,
"Q_DH_networkload_W":Q_DH_networkload_W,
"Q_uncontrollable_hot_W":Q_uncontrollable_fin_Wh,
"Q_to_storage_W":Q_to_storage_fin_W,
"Q_from_storage_used_W":Q_from_storage_used_fin_W,
"Q_server_to_directload_W":Q_server_to_directload_W,
"Q_server_to_storage_W":Q_server_to_storage_W,
"Q_compair_to_directload_W":Q_compair_to_directload_W,
"Q_compair_to_storage_W":Q_compair_to_storage_W,
"Q_PVT_to_directload_W":Q_PVT_to_directload_W,
"Q_PVT_to_storage_W": Q_PVT_to_storage_W,
"Q_SC_ET_to_directload_W":Q_SC_ET_to_directload_W,
"Q_SC_ET_to_storage_W":Q_SC_ET_to_storage_W,
"Q_SC_FP_to_directload_W": Q_SC_FP_to_directload_W,
"Q_SC_FP_to_storage_W": Q_SC_FP_to_storage_W,
"E_aux_ch_W":E_aux_ch_fin_W,
"E_aux_dech_W":E_aux_dech_fin_W,
"Q_missing_W":Q_missing_fin_W,
"mdot_DH_fin_kgpers":mdot_DH_fin_kgpers,
"E_aux_solar_and_heat_recovery_Wh": E_aux_solar_and_heat_recovery_Wh,
"E_consumed_for_storage_solar_and_heat_recovery_W": E_consumed_for_storage_solar_and_heat_recovery_W,
"E_PV_Wh":E_PV_Wh,
"E_PVT_Wh":E_PVT_Wh,
"E_produced_from_solar_W": E_produced_total_W,
"Storage_Size_m3":STORAGE_SIZE_m3,
"Q_SC_ET_gen_Wh":Q_SC_ET_gen_Wh,
"Q_SC_FP_gen_Wh": Q_SC_FP_gen_Wh,
"Q_PVT_gen_Wh": Q_PVT_gen_Wh,
"HPServerHeatDesignArray_kWh":HPServerHeatDesignArray_kWh,
"HPpvt_designArray_Wh":HPpvt_designArray_Wh,
"HPCompAirDesignArray_kWh":HPCompAirDesignArray_kWh,
"HPScDesignArray_Wh":HPScDesignArray_Wh,
"Q_rejected_fin_W":Q_rejected_fin_W,
"P_HPCharge_max_W":P_HP_max_W
})
storage_operation_data_path = locator.get_optimization_slave_storage_operation_data(MS_Var.individual_number,
MS_Var.generation_number)
results.to_csv(storage_operation_data_path, index=False)
Q_stored_max_W = np.amax(Q_storage_content_fin_W)
T_st_max_K = np.amax(T_storage_fin_K)
T_st_min_K = np.amin(T_storage_fin_K)
return Q_stored_max_W, Q_rejected_fin_W, Q_disc_seasonstart_W, T_st_max_K, T_st_min_K, Q_storage_content_fin_W, T_storage_fin_K, \
Q_loss_tot_W, mdot_DH_fin_kgpers, Q_uncontrollable_fin_Wh
""" DESCRIPTION FOR FUTHER USAGE"""
# Q_missing_fin : has to be replaced by other means, like a HP
# Q_from_storage_fin : What is used from Storage
# Q_aus_fin : how much energy was spent on Auxillary power !! NOT WORKING PROPERLY !!
# Q_from_storage_fin : How much energy was used from the storage !! NOT WORKING PROPERLY !!
# Q_missing_fin : How much energy is missing
| 45.5 | 198 | 0.679321 |
7461e8f6dc27c4d12823a9631c1657f129661e53 | 6,575 | py | Python | ceilometer/tests/compute/virt/vmware/test_inspector.py | rdo-management/ceilometer | df10a87ea7810f3ebf47b7d027e30a8403d89b0f | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/compute/virt/vmware/test_inspector.py | rdo-management/ceilometer | df10a87ea7810f3ebf47b7d027e30a8403d89b0f | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/compute/virt/vmware/test_inspector.py | rdo-management/ceilometer | df10a87ea7810f3ebf47b7d027e30a8403d89b0f | [
"Apache-2.0"
] | 3 | 2015-10-08T20:03:36.000Z | 2020-02-05T10:45:50.000Z | # Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for VMware Vsphere inspector.
"""
import mock
from oslo.vmware import api
from oslotest import base
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.vmware import inspector as vsphere_inspector
class TestVsphereInspection(base.BaseTestCase):
    """Tests for VsphereInspector with the vSphere operations layer mocked."""
    def setUp(self):
        # The API session is constructed but never actually opened
        # (create_session=False); the ops layer is then replaced wholesale
        # with a MagicMock so no real vSphere calls are made.
        api_session = api.VMwareAPISession("test_server", "test_user",
                                           "test_password", 0, None,
                                           create_session=False, port=7443)
        vsphere_inspector.get_api_session = mock.Mock(
            return_value=api_session)
        self._inspector = vsphere_inspector.VsphereInspector()
        self._inspector._ops = mock.MagicMock()
        super(TestVsphereInspection, self).setUp()
    def test_inspect_memory_usage(self):
        """A mocked aggregate stat of 1024.0 must surface as usage=1.0."""
        # NOTE(review): the 1024 -> 1.0 mapping suggests a KB -> MB
        # conversion inside the inspector — confirm against the inspector.
        fake_instance_moid = 'fake_instance_moid'
        fake_instance_id = 'fake_instance_id'
        fake_perf_counter_id = 'fake_perf_counter_id'
        fake_memory_value = 1024.0
        fake_stat = virt_inspector.MemoryUsageStats(usage=1.0)
        def construct_mock_instance_object(fake_instance_id):
            # minimal stand-in for a nova instance: only ``id`` is read
            instance_object = mock.MagicMock()
            instance_object.id = fake_instance_id
            return instance_object
        fake_instance = construct_mock_instance_object(fake_instance_id)
        self._inspector._ops.get_vm_moid.return_value = fake_instance_moid
        (self._inspector._ops.
         get_perf_counter_id.return_value) = fake_perf_counter_id
        (self._inspector._ops.query_vm_aggregate_stats.
         return_value) = fake_memory_value
        memory_stat = self._inspector.inspect_memory_usage(fake_instance)
        self.assertEqual(fake_stat, memory_stat)
    def test_inspect_cpu_util(self):
        """The ops layer reports util scaled by 100; inspector undoes it."""
        fake_instance_moid = 'fake_instance_moid'
        fake_instance_id = 'fake_instance_id'
        fake_perf_counter_id = 'fake_perf_counter_id'
        fake_cpu_util_value = 60
        fake_stat = virt_inspector.CPUUtilStats(util=60)
        def construct_mock_instance_object(fake_instance_id):
            # minimal stand-in for a nova instance: only ``id`` is read
            instance_object = mock.MagicMock()
            instance_object.id = fake_instance_id
            return instance_object
        fake_instance = construct_mock_instance_object(fake_instance_id)
        self._inspector._ops.get_vm_moid.return_value = fake_instance_moid
        (self._inspector._ops.get_perf_counter_id.
         return_value) = fake_perf_counter_id
        # mocked raw stat is the expected util multiplied by 100
        (self._inspector._ops.query_vm_aggregate_stats.
         return_value) = fake_cpu_util_value * 100
        cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance)
        self.assertEqual(fake_stat, cpu_util_stat)
    def test_inspect_vnic_rates(self):
        """Per-vNIC rx/tx stats come back multiplied by 1024 in the result."""
        # construct test data
        test_vm_moid = "vm-21"
        vnic1 = "vnic-1"
        vnic2 = "vnic-2"
        counter_name_to_id_map = {
            vsphere_inspector.VC_NETWORK_RX_COUNTER: 1,
            vsphere_inspector.VC_NETWORK_TX_COUNTER: 2
        }
        counter_id_to_stats_map = {
            1: {vnic1: 1, vnic2: 3},
            2: {vnic1: 2, vnic2: 4},
        }
        def get_counter_id_side_effect(counter_full_name):
            return counter_name_to_id_map[counter_full_name]
        def query_stat_side_effect(vm_moid, counter_id, duration):
            # assert inputs
            self.assertEqual(test_vm_moid, vm_moid)
            self.assertIn(counter_id, counter_id_to_stats_map)
            return counter_id_to_stats_map[counter_id]
        # configure vsphere operations mock with the test data
        ops_mock = self._inspector._ops
        ops_mock.get_vm_moid.return_value = test_vm_moid
        ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect
        ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect
        result = self._inspector.inspect_vnic_rates(mock.MagicMock())
        # validate result: raw stats of (1, 2) and (3, 4) are expected back
        # as (1024, 2048) and (3072, 4096) respectively
        expected_stats = {
            vnic1: virt_inspector.InterfaceRateStats(1024, 2048),
            vnic2: virt_inspector.InterfaceRateStats(3072, 4096)
        }
        for vnic, rates_info in result:
            self.assertEqual(expected_stats[vnic.name], rates_info)
    def test_inspect_disk_rates(self):
        """Byte rates are scaled by 1024; request rates pass through as-is."""
        # construct test data
        test_vm_moid = "vm-21"
        disk1 = "disk-1"
        disk2 = "disk-2"
        counter_name_to_id_map = {
            vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1,
            vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2,
            vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3,
            vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4
        }
        # disk2 deliberately has no entry for counter 4: the inspector is
        # expected to report 0 write requests for it
        counter_id_to_stats_map = {
            1: {disk1: 1, disk2: 2},
            2: {disk1: 300, disk2: 400},
            3: {disk1: 5, disk2: 6},
            4: {disk1: 700},
        }
        def get_counter_id_side_effect(counter_full_name):
            return counter_name_to_id_map[counter_full_name]
        def query_stat_side_effect(vm_moid, counter_id, duration):
            # assert inputs
            self.assertEqual(test_vm_moid, vm_moid)
            self.assertIn(counter_id, counter_id_to_stats_map)
            return counter_id_to_stats_map[counter_id]
        # configure vsphere operations mock with the test data
        ops_mock = self._inspector._ops
        ops_mock.get_vm_moid.return_value = test_vm_moid
        ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect
        ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect
        result = self._inspector.inspect_disk_rates(mock.MagicMock())
        # validate result
        expected_stats = {
            disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700),
            disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0)
        }
        actual_stats = dict((disk.device, rates) for (disk, rates) in result)
        self.assertEqual(expected_stats, actual_stats)
| 39.608434 | 78 | 0.681065 |
841180795de2068473c52cb75a9f59bee1785e80 | 3,091 | py | Python | misc/window_control.py | Madd0g/talon-configs | 0be618cd5185de11c5916b1f6b4f67b67121c3b4 | [
"Unlicense"
] | 7 | 2019-10-13T23:25:06.000Z | 2021-12-01T01:01:58.000Z | misc/window_control.py | Madd0g/talon-configs | 0be618cd5185de11c5916b1f6b4f67b67121c3b4 | [
"Unlicense"
] | null | null | null | misc/window_control.py | Madd0g/talon-configs | 0be618cd5185de11c5916b1f6b4f67b67121c3b4 | [
"Unlicense"
] | 4 | 2020-01-17T19:50:48.000Z | 2022-03-01T05:39:04.000Z | from os import system
from talon.voice import Context, Key, press
from talon import macos, applescript
from ..utils import parse_words_as_integer
ctx = Context("window_control")
def jump_tab(m):
    """Switch to a numbered browser/editor tab (1-8) via cmd-<number>.

    ``m._words[1:]`` holds the spoken number words following the command.
    """
    number = parse_words_as_integer(m._words[1:])
    if number is not None and 0 < number < 9:
        press("cmd-%s" % number)
def quit_voice_apps(m):
    """Quit both the Dragon and Talon applications via one AppleScript."""
    script = '''
    tell application "Dragon"
        quit
    end tell
    tell application "Talon"
        quit
    end tell'''
    result = applescript.run(script)
    print(result)
# Register spoken-phrase -> action bindings for window/tab/space control.
# Values are either a Key(...) chord, a callback taking the match object,
# or a macos dock notification lambda.
ctx.keymap(
    {
        # tab control
        "(open | new) tab": Key("cmd-t"),
        "close (tab | file | whale)": Key("cmd-w"),
        # "swell": Key("cmd-w"),
        "([switch] tab (right | next))": Key("cmd-shift-]"),
        "([switch] tab (left | previous | preev))": Key("cmd-shift-["),
        "[switch] tab (1 | 2 | 3 | 4 | 5 | 6 | 7 | 8)": jump_tab,
        "[switch] tab (end | rightmost)": Key("cmd-9"),
        "all snap": Key("cmd-alt-ctrl-g"),
        "snap": Key("cmd-alt-ctrl-f6"),
        # TODO:: toggle space num
        # TODO:: toggle window between spaces
        # windy windy windy windy windy
        # 'windy max': Key('cmd-alt-f'),
        'win left': Key('ctrl-alt-cmd-left'),
        'win right': Key('ctrl-alt-cmd-right'),
        'win max': Key('ctrl-alt-cmd-m'),
        'win corner': Key('ctrl-alt-cmd-o'),
        '(win | window) min': Key('cmd-m'),
        # zooming
        "zoom in": Key("cmd-="),
        "zoom out": Key("cmd--"),
        "(zoom normal | zoom reset)": Key("cmd-0"),
        # window control
        "(open | new) window": Key("cmd-n"),
        "close window": Key("cmd-shift-w"),
        # "([switch] window (next | right) | gibby)": Key("cmd-`"),
        # "([switch] window (left | previous | preev) | shibby)": Key("cmd-shift-`"),
        "[switch] space (right | next)": Key("ctrl-right"),
        "window [to] space (right | next)": Key("ctrl-alt-right"),
        "[switch] space (left | previous | preev)": Key("ctrl-left"),
        "window [to] space (left | previous | preev)": Key("ctrl-alt-left"),
        "(minimise window | curtail)": Key("cmd-m"),
        "([show] (app | application) windows | expozay)": lambda m: macos.dock_notify("com.apple.expose.front.awake"),
        "quit it": Key("cmd-q"),
        "quit voice": quit_voice_apps,
        # application navigation
        "[open] launcher": Key("cmd-space"),
        "([switch] app (next | right) | swick)": Key("cmd-tab"),
        "[switch] app (left | previous | preev)": Key("cmd-shift-tab"),
        "[open] mission control": lambda m: macos.dock_notify("com.apple.expose.awake"),
        "[open] launchpad": lambda m: macos.dock_notify("com.apple.launchpad.toggle"),
        # the following requires keyboard shortcut for mission control in System Preferences > Keyboard > Shortcuts > Mission Control > Show Notification Center.
        # is there a bundle id we can use instead?
        # "([(open | show)] notification center | ( (show | open) (today | widgets) ))": Key("shift-ctrl-f8"),
    }
)
1d0006654e4b55f44be844ea21e9916e4fbad82e | 13,937 | py | Python | ptfce/ptfce.py | drammock/ptfce | 572fd1355b92e05532d57477a460659577a46c23 | [
"BSD-3-Clause"
] | null | null | null | ptfce/ptfce.py | drammock/ptfce | 572fd1355b92e05532d57477a460659577a46c23 | [
"BSD-3-Clause"
] | null | null | null | ptfce/ptfce.py | drammock/ptfce | 572fd1355b92e05532d57477a460659577a46c23 | [
"BSD-3-Clause"
] | null | null | null | '''
Adaptation of the pTFCE algorithm to MEG data.
Probabilistic TFCE (pTFCE) was originally published in:
Spisák T, Spisák Z, Zunhammer M, Bingel U, Smith S, Nichols T, & Kincses T
(2019). Probabilistic TFCE: A generalized combination of cluster size and
voxel intensity to increase statistical power. NeuroImage, 185, 12–26.
https://doi.org/10.1016/j.neuroimage.2018.09.078
The original implementations (in R and MATLAB) are here:
- https://github.com/spisakt/pTFCE
- https://github.com/spisakt/pTFCE_spm
As a result of the original authors' interest in MRI analysis, much of the
implementation relies on Gaussian Random Field Theory (GRFT) which, while
appropriate for MRI (where voxel measurements are independent and image values
are routinely z-scored) are not suitable for M/EEG-based distributed source
estimates (where the number of source vertices is much greater than the data
rank, and where activation values are usually restricted to be non-negative).
This code adapts pTFCE to the M/EEG source imaging case, by empirically
generating the necessary null distributions of suprathreshold source
activations and cluster sizes, instead of deriving them from GRFT.
author: Daniel McCloy <dan@mccloy.info>
license: BSD 3-clause
'''
# from functools import partial
from time import perf_counter
from contextlib import contextmanager
import numpy as np
from scipy.integrate import trapezoid
from scipy.interpolate import interp1d
from scipy.stats import norm, gaussian_kde
import mne
# prevent NaN propagation / careless array munging: escalate numpy's
# invalid-value and divide-by-zero RuntimeWarnings, plus the ragged
# nested-sequence warning, into hard errors
import warnings
warnings.filterwarnings('error', 'invalid value encountered in')
warnings.filterwarnings('error', 'divide by zero encountered in')
warnings.filterwarnings('error', 'Creating an ndarray from ragged nested')
@contextmanager
def timer(description: str) -> None:
    """Context manager that prints the wall-clock time spent in its body.

    Parameters
    ----------
    description : str
        Label printed before the timed section and appended to the elapsed
        time line; pass an empty string to print only the elapsed time.
    """
    if description:
        print(description)
    start = perf_counter()
    try:
        yield
    finally:
        # report timing even if the timed body raised, so long-running
        # pipelines still show where the time went before the failure
        elapsed_time = perf_counter() - start
        space = ' ' if description else ''
        print(f'elapsed time{space}{description}: {elapsed_time:.4f} sec.')
def calc_thresholds(data, n_thresh=100):
    """Compute pTFCE thresholds, equidistant in -log(p) space.

    Returns the threshold values (in the data's units) and the constant
    spacing between consecutive -log(p) levels.
    """
    # highest -log(p) level corresponds to the largest data value
    log_p_max = -1 * norm.logsf(data.max())
    log_p_grid = np.linspace(0., log_p_max, n_thresh)
    step = log_p_grid[1] - log_p_grid[0]
    thresholds = norm.isf(np.exp(-1 * log_p_grid))
    # the first grid point maps to -inf; clamp it to zero (or just below
    # the data minimum, whichever is smaller) so every datum is included
    thresholds[0] = min(0., data.min() - np.finfo(data.dtype).eps)
    return thresholds, step
def _find_clusters(data, threshold, adjacency):
    """Return clusters (lists of vertex indices) exceeding ``threshold``."""
    mask = (data > threshold)
    # XXX delegate connected-component search to MNE's clustering machinery
    return mne.stats.cluster_level._get_components(mask, adjacency)
def _get_cluster_sizes(clusters):
"""Get cluster sizes from the _find_clusters output (helper function)."""
return np.array([len(clust) for clust in clusters], dtype=int)
def _cluster_size_density_factory(sizes, max_cluster_size):
"""Find empirically the distribution (density func) of cluster sizes."""
unique_sizes = np.unique(sizes)
if len(unique_sizes) == 0:
return lambda x: np.atleast_1d(np.zeros_like(x, float))
elif len(unique_sizes) == 1:
# can't use gaussian_kde (LinAlgError); make unimodal prob mass func:
return lambda x: np.atleast_1d(x == unique_sizes[0]).astype(float)
else:
counts = np.bincount(sizes)
x = np.nonzero(counts)[0]
y = counts[x]
# we need to interp first before normalizing
_x = np.arange(max_cluster_size) + 1
_y = interp1d(x=x, y=y, fill_value=tuple(y[[0, -1]]),
bounds_error=False)(_x)
_y = _y / _y.sum()
return interp1d(x=_x, y=_y)
def _suprathresh_density_given_cluster_size(
thresholds, all_thresholds, observed_cluster_size,
source_activation_density_func,
threshold_specific_cluster_size_density_func):
"""PDF of threshold or activation value, given an observed cluster size.
Equivalent in pTFCE source code is dvox.clust(); Spisák et al. equation 2
"""
numer = (source_activation_density_func(thresholds) * # p(hᵢ)
threshold_specific_cluster_size_density_func(
thresholds, observed_cluster_size)) # p(c|hᵢ)
y = (source_activation_density_func(all_thresholds) # p(h)
* threshold_specific_cluster_size_density_func(
all_thresholds, observed_cluster_size)) # p(c|h)
assert np.isfinite(y).all()
denom = trapezoid(x=all_thresholds, y=y) # integral
assert np.isfinite(denom)
return numer / denom
def _prob_suprathresh_given_cluster_size(
        threshold, all_thresholds, observed_cluster_size,
        source_activation_density_func,
        threshold_specific_cluster_size_density_func):
    """P(V >= threshold | cluster size); pvox.clust() in the reference impl.

    ``threshold`` must be one of the values in ``all_thresholds``.
    """
    start = all_thresholds.tolist().index(threshold)
    tail = all_thresholds[start:]
    density = _suprathresh_density_given_cluster_size(
        tail, all_thresholds, observed_cluster_size,
        source_activation_density_func,
        threshold_specific_cluster_size_density_func)
    # integrate the conditional density from ``threshold`` upward
    return trapezoid(x=tail, y=density)
def _aggregate_logp_vals(unaggregated_probs, delta_logp_thresh):
"""Perform p-value enhancement by aggregating across thresholds."""
# avoid underflow
finfo = np.finfo(unaggregated_probs.dtype)
unaggregated_probs[unaggregated_probs == 0] = finfo.eps
unaggregated_probs[unaggregated_probs == 1] = 1 - finfo.epsneg
# S(x) = ∑ᵢ -log(P(V ≥ hᵢ|cᵢ)) at voxel position x (Spisák et al. eq. 10)
neglogp = np.sum(-np.log(unaggregated_probs), axis=0)
# (sqrt(Δk * (8S(x) + Δk)) - Δk) / 2 (Spisák et al. eq. 9)
radicand = delta_logp_thresh * (8 * neglogp + delta_logp_thresh)
enhanced = (np.sqrt(radicand) - delta_logp_thresh) / 2
# neglogp → regular p-values
return np.exp(-1 * enhanced)
def ptfce(data, adjacency, noise, max_cluster_size, seed=None):
    """Perform pTFCE.

    Parameters
    ----------
    data : array-like, shape (n_points,)
        The input data, reshaped or raveled into a one-dimensional vector.
    adjacency :
        Matrix describing the adjacency of points in the data vector (for the
        purposes of cluster formation).
    noise : array-like, shape (n_iter, n_points)
        Simulated noise to use when constructing the null distributions.
    max_cluster_size : int
        Largest allowed cluster size (usually the number of vertices in a
        hemisphere).
    seed : None | int | np.random.Generator
        Source of randomness for the noise simulations.
        NOTE(review): ``seed`` is currently unused in this function body.

    Returns
    -------
    tuple
        (enhanced p-values, thresholds, unaggregated per-threshold probs,
        activation density func, noise cluster sizes, cluster-size density
        func, per-threshold data clusters, per-threshold data cluster sizes).
    """
    # compute pTFCE thresholds
    all_thresholds, delta_logp_thresh = calc_thresholds(data, n_thresh=100)
    with timer('calculating source activation prior'):
        # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ this is the p(v) distribution
        source_activation_density_func = gaussian_kde(noise.ravel())
    with timer('finding clusters in noise simulations'):
        # all_noise_clusters[iter][thresh] -> list of clusters
        all_noise_clusters = list()
        for iter_ix, noise_iter in enumerate(noise):
            print(f'iteration {iter_ix}, threshold ', end='', flush=True)
            this_clusters = list()
            for thresh_ix, threshold in enumerate(all_thresholds):
                # progress bar
                if not thresh_ix % 5:
                    print(f'{thresh_ix} ', end='', flush=True)
                # compute cluster size prior
                clust = _find_clusters(noise_iter, threshold, adjacency)
                this_clusters.append(clust)
            print()
            all_noise_clusters.append(this_clusters)
    with timer('calculating cluster size distribution from noise'):
        # pool obs across epochs & thresholds → total prob of each cluster size
        all_noise_cluster_sizes = _get_cluster_sizes([
            clust for _iter in all_noise_clusters for thresh in _iter
            for clust in thresh])
        # get the PDF of cluster sizes in noise (across all thresholds)
        cluster_size_density_func = _cluster_size_density_factory(
            all_noise_cluster_sizes, max_cluster_size)
        sizes_at_thresh = list()
        for thresh_ix in range(len(all_thresholds)):
            # estimate prob. density of cluster size at each threshold: p(c|h)
            clusts_at_thresh = [
                _iter[thresh_ix] for _iter in all_noise_clusters]
            _sizes_at_thresh = _get_cluster_sizes(
                [clust for _iter in clusts_at_thresh for clust in _iter])
            sizes_at_thresh.append(_sizes_at_thresh)
    # closure over ``all_thresholds`` and ``sizes_at_thresh`` built above
    def threshold_specific_cluster_size_density_func(
            thresholds, observed_cluster_size):
        """PDF of cluster size, given threshold.

        Equivalent in pTFCE source code is dclust() which is derived from the
        Euler Characteristic Density of a gaussian field of given dimension.
        """
        this_thresholds = np.array(thresholds)
        # NOTE(review): np.in1d is deprecated in recent numpy in favor of
        # np.isin — consider updating
        thresh_ixs = np.nonzero(np.in1d(all_thresholds, this_thresholds))[0]
        densities = list()
        for thresh_ix in thresh_ixs:
            noise_cluster_sizes = sizes_at_thresh[thresh_ix]
            density_func = _cluster_size_density_factory(
                noise_cluster_sizes, max_cluster_size)
            density = np.atleast_1d(density_func(observed_cluster_size))[0]
            densities.append(density)
        return np.array(densities)
    # apply to the real data
    with timer('finding clusters in real data'):
        print('threshold number: ', end='', flush=True)
        # one row per threshold; entries default to probability 1
        unaggregated_probs = np.ones(
            (len(all_thresholds), *data.shape), dtype=float)
        all_data_clusters_by_thresh = list()
        all_data_cluster_sizes_by_thresh = list()
        for thresh_ix, threshold in enumerate(all_thresholds):
            # progress bar
            if not thresh_ix % 5:
                print(f'{thresh_ix} ', end='', flush=True)
            # find clusters in data STC
            data_clusters = _find_clusters(data, threshold, adjacency)
            data_cluster_sizes = _get_cluster_sizes(data_clusters)
            all_data_clusters_by_thresh.append(data_clusters)
            all_data_cluster_sizes_by_thresh.append(data_cluster_sizes)
            uniq_data_cluster_sizes = np.unique(data_cluster_sizes)
            # compute unaggregated probs. (the call to
            # _prob_suprathresh_given_cluster_size is slow, so do it only once
            # for each unique cluster size)
            uniq_data_cluster_probs = {
                size: _prob_suprathresh_given_cluster_size(
                    threshold, all_thresholds, size,
                    source_activation_density_func,
                    threshold_specific_cluster_size_density_func)
                for size in uniq_data_cluster_sizes}
            # prepare prob array that will zip with clusters
            data_cluster_probs = np.array(
                [uniq_data_cluster_probs[size] for size in data_cluster_sizes])
            # assign probs to vertices in thresh-appropriate slice of big array
            for clust, prob in zip(data_clusters, data_cluster_probs):
                # make sure we're not overwriting anything
                assert np.all(unaggregated_probs[thresh_ix][clust] == 1.)
                unaggregated_probs[thresh_ix][clust] = prob
        print()
    with timer('aggregating and adjusting probabilities'):
        _ptfce = _aggregate_logp_vals(unaggregated_probs, delta_logp_thresh)
    return (_ptfce,
            all_thresholds,
            unaggregated_probs,
            source_activation_density_func,
            all_noise_cluster_sizes,
            cluster_size_density_func,
            all_data_clusters_by_thresh,
            all_data_cluster_sizes_by_thresh)
def calc_thresholded_source_prior(threshold, noise):
    """Empirical probability that a source value exceeds ``threshold``.

    Vectorized over thresholds: returns one probability per threshold.
    """
    flat_noise = np.atleast_2d(noise.ravel())       # (1, noise.size)
    thresh_col = np.atleast_2d(threshold).T         # (n_thresh, 1)
    # broadcasting yields an (n_thresh, noise.size) boolean array
    exceed_counts = (flat_noise > thresh_col).sum(axis=-1)
    assert exceed_counts.shape[0] == thresh_col.size
    return exceed_counts / flat_noise.size
def plot_null_distr(noise, n_iter, source_activation_density_func,
                    cluster_size_density_func, all_noise_cluster_sizes):
    """Plot the empirical null distributions used by pTFCE.

    Creates a 1x3 figure showing (1) the source activation density, (2) the
    probability of a source exceeding a given threshold, and (3) the density
    of cluster sizes pooled across all thresholds. Returns the figure.
    """
    import matplotlib.pyplot as plt
    # initialize figure
    fig, axs = plt.subplots(1, 3)
    subtitle = f'\n({n_iter} noise iterations)'
    # first plot: source activation density
    ax = axs[0]
    x = np.linspace(noise.min(), noise.max(), 100)
    y = source_activation_density_func(x)
    ax.plot(x, y)
    ax.set(title=f'source activation density{subtitle}',
           xlabel='activation', ylabel='density')
    # second plot: probability of suprathresholdness
    ax = axs[1]
    y = calc_thresholded_source_prior(threshold=x, noise=noise)
    ax.plot(x, y)
    ax.set(title=f'probability of suprathresholdness{subtitle}',
           xlabel='threshold', ylabel='probability')
    # third plot: cluster size density (log-scaled size axis)
    ax = axs[2]
    x = np.arange(all_noise_cluster_sizes.max()) + 1
    y = cluster_size_density_func(x)
    ax.semilogx(x, y)
    ax.set(title=f'cluster size density across all thresholds{subtitle}',
           xlabel='cluster size', ylabel='density')
    # layout
    fig.set_size_inches((12, 4))
    fig.subplots_adjust(bottom=0.15, wspace=0.4, left=0.075, right=0.95)
    return fig
| 43.148607 | 79 | 0.683433 |
60719274ac677463051523675119dc1045aa1bff | 386 | py | Python | test.py | TimVan1596/datasetX | 9c124ef02dd8998cca4fdadab34076e157f8a253 | [
"WTFPL"
] | null | null | null | test.py | TimVan1596/datasetX | 9c124ef02dd8998cca4fdadab34076e157f8a253 | [
"WTFPL"
] | null | null | null | test.py | TimVan1596/datasetX | 9c124ef02dd8998cca4fdadab34076e157f8a253 | [
"WTFPL"
] | null | null | null | import h5py
import numpy as np
from matplotlib import pyplot as plt
# # 同序shuffle-按相同顺序打乱两个数组
# def same_shuffle(arr1: list, arr2: list):
# rand_state = np.random.get_state()
# np.random.shuffle(arr1)
# np.random.set_state(rand_state)
# np.random.shuffle(arr2)
# return arr1, arr2
import os
from urllib.parse import urlparse
if __name__ == '__main__':
pass
| 19.3 | 43 | 0.704663 |
0bf73df3fb842a4e73200cd928cc00df14738884 | 273 | py | Python | kYPython/FluentPython/BasicLearn/OOP/Functiontools.py | kyaing/KDYSample | 6a09ef3f7dab18a71187cd81f7da2dd13cf7a4a5 | [
"MIT"
] | 10 | 2017-02-23T07:42:20.000Z | 2017-02-23T07:42:25.000Z | kYPython/FluentPython/BasicLearn/OOP/Functiontools.py | kaideyi/KDYSample | 6a09ef3f7dab18a71187cd81f7da2dd13cf7a4a5 | [
"MIT"
] | null | null | null | kYPython/FluentPython/BasicLearn/OOP/Functiontools.py | kaideyi/KDYSample | 6a09ef3f7dab18a71187cd81f7da2dd13cf7a4a5 | [
"MIT"
] | null | null | null | import functools
def note(func):
"""note function"""
@functools.wraps(func) # 用以消除装饰器带来的副作用
def wrapper():
'''wrapper function'''
print('note something')
return func()
return wrapper
@note
def test():
"""test function"""
print('--test--')
print(help(test))
| 15.166667 | 39 | 0.659341 |
49ef9524d795a19f016013168b8cdebf2260279f | 25,151 | py | Python | TrainingExtensions/torch/src/python/aimet_torch/onnx_utils.py | quic-dkhullar/aimet | 0f7b80193036bdf074e67628122f82fcc324a346 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/torch/src/python/aimet_torch/onnx_utils.py | quic-dkhullar/aimet | 0f7b80193036bdf074e67628122f82fcc324a346 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/torch/src/python/aimet_torch/onnx_utils.py | quic-dkhullar/aimet | 0f7b80193036bdf074e67628122f82fcc324a346 | [
"BSD-3-Clause"
] | null | null | null | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Utilities to load and save onnx models """
from typing import Union, List, Tuple, Dict
import os
import copy
from collections import defaultdict
import torch
import torch.nn as nn
import torch.onnx.symbolic_caffe2
import onnx
from aimet_common.utils import AimetLogger
import aimet_torch.utils
import aimet_torch.elementwise_ops as elementwise_ops
from aimet_torch.defs import OpToIOTensors
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
recurrent_onnx_optypes = ['LSTM', 'GRU', 'RNN']
# This is a dict that maps a PyTorch module type to the corresponding ONNX op type (as a string)
map_torch_types_to_onnx = {
nn.Conv2d: ['Conv'],
nn.Dropout: ['Dropout'],
nn.Dropout2d: ['Dropout'],
nn.BatchNorm1d: ['BatchNormalization'],
nn.BatchNorm2d: ['BatchNormalization'],
nn.ReLU: ['Relu'],
nn.ReLU6: ['Clip'],
nn.MaxPool2d: ['MaxPool'],
nn.Linear: ['Gemm', 'MatMul'],
nn.AdaptiveAvgPool2d: ['GlobalAveragePool', 'AveragePool'],
nn.AvgPool2d: ['AveragePool'],
nn.LogSoftmax: ['LogSoftmax'],
nn.RNN: ['RNN'],
nn.LSTM: ['LSTM'],
nn.GRU: ['GRU'],
nn.ConvTranspose2d: ['ConvTranspose'],
nn.Sigmoid: ['Sigmoid'],
nn.Upsample: ['Upsample'],
nn.PReLU: ['PRelu'],
nn.LeakyReLU: ['LeakyRelu'],
nn.Flatten: ['Flatten'],
elementwise_ops.Add: ['Add'],
elementwise_ops.Subtract: ['Sub'],
elementwise_ops.Multiply: ['Mul'],
elementwise_ops.Divide: ['Div'],
elementwise_ops.Concat: ['Concat']
}
# Maps pytorch functional op string names to corresponding onnx types.
pytorch_functional_name_to_onnx_dict = {
'add': 'Add',
'cat': 'Concat',
'mul': 'Mul',
'div': 'Div'
}
class OnnxExportApiArgs:
"""
configuration for torch onnx export api invocation
"""
def __init__(self, opset_version: int = None, input_names: List[str] = None, output_names: List[str] = None):
"""
Refer torch documentation https://pytorch.org/docs/1.7.1/onnx.html?highlight=onnx%20export#torch.onnx.export
:param opset_version: onnx opset version to use to export the model
:param input_names: names to assign to the input nodes of the onnx graph, in order
:param output_names: names to assign to the output nodes of the graph, in order
"""
self.opset_version = opset_version
self.input_names = input_names
self.output_names = output_names
@property
def kwargs(self):
"""
formats all override options into kwarg format to appended to onnx export call
"""
return {'opset_version': self.opset_version,
'input_names': self.input_names,
'output_names': self.output_names}
class OnnxSaver:
"""
Utilities to save/load onnx models
"""
@classmethod
def set_node_names(cls, onnx_model_path: str, pytorch_model: torch.nn.Module,
dummy_input: Union[torch.Tensor, Tuple],
onnx_export_args: OnnxExportApiArgs = OnnxExportApiArgs()):
"""
This utility loads a given onnx model file and set the names of all the nodes (ops) to equivalent
pytorch module names given the corresponding pytorch model.
:param onnx_model_path: Path to the ONNX model file
:param pytorch_model: Equivalent PyTorch model instance
:param dummy_input: Dummy input to the model. Used to parse model graph.
:param onnx_export_args: override options for torch.onnx.export call
:return:
"""
onnx_model = cls._map_onnx_nodes_to_pytorch_modules(pytorch_model, dummy_input,
onnx_model_path, onnx_export_args)
onnx.save(onnx_model, onnx_model_path)
@staticmethod
def _create_map_of_tensor_to_node(onnx_model: onnx.ModelProto) -> Tuple[Dict[str, List[onnx.NodeProto]],
Dict[str, onnx.NodeProto]]:
"""
Create and return two dicts
1. Tensor -> list of nodes that consume this tensor
2. Tensor -> node that produces this tensor
:param onnx_model: ONNX model object
:return: The two dicts described above
Note: The list in #1 is ordered exactly in the order that pytorch trace reaches these nodes. This is important
because later on we will use pytorch layer hooks to match these nodes with the equivalent PyTorch modules.
The expectation is that PyTorch trace and PyTorch hooks follow the same execution sequence
"""
map_input_tensor_to_node = {}
map_output_tensor_to_node = {}
for node in onnx_model.graph.node:
for in_tensor in node.input:
if in_tensor in map_input_tensor_to_node:
map_input_tensor_to_node[in_tensor].append(node)
else:
map_input_tensor_to_node[in_tensor] = [node]
for output in node.output:
assert output not in map_output_tensor_to_node, 'More than one node produces the same tensor'
map_output_tensor_to_node[output] = node
return map_output_tensor_to_node, map_input_tensor_to_node
@classmethod
def _add_markers(cls, starting_module, module_name_map):
"""Recursively add marker layers
"""
class CustomMarkerFunc(torch.autograd.Function):
"""
This function helps add a custom layer when exporting to ONNX
Note the input tensor has a trivial operation performed on it (clamp). This is needed to force
pytorch trace to not ignore the function.
"""
@staticmethod
def symbolic(g, inp, identifier, start):
"""
Magic method that helps with exporting a custom ONNX node
"""
return g.op('CustomMarker', inp, id_s=identifier, start_s=start)
@staticmethod
def forward(ctx, inp, _identifier, _start): # pylint: disable=arguments-differ
return inp.clamp(0)
@staticmethod
def backward(ctx, _grad): # pylint: disable=arguments-differ
raise NotImplementedError()
class CustomMarker(torch.nn.Module):
"""
This is a temporary layer that in inserted next to a real layer to distinguish the real layer in the
exported ONNX format
"""
def __init__(self, module, identifier):
super(CustomMarker, self).__init__()
self.marked_module = module
self.identifier = identifier
def forward(self, *inputs):
"""
Forward method for this CustomMarker layer
"""
output = []
for t in inputs:
if isinstance(t, torch.Tensor):
t = CustomMarkerFunc.apply(t, self.identifier, 'True')
output.append(t)
x = self.marked_module(*output)
if isinstance(x, torch.Tensor):
x = [x]
output = []
for t in x:
if isinstance(t, torch.Tensor):
t = CustomMarkerFunc.apply(t, self.identifier, 'False')
output.append(t)
if len(output) == 1:
output = output[0]
else:
output = tuple(output)
return output
for module_name, module_ref in starting_module.named_children():
if aimet_torch.utils.is_leaf_module(module_ref):
marker_layer = CustomMarker(module_ref, module_name_map[module_ref])
setattr(starting_module, module_name, marker_layer)
# recursively call children modules
else:
cls._add_markers(module_ref, module_name_map)
@classmethod
def _map_onnx_nodes_to_pytorch_modules(cls, pt_model, dummy_input, onnx_model_path, onnx_export_args):
"""
Exports an onnx model, maps the nodes in the onnx model to corresponding pytorch modules and names
them accordingly
:param pt_model: PyTorch model
:param dummy_input: Dummy input to run a fwd pass on @pt_model
:param onnx_model_path: Path to the saved ONNX model
:param onnx_export_args: override options for torch.onnx.export call
"""
working_dir = os.path.dirname(onnx_model_path)
onnx_model = cls._create_onnx_model_with_markers(dummy_input, pt_model, working_dir, onnx_export_args)
model_output_names = [output.name for output in onnx_model.graph.output] # pylint: disable=no-member
# Parse the ONNX model and create mapping from input and output tensors to corresponding nodes
map_output_tensor_to_node, map_input_tensor_to_node = cls._create_map_of_tensor_to_node(onnx_model)
# Find all marker nodes
end_marker_map, start_marker_map = cls._create_map_of_marker_nodes(onnx_model)
# Set names
cls._set_onnx_node_names(map_input_tensor_to_node, start_marker_map)
# Remove markers
for markers in start_marker_map.values():
for marker in markers:
cls._detach_start_marker_node(map_input_tensor_to_node, map_output_tensor_to_node, marker)
for markers in end_marker_map.values():
for marker in markers:
cls._detach_end_marker_node(onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, marker)
# Make sure we rename the model outputs to original names
cls._set_output_names(onnx_model, model_output_names, map_output_tensor_to_node, map_input_tensor_to_node)
# Clean up the detached nodes
onnx_model = cls._remove_detached_nodes_from_onnx_graph(onnx_model)
cls._fix_param_names(onnx_model)
return onnx_model
@classmethod
def _fix_param_names(cls, onnx_model):
"""
Parameter names have an additional level due to the name of the Marker module itself. This method removes that.
:param onnx_model: Onnx Model
"""
# Rename initializers
for ini in onnx_model.graph.initializer:
if 'marked_module' in ini.name:
name = ini.name
name = name.replace('marked_module.', '')
ini.name = name
# Change the references to initializers in each node
for node in onnx_model.graph.node:
indices_to_replace = []
for index, inp_tensor in enumerate(node.input):
if 'marked_module' in inp_tensor:
indices_to_replace.append(index)
for index in indices_to_replace:
param_name = node.input[index]
node.input.remove(param_name)
node.input.insert(index, param_name.replace('marked_module.', ''))
@classmethod
def _remove_detached_nodes_from_onnx_graph(cls, onnx_model):
"""
Given a ONNX model removes any detached nodes from the graph
:return: Updated onnx model
"""
e = onnx.utils.Extractor(onnx_model)
model_inputs = [inp.name for inp in onnx_model.graph.input]
model_outputs = [output.name for output in onnx_model.graph.output]
onnx_model = e.extract_model(model_inputs, model_outputs)
return onnx_model
@classmethod
def _set_onnx_node_names(cls, map_input_tensor_to_node, start_marker_map):
"""
Set names of the ONNX nodes using the identifier fields in the marker layers
:param map_input_tensor_to_node: Map of tensor to node consuming that tensor
:param start_marker_map: Map of start marker nodes in the ONNX graph
:return:
"""
def set_name_for_downstream_nodes(starting_nodes, name, depth):
for node in starting_nodes:
if node.op_type == 'CustomMarker': # Recursion end condition
return
if depth == 0:
node.name = name
else:
node.name = name + "#" + str(depth)
for tensor in node.output:
downstream_nodes = map_input_tensor_to_node.get(tensor, [])
set_name_for_downstream_nodes(downstream_nodes, name, depth + 1)
for node_name, markers in start_marker_map.items():
for marker in markers:
out_tensor = marker.output[0]
downstream_nodes = map_input_tensor_to_node.get(out_tensor, [])
set_name_for_downstream_nodes(downstream_nodes, node_name, 0)
@classmethod
def _create_map_of_marker_nodes(cls, onnx_model):
"""
Creates and returns maps of start and end marker nodes
:param onnx_model: Onnx model
:return: Map of end marker node, Map of start marker nodes
"""
start_marker_map = defaultdict(list)
end_marker_map = defaultdict(list)
for node in onnx_model.graph.node:
if node.op_type == 'CustomMarker':
identifier = node.attribute[0].s.decode()
is_start_marker = node.attribute[1].s.decode()
if is_start_marker == 'True':
start_marker_map[identifier].append(node)
else:
end_marker_map[identifier].append(node)
print(start_marker_map.keys())
print(end_marker_map.keys())
return end_marker_map, start_marker_map
@classmethod
def _create_onnx_model_with_markers(cls, dummy_input, pt_model, working_dir, onnx_export_args) -> onnx.ModelProto:
"""
Exports an onnx model with marker nodes inserted
:param dummy_input: Dummy input
:param pt_model: PyTorch model
:param working_dir: Working directory for storing the exported onnx model
:param onnx_export_args: override options for torch.onnx.export call
:return: Onnx model with marker layers
"""
model = copy.deepcopy(pt_model).cpu()
module_name_map = {}
for module_name, module_ref in model.named_modules():
if aimet_torch.utils.is_leaf_module(module_ref):
module_name_map[module_ref] = module_name
cls._add_markers(model, module_name_map)
temp_file = os.path.join(working_dir, 'temp_onnx_model_with_markers.onnx')
torch.onnx.export(model, dummy_input, temp_file, enable_onnx_checker=False, **onnx_export_args.kwargs)
onnx_model = onnx.load(temp_file)
return onnx_model
@classmethod
def _detach_start_marker_node(cls, map_input_tensor_to_node, map_output_tensor_to_node, start_marker):
"""
Given a ONNX start_marker node, detach it from the graph
:param map_input_tensor_to_node: Map of tensor to node consuming the tensor
:param map_output_tensor_to_node: Map of tensor to node producing the tensor
:param start_marker: Reference to the ONNX node to detach
"""
assert len(start_marker.input) == 1
assert len(start_marker.output) == 1
input_tensor = start_marker.input[0]
output_tensor = start_marker.output[0]
for next_node in map_input_tensor_to_node[output_tensor]:
index = list(next_node.input).index(output_tensor)
next_node.input.remove(output_tensor)
next_node.input.insert(index, input_tensor)
map_input_tensor_to_node[input_tensor].append(next_node)
map_input_tensor_to_node[input_tensor].remove(start_marker)
del map_output_tensor_to_node[output_tensor] # No node should produce output tensor anymore
del map_input_tensor_to_node[output_tensor] # No node should consume output tensor anymore
start_marker.input.pop()
start_marker.output.pop()
@classmethod
def _detach_end_marker_node(cls, onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, end_marker):
"""
Given a ONNX end_marker node, detach it from the graph
:param onnx_model: ONNX model instance
:param map_input_tensor_to_node: Map of tensor to node consuming the tensor
:param map_output_tensor_to_node: Map of tensor to node producing the tensor
:param end_marker: Reference to the ONNX node to detach
"""
assert len(end_marker.input) == 1
assert len(end_marker.output) == 1
input_tensor = end_marker.input[0]
output_tensor = end_marker.output[0]
model_outputs = [output.name for output in onnx_model.graph.output]
if output_tensor in model_outputs:
# Degenerate case: somebody did a "return y, y" at the end of the model or something similar
for index, model_output in enumerate(model_outputs):
if model_output == output_tensor:
onnx_model.graph.output[index].name = input_tensor
else:
for next_node in map_input_tensor_to_node[output_tensor]:
index = list(next_node.input).index(output_tensor)
next_node.input.remove(output_tensor)
next_node.input.insert(index, input_tensor)
map_input_tensor_to_node[input_tensor].append(next_node)
map_input_tensor_to_node[input_tensor].remove(end_marker)
if not map_input_tensor_to_node[input_tensor]:
del map_input_tensor_to_node[input_tensor]
del map_output_tensor_to_node[output_tensor] # No node should produce output tensor anymore
if output_tensor in map_input_tensor_to_node:
del map_input_tensor_to_node[output_tensor] # No node should consume output tensor anymore
end_marker.input.pop()
end_marker.output.pop()
@staticmethod
def _set_output_names(onnx_model: onnx.ModelProto, desired_model_output_names,
map_output_tensor_to_node, map_input_tensor_to_node):
# Find duplicates in model outputs
duplicates = []
actual_model_output_names = [output.name for output in onnx_model.graph.output]
for output in actual_model_output_names:
if actual_model_output_names.count(output) > 1:
duplicates.append(output)
# Iterate over the model outputs
for index, output in enumerate(onnx_model.graph.output):
new_tensor = desired_model_output_names[index]
old_tensor = output.name
if old_tensor == new_tensor: # Nothing to do
continue
if old_tensor in map_input_tensor_to_node:
# Degenerate case: model output tensor also is an intermediate tensor that inputs into other nodes
for consumer in map_input_tensor_to_node[old_tensor]:
index = list(consumer.input).index(old_tensor)
consumer.input.remove(old_tensor)
consumer.input.insert(index, new_tensor)
if new_tensor not in map_input_tensor_to_node:
map_input_tensor_to_node[new_tensor] = []
map_input_tensor_to_node[new_tensor].append(consumer)
del map_input_tensor_to_node[old_tensor] # No node should consume old tensor anymore
producer = map_output_tensor_to_node[old_tensor]
output.name = new_tensor
index = list(producer.output).index(old_tensor)
producer.output.remove(old_tensor)
producer.output.insert(index, new_tensor)
del map_output_tensor_to_node[old_tensor]
map_output_tensor_to_node[new_tensor] = producer
# If there were duplicate outputs with the same name, they need to be updated
for output_node in onnx_model.graph.output:
# Ugly double loop - cannot avoid
if output_node.name == old_tensor:
output_node.name = new_tensor
@staticmethod
def _collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model: onnx.NodeProto,
node_to_io_tensor_name_map: Dict):
"""
Given an ONNX model and corresponding node-tensor map, consolidate multi-layer recurrent nodes
into single map entries
"""
recurrent_nodes = []
for node in onnx_model.graph.node:
if node.op_type in recurrent_onnx_optypes:
recurrent_nodes.append(node.name)
# Collection of recurrent nodes that includes only the first layer nodes
recurrent_root_nodes = [node for node in recurrent_nodes if '#' not in node]
for root_node in recurrent_root_nodes:
# Find nodes corresponding to all other layers of the recurrent node
other_layers = [node for node in recurrent_nodes if node.startswith(root_node + '#')]
# sort the other layers using the depth value following the '#'
other_layers = sorted(other_layers, key=lambda layer: int(layer.split('#')[1]))
# Append the io_tensors for all layers for the current root recurrent node, in order
io_tensor_list = [node_to_io_tensor_name_map[root_node]]
for layer in other_layers:
io_tensor_list.append(node_to_io_tensor_name_map[layer])
del node_to_io_tensor_name_map[layer]
node_to_io_tensor_name_map[root_node] = io_tensor_list
@classmethod
def get_onnx_node_to_io_tensor_names_map(cls, onnx_model: onnx.NodeProto) -> \
(Dict[str, Union[OpToIOTensors, List[OpToIOTensors]]], set):
"""
Given an ONNX model, gets the inputs and output tensor names for each node in the model.
if multiple onnx nodes have the same name then the nodes are provided as a list of inputs and output tensor
names, one for each onnx node.
:param onnx_model: The ONNX model instance
:return: Dictionary of ONNX node name and corresponding input and output tensor names and a set with all valid
param names in model
"""
node_to_io_tensor_name_map = {}
valid_param_set = set()
initializer_names = {initializer.name for initializer in onnx_model.graph.initializer}
for node in onnx_model.graph.node:
if node.name:
onnx_node_io_tensors = OpToIOTensors(list(node.input), list(node.output))
if (node.name not in node_to_io_tensor_name_map) or node.op_type in recurrent_onnx_optypes:
node_to_io_tensor_name_map[node.name] = onnx_node_io_tensors
# update valid params list
for input_tensor in list(node.input):
if input_tensor in initializer_names:
valid_param_set.add(input_tensor)
cls._collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model, node_to_io_tensor_name_map)
return node_to_io_tensor_name_map, valid_param_set
| 42.919795 | 119 | 0.647569 |
5203deb8b3220dc8cdac678223b9685887f03d41 | 3,041 | py | Python | Lib/ctypes/test/test_python_api.py | s-wakaba/bitpacked-cpython | d036e47137907c2b4e416561428f1a3362cc2829 | [
"PSF-2.0"
] | null | null | null | Lib/ctypes/test/test_python_api.py | s-wakaba/bitpacked-cpython | d036e47137907c2b4e416561428f1a3362cc2829 | [
"PSF-2.0"
] | null | null | null | Lib/ctypes/test/test_python_api.py | s-wakaba/bitpacked-cpython | d036e47137907c2b4e416561428f1a3362cc2829 | [
"PSF-2.0"
] | null | null | null | from ctypes import *
import unittest, sys
from test import support
################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.
from _ctypes import PyObj_FromPtr
################################################################
from sys import getrefcount as grc
if sys.version_info > (2, 4):
c_py_ssize_t = c_size_t
else:
c_py_ssize_t = c_int
class PythonAPITestCase(unittest.TestCase):
def test_PyBytes_FromStringAndSize(self):
PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
PyBytes_FromStringAndSize.restype = py_object
PyBytes_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
self.assertEqual(PyBytes_FromStringAndSize(b"abcdefghi", 3), b"abc")
@support.refcount_test
def test_PyString_FromString(self):
pythonapi.PyBytes_FromString.restype = py_object
pythonapi.PyBytes_FromString.argtypes = (c_char_p,)
s = b"abc"
refcnt = grc(s)
pyob = pythonapi.PyBytes_FromString(s)
self.assertEqual(grc(s), refcnt)
self.assertEqual(s, pyob)
del pyob
self.assertEqual(grc(s), refcnt)
@support.refcount_test
def test_PyLong_Long(self):
from sysconfig import get_config_vars
ref42 = grc(42)
pythonapi.PyLong_FromLong.restype = py_object
self.assertEqual(pythonapi.PyLong_FromLong(42), 42)
if not get_config_vars().get('BITPACKED'): self.assertEqual(grc(42), ref42)
pythonapi.PyLong_AsLong.argtypes = (py_object,)
pythonapi.PyLong_AsLong.restype = c_long
res = pythonapi.PyLong_AsLong(42)
if not get_config_vars().get('BITPACKED'): self.assertEqual(grc(res), ref42 + 1)
del res
if not get_config_vars().get('BITPACKED'): self.assertEqual(grc(42), ref42)
@support.refcount_test
def test_PyObj_FromPtr(self):
s = "abc def ghi jkl"
ref = grc(s)
# id(python-object) is the address
pyobj = PyObj_FromPtr(id(s))
self.assertIs(s, pyobj)
self.assertEqual(grc(s), ref + 1)
del pyobj
self.assertEqual(grc(s), ref)
def test_PyOS_snprintf(self):
PyOS_snprintf = pythonapi.PyOS_snprintf
PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p
buf = c_buffer(256)
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s", b"ctypes")
self.assertEqual(buf.value, b"Hello from ctypes")
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s (%d, %d, %d)", b"ctypes", 1, 2, 3)
self.assertEqual(buf.value, b"Hello from ctypes (1, 2, 3)")
# not enough arguments
self.assertRaises(TypeError, PyOS_snprintf, buf)
def test_pyobject_repr(self):
self.assertEqual(repr(py_object()), "py_object(<NULL>)")
self.assertEqual(repr(py_object(42)), "py_object(42)")
self.assertEqual(repr(py_object(object)), "py_object(%r)" % object)
if __name__ == "__main__":
unittest.main()
| 33.417582 | 90 | 0.640908 |
f0eec0d047f0249799fd545c3dc70110a14a78e5 | 3,877 | py | Python | Pythonfile/game.py | panzermeow/Board-Game-Art-of-statecraft | 7cbd4ceb561f406f0c63b88cf45f354169532d10 | [
"Apache-2.0"
] | null | null | null | Pythonfile/game.py | panzermeow/Board-Game-Art-of-statecraft | 7cbd4ceb561f406f0c63b88cf45f354169532d10 | [
"Apache-2.0"
] | null | null | null | Pythonfile/game.py | panzermeow/Board-Game-Art-of-statecraft | 7cbd4ceb561f406f0c63b88cf45f354169532d10 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 00:03:07 2020
@author: Tianyang Yu
"""
import factory as fac
import pop
import numpy as np
import re
import country as c
name=['USA']
country=[]
def load_country():
for i in range(len(name)):
temp=c.Country()
#basic info
print("-"*20)
print("loading country:{}".format(name[i]))
print("-"*20)
config=np.loadtxt('country/{}.txt'.format(name[i]),dtype=str)
temp.name=re.search('name=(.*)',config[0]).group(1)
temp.eco_type=int(re.search('type=(.*)',config[1]).group(1))
#pop initial
print("loading population")
temp.undereducated=int(re.search('undereducated=(.*)',config[2]).group(1))
temp.worker=int(re.search('worker=(.*)',config[3]).group(1))
temp.educated=int(re.search('educated=(.*)',config[4]).group(1))
temp.elite=int(re.search('elite=(.*)',config[5]).group(1))
#fact initial
print("loading factories")
temp.mine_coal=int(re.search('mine_coal=(.*)',config[6]).group(1))
temp.mine_oil=int(re.search('mine_oil=(.*)',config[7]).group(1))
temp.mine_material=int(re.search('mine_material=(.*)',config[8]).group(1))
temp.mine_U235=int(re.search('mine_U235=(.*)',config[9]).group(1))
temp.farm=int(re.search('farm=(.*)',config[10]).group(1))
temp.factory=int(re.search('factory=(.*)',config[11]).group(1))
temp.high_tech=int(re.search('high_tech=(.*)',config[12]).group(1))
temp.bank=int(re.search('bank=(.*)',config[13]).group(1))
temp.power=int(re.search('power=(.*)',config[14]).group(1))
#national capital
print("loading capital")
temp.c_liquid=float(re.search('c_liquid=(.*)',config[15]).group(1))
temp.c_solid=float(re.search('c_solid=(.*)',config[16]).group(1))
temp.c_bank=float(re.search('c_bank=(.*)',config[17]).group(1))
#private capital
temp.p_liquid=float(re.search('p_liquid=(.*)',config[18]).group(1))
temp.p_solid=float(re.search('p_solid=(.*)',config[19]).group(1))
temp.p_bank=float(re.search('p_bank=(.*)',config[20]).group(1))
#political power in this country
print("loading political power")
temp.undereducated_pp=int(re.search('undereducated_pp=(.*)',config[21]).group(1))
temp.worker_pp=int(re.search('worker_pp=(.*)',config[22]).group(1))
temp.educated_pp=int(re.search('educated_pp=(.*)',config[23]).group(1))
temp.elite_pp=int(re.search('elite_pp=(.*)',config[24]).group(1))
#stockpile
print("loading stockpile")
temp.food=float(re.search('food=(.*)',config[25]).group(1))
temp.material=float(re.search('material=(.*)',config[26]).group(1))
temp.industrial=float(re.search('industrial=(.*)',config[27]).group(1))
temp.high_tech=float(re.search('high_tech=(.*)',config[28]).group(1))
temp.oil=float(re.search('oil=(.*)',config[29]).group(1))
temp.energy=float(re.search('energy=(.*)',config[30]).group(1))
temp.coal=float(re.search('coal=(.*)',config[31]).group(1))
temp.U235=float(re.search('U235=(.*)',config[32]).group(1))
temp.nuclear=float(re.search('nuclear=(.*)',config[33]).group(1))
#school
print("loading school")
temp.high_school=int(re.search('high_school=(.*)',config[34]).group(1))
temp.public_university=int(re.search('public_university=(.*)',config[35]).group(1))
temp.private_universty=int(re.search('private_university=(.*)',config[36]).group(1))
country.append(temp)
print("-"*20)
print("loading complete")
print("-"*20)
load_country()
country[0].init_pop_entity()
country[0].print_stock
country[0].calculate_demand()
#country_config=load_country('country_{}'.format(name[0]))
| 41.688172 | 92 | 0.606139 |
0475fe201cc60ac9e3f1894049096a4113a8ed38 | 593 | py | Python | Arquivo/2020-2/2020-2-uff-lrp/provas/2021-p1/q4.py | joaog314/uff-projects | 417895d5b7c6fd88e9c67c925e7c6a4abb6bb6f4 | [
"MIT"
] | null | null | null | Arquivo/2020-2/2020-2-uff-lrp/provas/2021-p1/q4.py | joaog314/uff-projects | 417895d5b7c6fd88e9c67c925e7c6a4abb6bb6f4 | [
"MIT"
] | null | null | null | Arquivo/2020-2/2020-2-uff-lrp/provas/2021-p1/q4.py | joaog314/uff-projects | 417895d5b7c6fd88e9c67c925e7c6a4abb6bb6f4 | [
"MIT"
] | null | null | null | entr = list(map(int, input().split()))
values = list(map(int, input().split()))
if (1 <= entr[0] <= 1000) and (1 <= entr[1] <= 100):
count = 0
# print(values[i:i+2])
while values != [entr[1]]*len(values):
for i in range(len(values)):
while values[i] != entr[1]:
if values[i] > entr[1]:
values[i:i+2] = [n - 1 for n in values[i:i+2]]
count += 1
elif values[i] < entr[1]:
values[i:i+2] = [n + 1 for n in values[i:i+2]]
count += 1
print(count) | 32.944444 | 67 | 0.438449 |
b0243e8ab59af6cac8d13ffc5a0e3262c1c703b0 | 37 | py | Python | venv/lib/python3.6/encodings/cp861.py | JamesMusyoka/Blog | fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5 | [
"Unlicense"
] | 2 | 2019-04-17T13:35:50.000Z | 2021-12-21T00:11:36.000Z | venv/lib/python3.6/encodings/cp861.py | JamesMusyoka/Blog | fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5 | [
"Unlicense"
] | 2 | 2021-03-31T19:51:24.000Z | 2021-06-10T23:05:09.000Z | venv/lib/python3.6/encodings/cp861.py | JamesMusyoka/Blog | fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5 | [
"Unlicense"
] | 2 | 2019-10-01T08:47:35.000Z | 2020-07-11T06:32:16.000Z | /usr/lib/python3.6/encodings/cp861.py | 37 | 37 | 0.810811 |
e6247bbec150f12c0afddaa324046418eb992d6e | 10,152 | py | Python | pak/datasets/UMPM.py | jutanke/pak | 6f3be954ef68804ebe622cafe46f033ccf6eb2e7 | [
"MIT"
] | 20 | 2018-09-19T06:52:01.000Z | 2020-10-02T11:18:00.000Z | pak/datasets/UMPM.py | justayak/pak | 6f3be954ef68804ebe622cafe46f033ccf6eb2e7 | [
"MIT"
] | 2 | 2017-11-16T21:42:54.000Z | 2018-03-12T19:31:03.000Z | pak/datasets/UMPM.py | justayak/pak | 6f3be954ef68804ebe622cafe46f033ccf6eb2e7 | [
"MIT"
] | 3 | 2020-01-12T08:51:09.000Z | 2020-10-06T05:47:24.000Z | from pak.datasets.Dataset import Dataset
import numpy as np
from pak import utils
from pak.util import download, unzip
from os import makedirs, listdir
from os.path import join, isfile, isdir, exists, splitext
import time
import cv2
import json
import c3d
class UMPM:
""" Utrecht Multi-Person Motion Benchmark
http://www.projects.science.uu.nl/umpm/data/data.shtml
"""
def __init__(self, root, username, password, verbose=True):
"""
"""
utils.talk("UMPM", verbose)
data_root = join(root, 'umpm')
self.data_root = data_root
root_url = 'http://umpm-mirror.cs.uu.nl/download/'
if not isdir(data_root):
makedirs(data_root)
calib_zip = join(data_root, 'umpm_camcalib1.zip')
if not isfile(calib_zip):
calib_url = 'http://umpm-mirror.cs.uu.nl/download/umpm_camcalib1.zip'
download.download_with_login(calib_url, data_root, username, password)
assert isfile(calib_zip)
calib_dir = join(data_root, 'Calib')
if not isdir(calib_dir):
unzip.unzip(calib_zip, data_root)
assert isdir(calib_dir)
for file in UMPM.get_file_list():
cur_loc = join(data_root, file)
fzip = join(cur_loc, file + ".zip")
cur_url = root_url + file + '.zip'
# fc3d_gt = join(data_root, file + '.c3d')
# cur_url_gt = root_url_gt + file + '.c3d'
if not isdir(cur_loc):
utils.talk("could not find location " + file, verbose)
if not isfile(fzip):
utils.talk("could not find file " + file + '.zip', verbose)
download.download_with_login(
cur_url,
cur_loc,
username,
password)
if not isdir(join(cur_loc, 'Groundtruth')):
# is not unzipped
utils.talk("unzipping " + fzip, verbose)
unzip.unzip(fzip, cur_loc, del_after_unzip=True)
video_loc = join(cur_loc, 'Video')
lzma_videos = [join(video_loc, f) for f in listdir(video_loc) if f.endswith('.xz')]
for lzma_video in lzma_videos:
utils.talk('unzipping video ' + lzma_video, verbose)
unzip.unzip(lzma_video, video_loc, del_after_unzip=True)
# if not isfile(fc3d_gt):
# utils.talk("could not find c3d file " + file, verbose)
# download.download_with_login(cur_url_gt, cur_loc, username, password)
def get_data(self, name, lock_gt_framerate=True):
"""
:param name:
:param lock_gt_framerate: if True the framerate of the
ground truth is reduced to match that of the
Videos. Otherwise, the gt frame rate is twice as
large as the video frame rate
:return: X and C3D for the given name
"""
cur_loc = join(self.data_root, name)
settings_json = join(cur_loc, name + '.json')
assert isdir(cur_loc)
nbr_people_in_video = int(name[1])
try:
settings = json.load(open(settings_json))
except json.JSONDecodeError:
# fix the bug (trailing ',' at the last position)
with open(settings_json, 'r') as f:
data = f.read().replace('\n', '').replace(' ', '')
data = data.replace(',}', '}')
if '}{' in data:
# more than one entry: just drop the last one
data = data.replace('}{', '},{')
data = '[' + data + ']'
with open(settings_json, 'w') as f:
f.write(data)
settings = json.load(open(settings_json))
if isinstance(settings, (list,)):
# more than one entry: just drop the last one
settings = settings[0]
calib_name = settings['calib']
calibration = self.get_calibration(calib_name)
video_loc = join(cur_loc, 'Video'); assert isdir(video_loc)
gt_loc = join(cur_loc, 'Groundtruth'); assert isdir(gt_loc)
shape = UMPM.get_shape(name)
# we have 4 videos: l,r,s,f
Videos = {'l': None, 'r': None, 's': None, 'f': None}
for cam in ['l', 'r', 's', 'f']:
fmmap = join(video_loc, name + '_' + cam + '.npy')
if not isfile(fmmap):
avi = join(video_loc, name + '_' + cam + '.avi'); assert isfile(avi)
cap = cv2.VideoCapture(avi)
X = np.memmap(fmmap, dtype='uint8', mode='w+', shape=shape)
i = 0
while True:
valid, frame = cap.read()
if not valid:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
X[i] = frame
i = i + 1
del X
time.sleep(0.01)
X = np.memmap(fmmap, dtype='uint8', mode='r', shape=shape)
Videos[cam] = X
# ------ gt -------
fc3d = join(gt_loc, name + '_vm.c3d'); assert isfile(fc3d)
with open(fc3d, 'rb') as handle:
reader = c3d.Reader(handle)
F = reader.read_frames()
points = []
for frame, point, analog in F:
if lock_gt_framerate:
if frame % 2 == 0:
# skip every second frame to 'adjust'
# the gt frames (100fps) to the video
# frames (50fps)
continue
points.append(point)
assert len(points) == Videos['l'].shape[0]
points = np.array(points)
assert points.shape[1] == nbr_people_in_video * 15
return Videos, points, calibration
def get_calibration(self, name):
"""
Gets the calibration for a given name
:param name: e.g. calib_l06
:return:
"""
calib_dir = join(self.data_root, 'Calib'); assert isdir(calib_dir)
Calibs = {'l': None, 'r': None, 's': None, 'f': None}
for cam in ['l', 'r', 's', 'f']:
ini_path = join(calib_dir, name + '_' + cam + '.ini')
assert isfile(ini_path)
with open(ini_path) as f:
content = f.readlines()
K_r1 = [float(s) for s in content[0].replace('\n', '').split(' ') \
if len(s) > 0]
K_r2 = [float(s) for s in content[1].replace('\n', '').split(' ') \
if len(s) > 0]
K_r3 = [float(s) for s in content[2].replace('\n', '').split(' ') \
if len(s) > 0]
K = np.zeros((3, 3))
K[0] = K_r1; K[1] = K_r2; K[2] = K_r3
distCoef = [float(s) for s in content[3].replace('\n', '').split(' ') \
if len(s) > 0]
rvec = [float(s) for s in content[4].replace('\n', '').split(' ') \
if len(s) > 0]
tvec = [float(s) for s in content[5].replace('\n', '').split(' ') \
if len(s) > 0]
Calibs[cam] = {
'K': K,
'distCoeff': distCoef,
'rvec': rvec,
'tvec': tvec
}
return Calibs
# -------- static --------
@staticmethod
def get_shape(name):
return {
'p1_triangle_1': (2471,486,644,3),
'p2_ball_1': (2796,486,644,3),
'p2_chair_2': (2677,486,644,3),
'p2_circle_01': (2313,486,644,3),
'p2_free_1': (2437,486,644,3),
'p2_grab_2': (2779,486,644,3)
}[name]
@staticmethod
def get_file_list():
return [
'p2_ball_1',
'p2_free_1',
'p2_grab_1',
'p2_chair_2'
]
@staticmethod
def get_file_list_ALL():
"""
:return: all the files stored on the server
"""
return [
'p1_grab_3',
'p1_orthosyn_1',
'p1_table_2',
'p1_triangle_1',
'p2_ball_1',
'p2_chair_1',
'p2_chair_2',
'p2_circle_01',
'p2_free_1',
'p2_free_2',
'p2_grab_1',
'p2_grab_2',
'p2_meet_1',
'p2_orthosyn_1',
'p2_staticsyn_1',
'p2_table_1',
'p2_table_2',
'p3_ball_12',
'p3_ball_1',
'p3_ball_2',
'p3_ball_3',
'p3_chair_11',
'p3_chair_12',
'p3_chair_15',
'p3_chair_16',
'p3_chair_1',
'p3_chair_2',
'p3_circle_15',
'p3_circle_16',
'p3_circle_2',
'p3_circlesyn_11',
'p3_free_11',
'p3_free_12',
'p3_free_1',
'p3_free_2',
'p3_grab_11',
'p3_grab_12',
'p3_grab_1',
'p3_grab_2',
'p3_meet_11',
'p3_meet_12',
'p3_meet_1',
'p3_meet_2',
'p3_orthosyn_11',
'p3_orthosyn_12',
'p3_orthosyn_2',
'p3_staticsyn_11',
'p3_staticsyn_12',
'p3_staticsyn_2',
'p3_table_11',
'p3_table_2',
'p3_triangle_11',
'p3_triangle_12',
'p3_triangle_13',
'p3_triangle_1',
'p3_triangle_2',
'p3_triangle_3',
'p4_ball_11',
'p4_ball_12',
'p4_chair_11',
'p4_chair_1',
'p4_circle_11',
'p4_circle_12',
'p4_free_11',
'p4_free_1',
'p4_grab_11',
'p4_grab_1',
'p4_meet_11',
'p4_meet_12',
'p4_meet_2',
'p4_staticsyn_11',
'p4_staticsyn_13',
'p4_table_11',
'p4_table_12'
]
| 32.854369 | 95 | 0.476753 |
e5e0437a2714beb57f4638fd1e0e30d08df6258a | 21,885 | py | Python | tests/unit/compile/test_serialization.py | iconix/flambe | 939e28853ece75094ae9335e3d10e3821235a97a | [
"MIT"
] | null | null | null | tests/unit/compile/test_serialization.py | iconix/flambe | 939e28853ece75094ae9335e3d10e3821235a97a | [
"MIT"
] | null | null | null | tests/unit/compile/test_serialization.py | iconix/flambe | 939e28853ece75094ae9335e3d10e3821235a97a | [
"MIT"
] | null | null | null | import pytest
import tempfile
from collections import abc, OrderedDict
import os
import pprint
import torch
import dill
from ruamel.yaml.compat import StringIO
from ruamel.yaml import YAML
# from flambe.compile import yaml
from flambe import Component, save_state_to_file, load_state_from_file, load, save
from flambe.compile import Registrable, yaml, make_component
from flambe.compile.serialization import _extract_prefix
# Keys that appear in the ``_metadata`` mapping of a flambe state object;
# the assertions below index metadata with these names.
FLAMBE_SOURCE_KEY = '_flambe_source'
FLAMBE_CLASS_KEY = '_flambe_class'
FLAMBE_CONFIG_KEY = '_flambe_config'
FLAMBE_DIRECTORIES_KEY = '_flambe_directories'
KEEP_VARS_KEY = 'keep_vars'
VERSION_KEY = '_flambe_version'
def list_files(startpath):
    """Print a simple tree-style listing of *startpath* to stdout.

    Used as a debugging aid when inspecting save directories.
    """
    for root, _dirs, files in os.walk(startpath):
        depth = root.replace(startpath, '').count(os.sep)
        pad = ' ' * 4 * depth
        print('{}{}/'.format(pad, os.path.basename(root)))
        child_pad = ' ' * 4 * (depth + 1)
        for fname in files:
            print('{}{}'.format(child_pad, fname))
def check_mapping_equivalence(x, y, exclude_config=False):
    """Assert that every (non-skipped) entry of mapping ``x`` is present and
    equal in mapping ``y``, recursing into nested mappings.

    Only keys of ``x`` are checked; call it both ways for full equality.

    :param x: reference mapping
    :param y: mapping compared against ``x``
    :param exclude_config: if True, ignore the FLAMBE_CONFIG_KEY entry
        (config strings legitimately differ across save/load round trips)
    """
    for key in x.keys():
        if key == KEEP_VARS_KEY or key == 'version':
            continue
        if key == FLAMBE_CONFIG_KEY and exclude_config:
            continue
        assert key in y
        if isinstance(x[key], abc.Mapping):
            check_mapping_equivalence(x[key], y[key], exclude_config=exclude_config)
        elif isinstance(x[key], torch.Tensor):
            assert isinstance(y[key], torch.Tensor)
            # BUG FIX: the result of torch.equal() was previously discarded,
            # so differing tensors could never fail the check.
            assert torch.equal(x[key], y[key])
        else:
            assert x[key] == y[key]
EXAMPLE_TRAINER_CONFIG = """
!Trainer
train_sampler: !BaseSampler
val_sampler: !BaseSampler
dataset: !TabularDataset
train: [['']]
model: !RNNEncoder
input_size: 300
rnn_type: lstm
n_layers: 2
hidden_size: 256
loss_fn: !torch.NLLLoss
metric_fn: !Accuracy
optimizer: !torch.Adam
params: []
max_steps: 2
iter_per_step: 2
"""
@pytest.fixture
def make_classes_2():
    """Define three fresh Component classes and return two of them (A, B).

    Defining a Component subclass registers its YAML tag, so ``C`` is
    created purely for that side effect: tests construct it via ``!C`` in
    YAML and never need the class object itself.
    """
    class A(Component):
        def __init__(self, akw1=0, akw2=None):
            self.akw1 = akw1
            self.akw2 = akw2

    class B(Component):
        def __init__(self, bkw1=0, bkw2='', bkw3=99):
            self.bkw1 = bkw1
            self.bkw2 = bkw2
            self.bkw3 = bkw3

    class C(Component):
        def __init__(self, one, two):
            self.one = one
            self.two = two

    return A, B
# NOTE: TestState asserts on the exact dill-extracted source of several of
# these classes (Basic, Composite, BasicStateful, RootTorch, ...), so do
# NOT add docstrings or comments *inside* their bodies.

# Stateless leaf component.
class Basic(Component):
    pass


# Component owning a single Basic leaf.
class Composite(Component):
    def __init__(self):
        self.leaf = Basic()


# Leaf with one registered attribute ('x') that should round-trip.
class BasicStateful(Component):
    def __init__(self):
        self.x = 2019
        self.register_attrs('x')
        # self.b = Basic()


# torch module wrapping a stateless leaf (no parameters of its own).
class IntermediateTorch(Component, torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.leaf = Basic()


# torch module mixing a stateful Component child with a real Linear layer.
class IntermediateStatefulTorch(Component, torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.leaf = BasicStateful()
        self.linear = torch.nn.Linear(2, 2)


# Non-torch root holding a torch model plus optimizer/scheduler state.
class RootTorch(Component):
    def __init__(self):
        super().__init__()
        self.model = IntermediateStatefulTorch()
        # self.linear = torch.nn.Linear(2, 2)
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()))
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 0.01)


# Nestable torch-stateful component; 'other_data' is flambe-registered.
class ComposableTorchStateful(Component, torch.nn.Module):
    def __init__(self, a: Component, b: int, c: torch.nn.Linear):
        super().__init__()
        self.child = a
        self.other_data = b
        self.linear = c
        self.register_attrs('other_data')


# Variant that persists 'other_data' via manual _state/_load_state hooks
# instead of register_attrs.
class ComposableTorchStatefulPrime(Component, torch.nn.Module):
    def __init__(self, a: Component, b: int, c: torch.nn.Linear):
        super().__init__()
        self.child = a
        self.other_data = b
        self.linear = c

    def _state(self, state_dict, prefix, local_metadata):
        state_dict[prefix + 'other_data'] = self.other_data
        return state_dict

    def _load_state(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        assert prefix + 'other_data' in state_dict
        self.other_data = state_dict[prefix + 'other_data']


# Plain (non-torch) container used as a non-torch serialization root.
class ComposableContainer(Component):
    def __init__(self, item: Component):
        self.item = item
def create_factory(class_):
    """Return a factory that builds *class_* either from a trivial YAML
    config (exercising the flambe loader) or by direct instantiation."""
    def _factory(from_config):
        if not from_config:
            return class_()
        config = f"!{class_.__name__} {{}}\n"
        return yaml.load(config)()
    return _factory
@pytest.fixture
def basic_object():
    """Factory fixture for ``Basic`` (call with from_config=True/False)."""
    return create_factory(Basic)


@pytest.fixture
def nested_object():
    """Factory fixture for ``Composite``."""
    return create_factory(Composite)


@pytest.fixture
def basic_object_with_state():
    """Factory fixture for ``BasicStateful``."""
    return create_factory(BasicStateful)


@pytest.fixture
def alternating_nn_module_with_state():
    """Factory fixture for ``RootTorch`` (Component/torch alternation)."""
    return create_factory(RootTorch)
def complex_builder(from_config):
    """Build a three-level ``ComposableTorchStateful`` chain, either through
    the YAML loader or by direct construction; both paths produce the same
    structure (BasicStateful leaf, then b=2021, 2022, 2023 wrappers)."""
    if from_config:
        config = """
!ComposableTorchStateful
a: !ComposableTorchStateful
  a: !ComposableTorchStateful
    a: !BasicStateful {}
    b: 2021
    c: !torch.Linear
      in_features: 2
      out_features: 2
  b: 2022
  c: !torch.Linear
    in_features: 2
    out_features: 2
b: 2023
c: !torch.Linear
  in_features: 2
  out_features: 2
"""
        return yaml.load(config)()
    # Direct path: wrap the leaf three times, innermost first.
    chain = BasicStateful()
    for marker in (2021, 2022, 2023):
        chain = ComposableTorchStateful(chain, marker, torch.nn.Linear(2, 2))
    return chain
def complex_builder_nontorch_root(from_config):
    """Build a non-torch ``ComposableContainer`` whose item is a three-level
    torch-stateful chain (leaf, then b=2021, 2022, 2023 wrappers)."""
    if from_config:
        config = """
!ComposableContainer
item: !ComposableTorchStatefulPrime
  a: !ComposableTorchStateful
    a: !ComposableTorchStateful
      a: !BasicStateful {}
      b: 2021
      c: !torch.Linear
        in_features: 2
        out_features: 2
    b: 2022
    c: !torch.Linear
      in_features: 2
      out_features: 2
  b: 2023
  c: !torch.Linear
    in_features: 2
    out_features: 2
"""
        return yaml.load(config)()
    # NOTE(review): unlike the YAML path, the direct path builds a plain
    # ComposableTorchStateful (not ...Prime) as the container item; only
    # the YAML path is exercised by the current tests -- confirm intended.
    chain = BasicStateful()
    for marker in (2021, 2022, 2023):
        chain = ComposableTorchStateful(chain, marker, torch.nn.Linear(2, 2))
    return ComposableContainer(chain)
@pytest.fixture
def complex_multi_layered():
    """Builder fixture for the torch-rooted composable chain."""
    return complex_builder


@pytest.fixture
def complex_multi_layered_nontorch_root():
    """Builder fixture for the container-rooted (non-torch) chain."""
    return complex_builder_nontorch_root
class TestHelpers:
    """Smoke tests for private serialization helpers."""

    def test_extract_prefix(self):
        # Placeholder: merely references the helper, asserting nothing.
        # TODO(review): add real assertions for _extract_prefix behavior.
        _extract_prefix
class TestState:
    """Tests for ``Component.get_state()``: returned contents and the
    ``_metadata`` bookkeeping (source code, version, config, directories)."""

    def test_state_returns_not_None(self, basic_object):
        obj = basic_object(from_config=True)
        assert obj.get_state() is not None

    def test_state_metadata(self, basic_object):
        state = basic_object(from_config=False).get_state()
        assert hasattr(state, '_metadata')
        assert '' in state._metadata
        assert FLAMBE_DIRECTORIES_KEY in state._metadata
        assert FLAMBE_SOURCE_KEY in state._metadata['']
        assert VERSION_KEY in state._metadata['']
        # Compared against the exact dill-extracted source of Basic.
        assert state._metadata[''][FLAMBE_SOURCE_KEY] == "class Basic(Component):\n    pass\n"
        # assert state[FLAMBE_CONFIG_KEY] == ''
        assert '' in state._metadata[FLAMBE_DIRECTORIES_KEY] and len(state._metadata[FLAMBE_DIRECTORIES_KEY]) == 1
        assert state._metadata[''][VERSION_KEY] == '0.0.0'

    def test_state_config(self, basic_object):
        # Config metadata only exists for objects built from YAML.
        assert FLAMBE_CONFIG_KEY not in basic_object(from_config=False).get_state()._metadata['']
        obj = basic_object(from_config=True)
        state = obj.get_state()
        assert FLAMBE_CONFIG_KEY in state._metadata['']
        assert state._metadata[''][FLAMBE_CONFIG_KEY] == "!Basic {}\n"

    def test_state_nested_but_empty(self, nested_object):
        expected_state = {}
        expected_metadata = {'': {FLAMBE_SOURCE_KEY: "class Composite(Component):\n    def __init__(self):\n        self.leaf = Basic()\n", VERSION_KEY: "0.0.0", FLAMBE_CLASS_KEY: 'Composite'}, 'leaf': {FLAMBE_SOURCE_KEY: 'class Basic(Component):\n    pass\n', VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'Basic'}, FLAMBE_DIRECTORIES_KEY: {'', 'leaf'}, KEEP_VARS_KEY: False}
        obj = nested_object(from_config=False)
        state = obj.get_state()
        assert state == expected_state
        # Check both directions for full equality.
        check_mapping_equivalence(expected_metadata, state._metadata)
        check_mapping_equivalence(state._metadata, expected_metadata)

    def test_state_custom(self, basic_object_with_state):
        obj = basic_object_with_state(from_config=True)
        expected_state = {'x': 2019}
        assert obj.get_state() == expected_state

    # def test_state_custom_nested(nested_object_with_state):
    #     obj = nested_object_with_state()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #
    # def test_state_pytorch_empty(nn_modules):
    #     cls, cls_torch_first = nn_modules
    #     obj, obj_torch_first = cls(), cls_torch_first()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #     assert obj_torch_first.get_state() == expected_state
    #
    # def test_state_pytorch_nested_no_modules_no_parameters(nested_nn_module):
    #     obj = nested_nn_module()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #
    # def test_state_pytorch_alternating_nesting(alternating_nn_module):
    #     obj = alternating_nn_module()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state

    def test_state_pytorch_alternating_nested_with_modules(self, alternating_nn_module_with_state):
        obj = alternating_nn_module_with_state(from_config=True)
        t1 = obj.model.linear.weight
        t2 = obj.model.linear.bias
        expected_state = {'model.leaf.x': 2019, 'model.linear.weight': t1, 'model.linear.bias': t2}
        root_source_code = dill.source.getsource(RootTorch)
        intermediate_source_code = dill.source.getsource(IntermediateStatefulTorch)
        leaf_source_code = dill.source.getsource(BasicStateful)
        expected_metadata = OrderedDict({FLAMBE_DIRECTORIES_KEY: set(['', 'model', 'model.leaf']), 'keep_vars': False, '': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'RootTorch', FLAMBE_SOURCE_KEY: root_source_code, FLAMBE_CONFIG_KEY: "!RootTorch {}\n"},
                                        'model': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'IntermediateStatefulTorch', FLAMBE_SOURCE_KEY: intermediate_source_code, 'version': 1},  # TODO add config back: FLAMBE_CONFIG_KEY: "!IntermediateStatefulTorch {}\n"
                                        'model.leaf': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'BasicStateful', FLAMBE_SOURCE_KEY: leaf_source_code},  # TODO add config back: FLAMBE_CONFIG_KEY: "!BasicStateful {}\n"
                                        'model.linear': {'version': 1}})
        state = obj.get_state()
        check_mapping_equivalence(state._metadata, expected_metadata)
        check_mapping_equivalence(expected_metadata, state._metadata)
        check_mapping_equivalence(state, expected_state)
        check_mapping_equivalence(expected_state, state)
class TestLoadState:
    """Tests for ``Component.load_state()``; most cases are placeholders."""

    def test_load_state_empty(self):
        pass

    def test_load_state_nested_empty(self):
        pass

    def test_load_state_custom_nested(self):
        pass

    def test_load_state_pytorch(self):
        pass

    def test_load_state_pytorch_alternating_nested(self):
        pass

    def test_state_complex_multilayered_nontorch_root(self, complex_multi_layered_nontorch_root):
        # Register torch.nn modules as YAML-loadable components first.
        TORCH_TAG_PREFIX = "torch"
        make_component(torch.nn.Module, TORCH_TAG_PREFIX, only_module='torch.nn')
        obj = complex_multi_layered_nontorch_root(from_config=True)
        t1 = obj.item.child.linear.weight
        state = obj.get_state()
        new_obj = complex_multi_layered_nontorch_root(from_config=True)
        new_obj.load_state(state)
        t2 = new_obj.item.child.linear.weight
        # Freshly-initialized weights must have been overwritten by the load.
        assert t1.equal(t2)
        check_mapping_equivalence(new_obj.get_state(), obj.get_state())
        check_mapping_equivalence(obj.get_state(), new_obj.get_state())
class TestClassSave:
    """Placeholder for class-level save tests."""

    def test_class_save(self):
        pass


class TestClassLoad:
    """Placeholder for class-level load tests."""

    def test_class_load(self):
        pass
class TestModuleSave:
    """Placeholders for module-level ``save`` tests."""

    def test_save_single_object(self, basic_object):
        pass

    def test_save_nested_object(self, nested_object):
        pass

    def test_save_pytorch_nested_alternating(self, alternating_nn_module_with_state):
        pass


class TestModuleLoad:
    """Placeholders for module-level ``load`` tests."""

    def test_load_directory_single_file(self, basic_object):
        pass

    def test_load_nested_directory(self, nested_object):
        pass

    def test_load_pytorch_alternating(self, alternating_nn_module_with_state):
        pass
class TestSerializationIntegration:
    """End-to-end round trips: get_state/load_state in memory, and
    save/load through directories, single files, pickles and tarballs."""

    def test_state_and_load_roundtrip_single_object(self, basic_object):
        old_obj = basic_object(from_config=True)
        state = old_obj.get_state()
        new_obj = basic_object(from_config=False)
        new_obj.load_state(state, strict=False)
        assert old_obj.get_state() == new_obj.get_state()

    # def test_state_and_load_roundtrip_nested_object(self):
    #     pass

    def test_state_and_load_roundtrip_pytorch_alternating(self, alternating_nn_module_with_state):
        old_obj = alternating_nn_module_with_state(from_config=True)
        state = old_obj.get_state()
        new_obj = alternating_nn_module_with_state(from_config=False)
        new_obj.load_state(state, strict=False)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        # exclude_config: only old_obj was built from YAML, so configs differ.
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=True)

    # def test_class_save_and_load_roundtrip():
    #     pass
    #
    # def test_class_save_and_load_roundtrip_nested():
    #     pass
    #
    # def test_class_save_and_load_roundtrip_pytorch():
    #     pass

    def test_save_to_file_and_load_from_file_roundtrip(self, basic_object):
        old_obj = basic_object(from_config=True)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            state = load_state_from_file(path)
        new_obj = basic_object(from_config=False)
        new_obj.load_state(state, strict=False)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=True)

    def test_save_to_file_and_load_from_file_roundtrip_pytorch(self, alternating_nn_module_with_state):
        old_obj = alternating_nn_module_with_state(from_config=False)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            state = load_state_from_file(path)
        new_obj = alternating_nn_module_with_state(from_config=False)
        new_obj.load_state(state, strict=False)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    def test_save_to_file_and_load_from_file_roundtrip_complex(self, complex_multi_layered):
        TORCH_TAG_PREFIX = "torch"
        make_component(torch.nn.Module, TORCH_TAG_PREFIX, only_module='torch.nn')
        old_obj = complex_multi_layered(from_config=True)
        # Test that the current state is actually saved, for a
        # Component-only child of torch objects
        old_obj.child.child.child.x = 24
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            list_files(path)
            state_loaded = load_state_from_file(path)
            check_mapping_equivalence(state, state_loaded)
            # assert False
        new_obj = complex_multi_layered(from_config=True)
        new_obj.load_state(state_loaded, strict=False)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_save_to_file_and_load_from_file_roundtrip_complex_nontorch_root(self,
            complex_multi_layered_nontorch_root, pickle_only, compress_save_file):
        TORCH_TAG_PREFIX = "torch"
        make_component(torch.nn.Module, TORCH_TAG_PREFIX, only_module='torch.nn')
        old_obj = complex_multi_layered_nontorch_root(from_config=True)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save_state_to_file(state, path, compress_save_file, pickle_only)
            list_files(path)
            print("\n\nHello world\n\n")
            # save_state_to_file appends these suffixes; mirror them here.
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            state_loaded = load_state_from_file(path)
            print("original state: ", state)
            print("loaded state: ", state_loaded)
            check_mapping_equivalence(state, state_loaded)
            check_mapping_equivalence(state._metadata, state_loaded._metadata)
            new_obj = complex_multi_layered_nontorch_root(from_config=True)
            int_state = new_obj.get_state()
            new_obj.load_state(state_loaded, strict=False)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata)
            check_mapping_equivalence(int_state._metadata, state_loaded._metadata)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_module_save_and_load_roundtrip(self, basic_object, pickle_only, compress_save_file):
        old_obj = basic_object(from_config=True)
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save(old_obj, path, compress_save_file, pickle_only)
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            new_obj = load(path)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_module_save_and_load_roundtrip_pytorch(self,
                                                    alternating_nn_module_with_state,
                                                    pickle_only,
                                                    compress_save_file):
        old_obj = alternating_nn_module_with_state(from_config=True)
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save(old_obj, path, compress_save_file, pickle_only)
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            new_obj = load(path)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    # def test_module_save_and_load_example_encoder(self):
    #     TORCH_TAG_PREFIX = "torch"
    #     make_component(torch.nn.Module, TORCH_TAG_PREFIX, only_module='torch.nn')
    #     make_component(torch.optim.Optimizer, TORCH_TAG_PREFIX, only_module='torch.optim')
    #     trainer = yaml.load(EXAMPLE_TRAINER_CONFIG)()
    #     with tempfile.TemporaryDirectory() as path:
    #         save(trainer, path)
    #         loaded_trainer = load(path)
    #         old_state = trainer.get_state()
    #         new_state = loaded_trainer.get_state()
    #         check_mapping_equivalence(new_state, old_state)
    #         check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    def test_module_save_and_load_single_instance_appears_twice(self, make_classes_2):
        # YAML anchor &theb / alias *theb: both A objects must share ONE B.
        txt = """
!C
one: !A
  akw2: &theb !B
    bkw2: test
    bkw1: 1
  akw1: 8
two: !A
  akw1: 8
  # Comment Here
  akw2: *theb
"""
        c = yaml.load(txt)()
        c.one.akw2.bkw1 = 6
        assert c.one.akw2 is c.two.akw2
        assert c.one.akw2.bkw1 == c.two.akw2.bkw1
        with tempfile.TemporaryDirectory() as path:
            save(c, path)
            state = load_state_from_file(path)
            loaded_c = load(path)
        # Identity of the shared instance must survive the round trip.
        assert loaded_c.one.akw2 is loaded_c.two.akw2
        assert loaded_c.one.akw2.bkw1 == loaded_c.two.akw2.bkw1
| 35.412621 | 370 | 0.665661 |
abf57b0733da8ccb4e9af88912cc415419a9fb91 | 1,881 | py | Python | samples/openapi3/client/petstore/python-experimental/test/test_user_api.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 11,868 | 2018-05-12T02:58:07.000Z | 2022-03-31T21:19:39.000Z | samples/openapi3/client/petstore/python-experimental/test/test_user_api.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 9,672 | 2018-05-12T14:25:43.000Z | 2022-03-31T23:59:30.000Z | samples/openapi3/client/petstore/python-experimental/test/test_user_api.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 4,776 | 2018-05-12T12:06:08.000Z | 2022-03-31T19:52:51.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import petstore_api
from petstore_api.api.user_api import UserApi # noqa: E501
class TestUserApi(unittest.TestCase):
    """UserApi unit test stubs.

    Auto-generated by openapi-generator; each method is a placeholder for
    one endpoint of the petstore user API.
    """

    def setUp(self):
        self.api = UserApi() # noqa: E501

    def tearDown(self):
        pass

    def test_create_user(self):
        """Test case for create_user

        Create user # noqa: E501
        """
        pass

    def test_create_users_with_array_input(self):
        """Test case for create_users_with_array_input

        Creates list of users with given input array # noqa: E501
        """
        pass

    def test_create_users_with_list_input(self):
        """Test case for create_users_with_list_input

        Creates list of users with given input array # noqa: E501
        """
        pass

    def test_delete_user(self):
        """Test case for delete_user

        Delete user # noqa: E501
        """
        pass

    def test_get_user_by_name(self):
        """Test case for get_user_by_name

        Get user by user name # noqa: E501
        """
        pass

    def test_login_user(self):
        """Test case for login_user

        Logs user into the system # noqa: E501
        """
        pass

    def test_logout_user(self):
        """Test case for logout_user

        Logs out current logged in user session # noqa: E501
        """
        pass

    def test_update_user(self):
        """Test case for update_user

        Updated user # noqa: E501
        """
        pass
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 21.62069 | 174 | 0.611377 |
2c35f64cb93e34800b4b37edd7c579265808728e | 535 | py | Python | MortgageCalculator.py | Xzya/CodeAbbeySolutions | 0a37eb246c24c1d74a6ff6c2ccf525444c5e787a | [
"MIT"
] | 2 | 2021-07-25T13:41:48.000Z | 2022-03-02T21:07:39.000Z | MortgageCalculator.py | Xzya/CodeAbbeySolutions | 0a37eb246c24c1d74a6ff6c2ccf525444c5e787a | [
"MIT"
] | null | null | null | MortgageCalculator.py | Xzya/CodeAbbeySolutions | 0a37eb246c24c1d74a6ff6c2ccf525444c5e787a | [
"MIT"
] | 5 | 2015-10-29T16:11:43.000Z | 2022-03-13T12:50:32.000Z | ##input
# 3800000 5 108
##answer
# 43766
def binary_search(P, R, L, xmin, xmax):
    """Bisect for the monthly payment that amortizes a mortgage to zero.

    :param P: initial principal
    :param R: yearly interest rate, in percent
    :param L: loan length in months
    :param xmin: lower bound of the payment search interval
    :param xmax: upper bound of the payment search interval
    :return: payment x with the remaining balance after L months within
        1e-7 of zero (or the best bisection estimate), or None if the
        interval is empty.
    """
    if xmax <= xmin:
        return None
    monthly_rate = R / 12 / 100
    x = (xmax + xmin) / 2
    # Iterative bisection: the original recursive version could recurse
    # without bound (and crash) once the interval collapsed to float
    # resolution before the tolerance was met; cap the halvings instead.
    for _ in range(200):
        x = (xmax + xmin) / 2
        balance = P
        for _ in range(L):
            # Each month: accrue one month of interest, then pay x.
            balance = balance * (1 + monthly_rate) - x
        if balance < -1e-7:
            xmax = x   # overpaid -> payment too high, search lower half
        elif balance > 1e-7:
            xmin = x   # balance remains -> payment too low, search upper half
        else:
            break
    return x
def main():
    """Read "P R L" (principal, yearly rate %, months) from stdin and print
    the rounded monthly mortgage payment."""
    # Idiomatic parse of three space-separated integers (was a
    # generator-expression unpack).
    P, R, L = map(int, input().split())
    # P / 2 as upper bound assumes a multi-month loan (payment < P / 2);
    # holds for this task's inputs.
    M = binary_search(P, R, L, 0, P / 2)
    print(round(M))


if __name__ == '__main__':
    main()
| 16.71875 | 47 | 0.547664 |
51ec6c9cd56a69b97331386663815f64aa470250 | 10,096 | py | Python | lib/galaxy/webapps/reports/controllers/system.py | julozi/galaxy | 90d9da03975f254ac128747cd04532c3595d6155 | [
"CC-BY-3.0"
] | 2 | 2017-03-28T12:11:41.000Z | 2017-04-22T02:58:25.000Z | lib/galaxy/webapps/reports/controllers/system.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 2 | 2019-04-03T15:37:17.000Z | 2019-04-03T19:37:09.000Z | lib/galaxy/webapps/reports/controllers/system.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 1 | 2020-06-30T17:53:16.000Z | 2020-06-30T17:53:16.000Z | import logging
import os
import subprocess
from datetime import datetime, timedelta
from decimal import Decimal
from sqlalchemy import and_, desc, false, null, true
from sqlalchemy.orm import eagerload
from galaxy import model, util
from galaxy.webapps.base.controller import BaseUIController, web
log = logging.getLogger(__name__)
class System(BaseUIController):
    """Reports controller: disk usage and cleanup statistics pages."""

    @web.expose
    def index(self, trans, **kwd):
        """Render the system page, optionally running one statistics action."""
        params = util.Params(kwd)
        message = ''
        # Default each look-back window to 60 days when not supplied.
        if params.userless_histories_days:
            userless_histories_days = params.userless_histories_days
        else:
            userless_histories_days = '60'
        if params.deleted_histories_days:
            deleted_histories_days = params.deleted_histories_days
        else:
            deleted_histories_days = '60'
        if params.deleted_datasets_days:
            deleted_datasets_days = params.deleted_datasets_days
        else:
            deleted_datasets_days = '60'
        file_path, disk_usage, datasets, file_size_str = self.disk_usage(trans, **kwd)
        if 'action' in kwd:
            if kwd['action'] == "userless_histories":
                userless_histories_days, message = self.userless_histories(trans, **kwd)
            elif kwd['action'] == "deleted_histories":
                deleted_histories_days, message = self.deleted_histories(trans, **kwd)
            elif kwd['action'] == "deleted_datasets":
                deleted_datasets_days, message = self.deleted_datasets(trans, **kwd)
        return trans.fill_template('/webapps/reports/system.mako',
                                   file_path=file_path,
                                   disk_usage=disk_usage,
                                   datasets=datasets,
                                   file_size_str=file_size_str,
                                   userless_histories_days=userless_histories_days,
                                   deleted_histories_days=deleted_histories_days,
                                   deleted_datasets_days=deleted_datasets_days,
                                   message=message,
                                   nice_size=nice_size)

    def userless_histories(self, trans, **kwd):
        """The number of userless histories and associated datasets that have not been updated for the specified number of days."""
        params = util.Params(kwd)
        message = ''
        if params.userless_histories_days:
            userless_histories_days = int(params.userless_histories_days)
            cutoff_time = datetime.utcnow() - timedelta(days=userless_histories_days)
            history_count = 0
            dataset_count = 0
            for history in trans.sa_session.query(model.History) \
                                           .filter(and_(model.History.table.c.user_id == null(),
                                                        model.History.table.c.deleted == true(),
                                                        model.History.table.c.update_time < cutoff_time)):
                for dataset in history.datasets:
                    if not dataset.deleted:
                        dataset_count += 1
                history_count += 1
            message = "%d userless histories ( including a total of %d datasets ) have not been updated for at least %d days." % (history_count, dataset_count, userless_histories_days)
        else:
            # NOTE(review): with the parameter absent, the return below would
            # raise UnboundLocalError; the form always supplies it -- confirm.
            message = "Enter the number of days."
        return str(userless_histories_days), message

    def deleted_histories(self, trans, **kwd):
        """
        The number of histories that were deleted more than the specified number of days ago, but have not yet been purged.
        Also included is the number of datasets associated with the histories.
        """
        params = util.Params(kwd)
        message = ''
        if params.deleted_histories_days:
            deleted_histories_days = int(params.deleted_histories_days)
            cutoff_time = datetime.utcnow() - timedelta(days=deleted_histories_days)
            history_count = 0
            dataset_count = 0
            disk_space = 0
            histories = trans.sa_session.query(model.History) \
                                        .filter(and_(model.History.table.c.deleted == true(),
                                                     model.History.table.c.purged == false(),
                                                     model.History.table.c.update_time < cutoff_time)) \
                                        .options(eagerload('datasets'))
            for history in histories:
                for hda in history.datasets:
                    if not hda.dataset.purged:
                        dataset_count += 1
                        try:
                            # file_size may be None for never-finalized datasets.
                            disk_space += hda.dataset.file_size
                        except Exception:
                            pass
                history_count += 1
            message = "%d histories ( including a total of %d datasets ) were deleted more than %d days ago, but have not yet been purged, " \
                      "disk space: %s." % (history_count, dataset_count, deleted_histories_days, nice_size(disk_space, True))
        else:
            message = "Enter the number of days."
        return str(deleted_histories_days), message

    def deleted_datasets(self, trans, **kwd):
        """The number of datasets that were deleted more than the specified number of days ago, but have not yet been purged."""
        params = util.Params(kwd)
        message = ''
        if params.deleted_datasets_days:
            deleted_datasets_days = int(params.deleted_datasets_days)
            cutoff_time = datetime.utcnow() - timedelta(days=deleted_datasets_days)
            dataset_count = 0
            disk_space = 0
            for dataset in trans.sa_session.query(model.Dataset) \
                                           .filter(and_(model.Dataset.table.c.deleted == true(),
                                                        model.Dataset.table.c.purged == false(),
                                                        model.Dataset.table.c.update_time < cutoff_time)):
                dataset_count += 1
                try:
                    disk_space += dataset.file_size
                except Exception:
                    pass
            message = "%d datasets were deleted more than %d days ago, but have not yet been purged," \
                      " disk space: %s." % (dataset_count, deleted_datasets_days, nice_size(disk_space, True))
        else:
            message = "Enter the number of days."
        return str(deleted_datasets_days), message

    @web.expose
    def dataset_info(self, trans, **kwd):
        """Render details for one dataset plus every HDA/LDDA sharing its file."""
        message = ''
        dataset = trans.sa_session.query(model.Dataset).get(trans.security.decode_id(kwd.get('id', '')))
        # Get all associated hdas and lddas that use the same disk file.
        associated_hdas = trans.sa_session.query(trans.model.HistoryDatasetAssociation) \
            .filter(and_(trans.model.HistoryDatasetAssociation.deleted == false(),
                         trans.model.HistoryDatasetAssociation.dataset_id == dataset.id)) \
            .all()
        associated_lddas = trans.sa_session.query(trans.model.LibraryDatasetDatasetAssociation) \
            .filter(and_(trans.model.LibraryDatasetDatasetAssociation.deleted == false(),
                         trans.model.LibraryDatasetDatasetAssociation.dataset_id == dataset.id)) \
            .all()
        return trans.fill_template('/webapps/reports/dataset_info.mako',
                                   dataset=dataset,
                                   associated_hdas=associated_hdas,
                                   associated_lddas=associated_lddas,
                                   message=message)

    def get_disk_usage(self, file_path):
        """Parse ``df -h`` for the filesystem holding *file_path*.

        :return: (file_system, disk_size, disk_used, disk_avail,
                  disk_cap_pct, mount); entries stay None if parsing fails.
        """
        is_sym_link = os.path.islink(file_path)
        file_system = disk_size = disk_used = disk_avail = disk_cap_pct = mount = None
        # BUG FIX: check_output() returns *bytes* on Python 3, and iterating
        # bytes yields ints (so df_line.strip() raised AttributeError).
        # Decode and split into text lines before parsing.
        df_output = subprocess.check_output(['df', '-h', file_path]).decode(errors='replace').splitlines()
        for df_line in df_output:
            df_line = df_line.strip()
            if df_line:
                df_line = df_line.lower()
                if 'filesystem' in df_line or 'proc' in df_line:
                    # Skip the header row and proc pseudo-filesystems.
                    continue
                elif is_sym_link:
                    if ':' in df_line and '/' in df_line:
                        # Remote mount spec (host:/path) on its own line.
                        mount = df_line
                    else:
                        try:
                            disk_size, disk_used, disk_avail, disk_cap_pct, file_system = df_line.split()
                            break
                        except Exception:
                            pass
                else:
                    try:
                        file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount = df_line.split()
                        break
                    except Exception:
                        pass
            else:
                break  # EOF
        return (file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount)

    @web.expose
    def disk_usage(self, trans, **kwd):
        """Collect df statistics and the unpurged datasets larger than 4 Gb."""
        file_path = trans.app.config.file_path
        disk_usage = self.get_disk_usage(file_path)
        min_file_size = 2 ** 32  # 4 Gb
        file_size_str = nice_size(min_file_size)
        datasets = trans.sa_session.query(model.Dataset) \
                        .filter(and_(model.Dataset.table.c.purged == false(),
                                     model.Dataset.table.c.file_size > min_file_size)) \
                        .order_by(desc(model.Dataset.table.c.file_size))
        return file_path, disk_usage, datasets, file_size_str
def nice_size(size, include_bytes=False):
    """Return *size* (a byte count) as a human-readable string.

    Non-numeric input falls back to ``"<size> bytes"``.  When
    ``include_bytes`` is true and the chosen unit is larger than bytes,
    the exact byte count is appended in parentheses.
    """
    fallback = "%s bytes" % size
    try:
        amount = Decimal(size)
    except Exception:
        return fallback
    try:
        formatted = None
        unit = 'bytes'
        for unit in ('bytes', 'KB', 'MB', 'GB'):
            if amount.compare(Decimal("1024.0")) == Decimal("-1"):
                formatted = "%3.1f %s" % (amount, unit)
                break
            amount /= Decimal("1024.0")
        if formatted is None:
            # Larger than 1024 GB: report in terabytes.
            formatted = "%3.1f %s" % (amount, 'TB')
            unit = 'TB'
        if include_bytes and unit != 'bytes':
            formatted = "%s (%s bytes)" % (formatted, size)
        return formatted
    except Exception:
        return fallback
| 47.17757 | 184 | 0.571513 |
e0fa38e2ffe41ef585bec85131d0bf6d201d03d8 | 27,215 | py | Python | chainer_/chainercv2/models/preresnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 2,649 | 2018-08-03T14:18:00.000Z | 2022-03-31T08:08:17.000Z | chainer_/chainercv2/models/preresnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 95 | 2018-08-13T01:46:03.000Z | 2022-03-13T08:38:14.000Z | chainer_/chainercv2/models/preresnet.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 549 | 2018-08-06T08:09:22.000Z | 2022-03-31T08:08:21.000Z | """
PreResNet for ImageNet-1K, implemented in Chainer.
Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck',
'PreResUnit', 'PreResInitBlock', 'PreResActivation']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SimpleSequential
class PreResBlock(Chain):
    """
    Simple PreResNet block (two 3x3 pre-activation convolutions) for the
    residual path in a PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 use_bias=False,
                 use_bn=True):
        super(PreResBlock, self).__init__()
        with self.init_scope():
            # return_preact=True: conv1 also emits its BN+ReLU pre-activation,
            # which PreResUnit feeds to the projection shortcut.
            self.conv1 = pre_conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                use_bias=use_bias,
                use_bn=use_bn,
                return_preact=True)
            self.conv2 = pre_conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn)
    def __call__(self, x):
        # Returns (block output, pre-activation of the input).
        x, x_pre_activ = self.conv1(x)
        x = self.conv2(x)
        return x, x_pre_activ
class PreResBottleneck(Chain):
    """
    PreResNet bottleneck block (1x1 reduce, 3x3, 1x1 expand) for the residual
    path in a PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 conv1_stride):
        super(PreResBottleneck, self).__init__()
        # Standard ResNet bottleneck: internal width is 1/4 of the output.
        mid_channels = out_channels // 4
        with self.init_scope():
            # return_preact=True: also emit the pre-activation for the shortcut.
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                stride=(stride if conv1_stride else 1),
                return_preact=True)
            # The stride lives either on conv1 or conv2, never both.
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=(1 if conv1_stride else stride))
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels)
    def __call__(self, x):
        # Returns (block output, pre-activation of the input).
        x, x_pre_activ = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x, x_pre_activ
class PreResUnit(Chain):
    """
    PreResNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 use_bias=False,
                 use_bn=True,
                 bottleneck=True,
                 conv1_stride=False):
        super(PreResUnit, self).__init__()
        # A 1x1 projection is needed on the identity branch whenever the
        # spatial size or channel count changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        with self.init_scope():
            if bottleneck:
                self.body = PreResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    conv1_stride=conv1_stride)
            else:
                self.body = PreResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    use_bias=use_bias,
                    use_bn=use_bn)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    use_bias=use_bias)
    def __call__(self, x):
        identity = x
        x, x_pre_activ = self.body(x)
        if self.resize_identity:
            # Per the pre-activation paper, the projection consumes the
            # BN+ReLU pre-activated input rather than the raw input.
            identity = self.identity_conv(x_pre_activ)
        x = x + identity
        return x
class PreResInitBlock(Chain):
    """
    PreResNet specific initial block: 7x7/2 convolution, BatchNorm, ReLU and
    a 3x3/2 max-pool (overall downsampling of 4x).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(PreResInitBlock, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=7,
                stride=2,
                pad=3,
                nobias=True)
            self.bn = L.BatchNormalization(size=out_channels)
            self.activ = F.relu
            # cover_all=False matches the "floor" pooling arithmetic used by
            # other frameworks, keeping output sizes consistent.
            self.pool = partial(
                F.max_pooling_2d,
                ksize=3,
                stride=2,
                pad=1,
                cover_all=False)
    def __call__(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        x = self.pool(x)
        return x
class PreResActivation(Chain):
    """
    PreResNet pure pre-activation block (BatchNorm + ReLU) without a
    convolution layer. It's used by itself as the final block of the
    feature extractor.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(PreResActivation, self).__init__()
        with self.init_scope():
            self.bn = L.BatchNormalization(
                size=in_channels,
                eps=1e-5)
            self.activ = F.relu
    def __call__(self, x):
        x = self.bn(x)
        x = self.activ(x)
        return x
class PreResNet(Chain):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(PreResNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", PreResInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Downsample at the first unit of every stage
                            # except the first stage.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), PreResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                bottleneck=bottleneck,
                                conv1_stride=conv1_stride))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # Final BN+ReLU (pre-activation nets end without one otherwise).
                setattr(self.features, "post_activ", PreResActivation(
                    in_channels=in_channels))
                # 7x7 average pool matches the default 224x224 input size.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                # Collapse the NCHW feature map to (batch, channels).
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".chainer", "models"),
                  **kwargs):
    """
    Create PreResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # Default heuristic (as in the original paper): depths >= 50 use
        # bottleneck units.
        bottleneck = (blocks >= 50)
    # Map requested depth to the per-stage unit counts.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14 and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: the layer configuration must reproduce the requested
    # depth (3 convs per bottleneck unit, 2 per basic unit, plus init/final).
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit's width except the very last one, so the feature
        # dimension feeding the classifier stays unchanged.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def preresnet10(**kwargs):
    """
    PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic (non-bottleneck) blocks, stage layout [1, 1, 1, 1].
    return get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
def preresnet12(**kwargs):
    """
    PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks, stage layout [2, 1, 1, 1].
    return get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
def preresnet14(**kwargs):
    """
    PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks, stage layout [2, 2, 1, 1].
    return get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
def preresnetbc14b(**kwargs):
    """
    PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [1, 1, 1, 1]; stride on the 3x3 conv ("b" variant).
    return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
def preresnet16(**kwargs):
    """
    PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks, stage layout [2, 2, 2, 1].
    return get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
def preresnet18_wd4(**kwargs):
    """
    PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Standard 18-layer layout [2, 2, 2, 2] with channel widths scaled by 1/4.
    return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs)
def preresnet18_wd2(**kwargs):
    """
    PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Standard 18-layer layout [2, 2, 2, 2] with channel widths scaled by 1/2.
    return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs)
def preresnet18_w3d4(**kwargs):
    """
    PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Standard 18-layer layout [2, 2, 2, 2] with channel widths scaled by 3/4.
    return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs)
def preresnet18(**kwargs):
    """
    PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks, stage layout [2, 2, 2, 2].
    return get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
def preresnet26(**kwargs):
    """
    PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks (explicitly non-bottleneck), stage layout [3, 3, 3, 3].
    return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
def preresnetbc26b(**kwargs):
    """
    PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [2, 2, 2, 2]; stride on the 3x3 conv ("b" variant).
    return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
def preresnet34(**kwargs):
    """
    PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Basic blocks, stage layout [3, 4, 6, 3].
    return get_preresnet(blocks=34, model_name="preresnet34", **kwargs)
def preresnetbc38b(**kwargs):
    """
    PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 3, 3, 3]; stride on the 3x3 conv ("b" variant).
    return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
def preresnet50(**kwargs):
    """
    PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 4, 6, 3]; stride on the first 1x1 conv.
    return get_preresnet(blocks=50, model_name="preresnet50", **kwargs)
def preresnet50b(**kwargs):
    """
    PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Same as preresnet50, but the stride is moved to the 3x3 conv.
    return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs)
def preresnet101(**kwargs):
    """
    PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 4, 23, 3].
    return get_preresnet(blocks=101, model_name="preresnet101", **kwargs)
def preresnet101b(**kwargs):
    """
    PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Same as preresnet101, but the stride is moved to the 3x3 conv.
    return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs)
def preresnet152(**kwargs):
    """
    PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 8, 36, 3].
    return get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
def preresnet152b(**kwargs):
    """
    PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Same as preresnet152, but the stride is moved to the 3x3 conv.
    return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
def preresnet200(**kwargs):
    """
    PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 24, 36, 3].
    return get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
def preresnet200b(**kwargs):
    """
    PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Same as preresnet200, but the stride is moved to the 3x3 conv.
    return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
def preresnet269b(**kwargs):
    """
    PreResNet-269 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Bottleneck blocks, layout [3, 30, 48, 8]; stride on the 3x3 conv ("b" variant).
    return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs)
def _test():
    """Smoke-test every PreResNet variant: parameter count and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    # Constructor -> expected trainable-parameter count (insertion order is
    # the evaluation order, identical to the original list).
    expected_params = {
        preresnet10: 5417128,
        preresnet12: 5491112,
        preresnet14: 5786536,
        preresnetbc14b: 10057384,
        preresnet16: 6967208,
        preresnet18_wd4: 3935960,
        preresnet18_wd2: 5802440,
        preresnet18_w3d4: 8473784,
        preresnet18: 11687848,
        preresnet26: 17958568,
        preresnetbc26b: 15987624,
        preresnet34: 21796008,
        preresnetbc38b: 21917864,
        preresnet50: 25549480,
        preresnet50b: 25549480,
        preresnet101: 44541608,
        preresnet101b: 44541608,
        preresnet152: 60185256,
        preresnet152b: 60185256,
        preresnet200: 64666280,
        preresnet200b: 64666280,
        preresnet269b: 102065832,
    }
    for model, expected in expected_params.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # One forward pass at the default 224x224 input size.
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert y.shape == (1, 1000)
# Run the model smoke-tests when executed as a script.
if __name__ == "__main__":
    _test()
| 33.765509 | 120 | 0.610766 |
31c9acf21df88f17daf0cd37b134e06ba5fda37f | 3,457 | py | Python | purity_fb/purity_fb_1dot8/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot8/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot8/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8 Python SDK
Pure Storage FlashBlade REST 1.8 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BucketPost(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    #BEGIN_CUSTOM
    # IR-51527: Prevent Pytest from attempting to collect this class based on name.
    __test__ = False
    #END_CUSTOM
    # swagger_types maps attribute name -> declared attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    swagger_types = {'account': 'Reference'}
    attribute_map = {'account': 'account'}
    def __init__(self, account=None):  # noqa: E501
        """BucketPost - a model defined in Swagger"""  # noqa: E501
        self._account = None
        self.discriminator = None
        if account is not None:
            self.account = account
    @property
    def account(self):
        """Gets the account of this BucketPost.  # noqa: E501
        The account of the bucket.  # noqa: E501
        :return: The account of this BucketPost.  # noqa: E501
        :rtype: Reference
        """
        return self._account
    @account.setter
    def account(self, account):
        """Sets the account of this BucketPost.
        The account of the bucket.  # noqa: E501
        :param account: The account of this BucketPost.  # noqa: E501
        :type: Reference
        """
        self._account = account
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _plain(value):
            # One level of conversion, mirroring swagger-codegen semantics:
            # lists and dict values are converted item-wise via to_dict().
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val
                        for key, val in value.items()}
            return value
        result = {name: _plain(getattr(self, name)) for name in self.swagger_types}
        # Dead branch kept for parity with generated models that subclass dict.
        if issubclass(BucketPost, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, BucketPost) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
964ca923951de274dfe53a7e2f914e9fbe8e0dc3 | 1,569 | py | Python | protlearn/features/tests/test_atc.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 24 | 2020-09-17T10:35:44.000Z | 2022-03-09T19:19:01.000Z | protlearn/features/tests/test_atc.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 14 | 2020-08-09T18:23:01.000Z | 2020-11-19T05:48:14.000Z | protlearn/features/tests/test_atc.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 3 | 2020-03-17T16:43:54.000Z | 2020-08-03T06:10:24.000Z | import pytest
import numpy as np
from ..atc import atc
import pkg_resources
PATH = pkg_resources.resource_filename(__name__, 'test_data/')
def test_atc():
    """Test atc() sequence compositions (relative, absolute, and error path)."""
    # load data (context manager closes the file handle promptly)
    with open(PATH + 'multiple.txt') as f:
        X_list = f.read().splitlines()
    X_err = 'AGT2HT9'
    # test relative composition
    atc_rel, bonds = atc(X_list)
    # test array contents
    np.testing.assert_almost_equal(atc_rel[0], np.array([
        0.27083333, 0.54861111, 0.07638889, 0.10416667, 0.]))
    np.testing.assert_almost_equal(atc_rel[1], np.array([
        0.2585034 , 0.52380952, 0.06122449, 0.14965986, 0.00680272]))
    np.testing.assert_almost_equal(atc_rel[2], np.array([
        0.234375 , 0.5 , 0.09375 , 0.1640625, 0.0078125]))
    np.testing.assert_almost_equal(bonds[0], np.array([138., 127., 11.]))
    np.testing.assert_almost_equal(bonds[1], np.array([140., 129., 11.]))
    np.testing.assert_almost_equal(bonds[2], np.array([120., 108., 12.]))
    # test that EVERY row of relative frequencies sums to 1
    # (previously indexed atc_rel[0] for all i, so only row 0 was checked)
    for i in range(len(X_list)):
        assert round(atc_rel[i].sum()) == 1
    # test absolute composition
    atc_abs, bonds = atc(X_list, method='absolute')
    # test array contents
    assert np.array_equal(atc_abs[0], np.array([
        39., 79., 11., 15., 0.]))
    assert np.array_equal(atc_abs[1], np.array([
        38., 77., 9., 22., 1.]))
    assert np.array_equal(atc_abs[2], np.array([
        30., 64., 12., 21., 1.]))
    # test ValueError for a sequence containing non-amino-acid characters
    with pytest.raises(ValueError):
        atc_err, bond_err = atc(X_err)
075c96d633523b239579f75b4b68da4662ad4dfc | 369 | py | Python | Script/Commands/Messages/Creators/refresh_dbl.py | PowerEvolved/Clash-Of-Clans-Discord-Bot | 3c2c534c85d314e3cf35786220a49b1d563b022f | [
"BSD-3-Clause"
] | 7 | 2021-12-01T13:49:34.000Z | 2022-03-02T08:27:09.000Z | Script/Commands/Messages/Creators/refresh_dbl.py | BilouKass/Clash-Of-Clans-Discord-Bot | 4dad2349581e2be1c2280b9f8bd4070777db1ef4 | [
"BSD-3-Clause"
] | null | null | null | Script/Commands/Messages/Creators/refresh_dbl.py | BilouKass/Clash-Of-Clans-Discord-Bot | 4dad2349581e2be1c2280b9f8bd4070777db1ef4 | [
"BSD-3-Clause"
] | 5 | 2021-05-01T14:26:20.000Z | 2022-02-13T19:10:15.000Z | # Refreshes the top.gg guilds count
from Script.Clients.discord_client import Clash_info
from Script.Clients.top_gg_client import Dbl_client
from Script.import_emojis import Emojis
async def refresh_dbl(ctx):
    """Push the current guild count to top.gg and confirm in the invoking channel."""
    guild_count = len(Clash_info.guilds)
    await Dbl_client.post_guild_count(guild_count)
    confirmation = "{} (https://top.gg/bot/704688212832026724)".format(Emojis["Yes"])
    await ctx.send(confirmation)
| 30.75 | 83 | 0.783198 |
6edfe761dab7c4b5c1e39ab061892719a5b31c8e | 3,035 | py | Python | 02_data_types/05_dictionaries.py | twiindan/selenium_lessons | 798557e8f584f9e6655414c13f232017483f0439 | [
"Apache-2.0"
] | null | null | null | 02_data_types/05_dictionaries.py | twiindan/selenium_lessons | 798557e8f584f9e6655414c13f232017483f0439 | [
"Apache-2.0"
] | null | null | null | 02_data_types/05_dictionaries.py | twiindan/selenium_lessons | 798557e8f584f9e6655414c13f232017483f0439 | [
"Apache-2.0"
] | 1 | 2020-07-16T09:49:47.000Z | 2020-07-16T09:49:47.000Z | #-*- coding: utf-8 -*-
'''
MOD 06: Dictionaries
'''
spam = {"one": 1, "two": 2, "three": 3} # This is a dictionary
print(spam)
print(type(spam))
eggs = {1: "one",
2: "two",
3: "three"} # Again, no problem with multiline declaration
print(eggs)
# Still more ways to declare dictionaries
spam = dict(one=1, two=2, three=3)
print(spam)
#===============================================================================
# Python mappings
# - dict:
#    - Comma-separated list of hashable key, colon and arbitrary object between curly brackets
#    - Mutable
#    - Insertion-ordered since Python 3.7 (historically unordered)
#    - Access by key
#    - Heavily optimized:
#        - Creation with n items is O(n)
#        - Arbitrary access is O(1)
#        - Adding a new key is amortized O(1)
#===============================================================================
# Let's play a bit with dictionaries
spam = {"one": 1, "two": 2, "three": 3}
print(spam["two"]) # Access by key, may raise an exception
spam = {"one": 1, "two": 2, "three": 3}
print("two" in spam) # Check keys membership
print(2 not in spam) # Membership tests KEYS only, so the value 2 is "not in" -> True
spam = {"one": 1, "two": 2, "three": 3}
print(spam.get("two"))
print(spam.get("four"))
print(spam.get("four", 4)) # Safer access by key, never raises an exception, optional default value
spam = {"one": 1, "two": 2, "three": 3}
print(list(spam.keys())) # Retrieve keys list (copy) in insertion order
print(list(spam.values())) # Retrieve values list (copy) in insertion order
print(list(spam.items())) # Retrieve key, values pairs list (copy) in insertion order
# Let's play a bit with inplace modifications of dicts content
spam = {"one": 1, "two": 2, "three": 3}
spam["two"] = 22 # Set or replace a key value
spam["four"] = 44 # Set or replace a key value
print(spam)
spam = {"one": 1, "two": 2, "three": 3}
print(spam.popitem()) # Remove and return a (key, value) pair (last-inserted in CPython 3.7+)
print(spam)
spam = {"one": 1, "two": 2, "three": 3}
print(spam.pop("two")) # Pop (remove and return) given item, may raise an exception
print(spam.pop("four", 4)) # Pop (remove and return) given item with optional default value
print(spam)
spam = {"one": 1, "two": 2, "three": 3}
eggs = {"three": 33, "four": 44}
spam.update(eggs) # Update dictionary with other dict content
print(spam)
spam = {"one": 1, "two": 2, "three": 3}
eggs = {1: "one", 2: "two", 3: "three"}
spam.update(two=22, four=44) # Like dict constructor, it accepts keyword arguments
print(spam)
print(eggs)
#===============================================================================
# SOURCES:
# - http://docs.python.org/2/library/stdtypes.html#mapping-types-dict
# - http://stackoverflow.com/a/1419324
# - http://wiki.python.org/moin/TimeComplexity
#===============================================================================
| 33.351648 | 114 | 0.519934 |
9cf3f1064905620aa8b88c5853177db16fa47b1c | 1,648 | py | Python | models/body_80_200.py | VisualComputingInstitute/BiternionNets-ROS | f29aca8d761d453ccef372d648a805aec7347091 | [
"MIT"
] | 12 | 2016-04-19T13:24:59.000Z | 2021-06-02T10:32:00.000Z | models/body_80_200.py | VisualComputingInstitute/BiternionNets-ROS | f29aca8d761d453ccef372d648a805aec7347091 | [
"MIT"
] | 4 | 2016-02-28T21:48:07.000Z | 2018-10-28T15:59:26.000Z | models/body_80_200.py | VisualComputingInstitute/BiternionNets-ROS | f29aca8d761d453ccef372d648a805aec7347091 | [
"MIT"
] | 8 | 2016-08-29T13:17:11.000Z | 2020-08-18T08:21:40.000Z | import cv2
import numpy as np
import DeepFried2 as df
from lbtoolbox.augmentation import AugmentationPipeline, Cropper
from df_extras import Flatten, Biternion
def mknet():
    """Build the Biternion head-pose network for 184x76 inputs.

    Architecture (per the spatial sizes annotated in the original file):
    seven conv stages (184x76 -> 10x2), each conv followed by an optional
    max-pool, then batch-norm and ReLU; then dropout, a 512-unit dense
    layer, and a 2-unit Biternion output.
    """
    def _stage(n_in, n_out, ksize, pool=None):
        # One conv stage: Conv -> [MaxPool] -> BatchNorm -> ReLU.
        layers = [df.SpatialConvolution(n_in, n_out, ksize)]
        if pool is not None:
            layers.append(df.SpatialMaxPooling(pool))
        layers.append(df.BatchNormalization(n_out))
        layers.append(df.ReLU())
        return layers

    feature_layers = (
        _stage(3, 24, (3, 3)) +                    # 184x76 -> 182x74
        _stage(24, 24, (3, 3), pool=(3, 3)) +      # -> 180x72 -> 60x24
        _stage(24, 48, (3, 3)) +                   # -> 58x22
        _stage(48, 48, (3, 3), pool=(2, 2)) +      # -> 56x20 -> 28x10
        _stage(48, 64, (3, 3)) +                   # -> 26x8
        _stage(64, 64, (3, 3), pool=(2, 2)) +      # -> 24x6 -> 12x3
        _stage(64, 64, (3, 2))                     # -> 10x2
    )
    head_layers = [
        df.Dropout(0.2),
        Flatten(),
        df.Linear(64*10*2, 512),
        df.ReLU(),
        df.Dropout(0.5),
        df.Linear(512, 2, init=df.init.normal(0.01)),
        Biternion()
    ]
    return df.Sequential(*(feature_layers + head_layers))
def mkaug(Xtr, ytr):
    """Augmentation pipeline that randomly crops training images to 184x76."""
    crop_hw = (184, 76)
    return AugmentationPipeline(Xtr, ytr, Cropper(crop_hw))
def preproc(im):
    """Resize *im* to 80x200 (width x height), move channels first (HWC -> CHW),
    and scale pixel values into [0, 1] as the framework's float type."""
    resized = cv2.resize(im, (80, 200))
    chw = np.rollaxis(resized, 2, 0)
    return chw.astype(df.floatX) / 255
def getrect(x, y, w, h):
    """Return the (x, y, w, h) crop box for a detection.

    The full detector box is used; full height is assumed to be 3x the width,
    so the crop keeps the width and takes 2*w of height starting 0.8*w below
    the detection's top.  If that extends past the camera frame it will be
    clipped downstream.
    """
    top = y + int(w * 0.8)
    box_height = 2 * w
    return x, top, w, box_height
| 28.912281 | 64 | 0.614684 |
d1384a9fb6af37634f385a2ab572ff2eae912757 | 1,401 | py | Python | Chapter 13/cell_transpose.py | ostin-r/automate-boring-stuff-solutions | 78f0a2981e6520ff2907285e666168a0f35eba02 | [
"FTL"
] | 4 | 2021-06-14T10:37:58.000Z | 2021-12-30T17:49:17.000Z | Chapter 13/cell_transpose.py | ostin-r/automate-boring-stuff-solutions | 78f0a2981e6520ff2907285e666168a0f35eba02 | [
"FTL"
] | null | null | null | Chapter 13/cell_transpose.py | ostin-r/automate-boring-stuff-solutions | 78f0a2981e6520ff2907285e666168a0f35eba02 | [
"FTL"
] | 1 | 2021-07-29T15:26:54.000Z | 2021-07-29T15:26:54.000Z | '''
Austin Richards 3/26/21
cell_transpose.py transposes all columns in a sheet
'''
import os
import logging as log
import openpyxl as xl
from openpyxl.utils import get_column_letter
log.basicConfig(level=log.DEBUG, format='%(asctime)s : %(message)s')
def transpose_all(file, out_file='transpose-example.xlsx'):
    """Transpose the active sheet of the workbook at *file* and save it.

    Cell (row r, col c) of the original sheet ends up at (row c, col r).
    The input workbook on disk is left untouched; the transposed copy is
    written to *out_file*.

    Parameters
    ----------
    file : str
        Path of the workbook to read.
    out_file : str, optional
        Path to write the transposed workbook to.  Defaults to
        'transpose-example.xlsx' (the previously hard-coded name) for
        backward compatibility.
    """
    wb = xl.load_workbook(file)
    sheet = wb.active
    n_cols = sheet.max_column  # columns of the ORIGINAL sheet = rows after transpose
    n_rows = sheet.max_row     # rows of the ORIGINAL sheet = columns after transpose
    # Column-major snapshot of the sheet: data[c][r] == cell(row=r+1, col=c+1).
    data = [[] for _ in range(n_cols)]
    # Cut pass: copy every cell into the snapshot and blank it on the sheet.
    for col in range(1, n_cols + 1):
        col_letter = get_column_letter(col)
        for row in range(1, n_rows + 1):
            cell = sheet[col_letter + str(row)]
            data[col - 1].append(cell.value)
            cell.value = ''  # cut
    # Paste pass: write the snapshot back transposed.
    # NOTE(review): for non-square sheets, cells outside the transposed area
    # keep the '' left by the cut pass -- same as the original behaviour.
    for row in range(1, n_cols + 1):
        for col in range(1, n_rows + 1):
            col_letter = get_column_letter(col)
            sheet[col_letter + str(row)].value = data[row - 1][col - 1]
    print('Saving file...')
    wb.save(out_file)
    print('Done!')
# Script entry: workbook paths below are relative to the "Chapter 13" folder,
# so the script must be launched from the repository root.
os.chdir('Chapter 13')
transpose_all('transpose-this.xlsx')
63341e0d5f2f44f6eae40653b0dee466395f719d | 2,928 | py | Python | workshop/forms.py | grinyahoo/wm-django | 1ba221ba989666dba66c45dad9745836f30c42f4 | [
"MIT"
] | null | null | null | workshop/forms.py | grinyahoo/wm-django | 1ba221ba989666dba66c45dad9745836f30c42f4 | [
"MIT"
] | 5 | 2020-06-05T23:30:58.000Z | 2022-02-10T08:27:50.000Z | workshop/forms.py | grinyahoo/wm-django | 1ba221ba989666dba66c45dad9745836f30c42f4 | [
"MIT"
] | null | null | null | import datetime
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from .models import Customer, Employee, Vehicle, Task, Invoice
# TODO: make timezone aware
# (value, label) choice pairs for model-year dropdowns, newest first,
# from the current year down to 1951.
YEARS = tuple((year, year) for year in range(datetime.date.today().year, 1950, -1))
class CustomerForm(forms.ModelForm):
    """Model form for creating/editing a Customer record."""

    class Meta:
        model = Customer
        fields = ['name', 'address', 'city', 'state', 'zip', 'phone']
        widgets = {
            # Render the address as a single-line text input instead of the
            # model field's default widget.
            'address': forms.TextInput()
        }
class TaskForm(forms.ModelForm):
    """Model form for a Task whose select widgets are restricted to the
    requesting user's own records.

    The user is supplied through the keyword-only ``q`` mapping, e.g.
    ``TaskForm(q={'user': request.user})``; a missing ``'user'`` key raises
    ``KeyError`` (unchanged behaviour).
    """

    class Meta:
        model = Task
        fields = ['customer', 'vehicle', 'description', 'amount', 'employee']

    def __init__(self, *args, q=None, **kwargs):
        # Fix: the default used to be the mutable literal ``q={}``, which is
        # created once and shared across all calls; ``None`` is the safe
        # sentinel and preserves the old behaviour exactly.
        super(TaskForm, self).__init__(*args, **kwargs)
        q = q or {}
        user = q['user']
        # Limit every relational dropdown to objects owned by the user.
        self.fields['vehicle'] = forms.ModelChoiceField(
            queryset=Vehicle.objects.filter(user=user)
        )
        self.fields['customer'] = forms.ModelChoiceField(
            queryset=Customer.objects.filter(user=user)
        )
        self.fields['employee'] = forms.ModelChoiceField(
            queryset=Employee.objects.filter(user=user)
        )
class EmployeeForm(forms.ModelForm):
    """Model form for an Employee, rendered with a django-crispy-forms layout."""

    class Meta:
        model = Employee
        exclude = ['user']  # the owning user is attached by the view, not the form
        widgets = {
            'notes': forms.Textarea()
        }

    def __init__(self, *args, **kwargs):
        # Crispy-forms helper describing how the form renders.
        # NOTE(review): the helper is built before super().__init__; this
        # appears harmless since the layout is only read at render time --
        # confirm before reordering.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Fieldset(
                'Add employee',
                'name',
                'cost_per_hour',
                'phone',
                'notes'
            ),
            ButtonHolder(
                Submit('submit', 'Submit', css_class="button white"),
            )
        )
        super(EmployeeForm, self).__init__(*args, **kwargs)
class VehicleForm(forms.ModelForm):
    """Model form for a Vehicle; the year field is a dropdown built from YEARS."""

    class Meta:
        model = Vehicle
        exclude = ['user']  # the owning user is attached by the view
        widgets = {
            # Model years from the current year back to 1951 (see YEARS above).
            'year': forms.Select(choices=YEARS),
        }
class InvoiceForm(forms.ModelForm):
    """Model form for an Invoice: the customer is fixed (hidden input) and the
    user picks the customer's not-yet-invoiced tasks via checkboxes.

    Pass the context through the keyword-only ``q`` mapping, e.g.
    ``InvoiceForm(q={'user': request.user, 'customer': pk})``.  When ``q`` is
    empty/absent no filtering or initial value is applied (unchanged behaviour).
    """

    class Meta:
        model = Invoice
        fields = ['customer', 'tasks',]
        widgets = {
            'customer': forms.HiddenInput(),
            'tasks': forms.CheckboxSelectMultiple(),
        }

    def __init__(self, *args, q=None, **kwargs):
        # Fix: the default used to be the mutable literal ``q={}``, which is
        # created once and shared across all calls; ``None`` is the safe
        # sentinel (both are falsy, so the ``if q:`` guard behaves the same).
        super(InvoiceForm, self).__init__(*args, **kwargs)
        if q:
            # Only this user's uninvoiced tasks for the selected customer.
            self.fields['tasks'].queryset = Task.objects.filter(
                user=q['user'], customer=q['customer'], invoiced=0)
            self.initial['customer'] = Customer.objects.get(
                user=q['user'], id=q['customer']).id
class CustomAuthenticationForm(AuthenticationForm):
    """Login form overriding the default username/password widgets."""
    # Earlier variant kept for reference (placeholder text on the username):
    # username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'username'}))
    username = forms.CharField(widget=forms.TextInput())
    password = forms.CharField(widget=forms.PasswordInput())
| 30.5 | 115 | 0.587773 |
fcc1b2fe3e3da21504f7936a796d802b35286ea4 | 7,653 | py | Python | uos3/configUp/utils.py | aliaksei135/telecommand-server | eb6f23f03ff75a18a5115f3038bb4d4192594627 | [
"MIT"
] | null | null | null | uos3/configUp/utils.py | aliaksei135/telecommand-server | eb6f23f03ff75a18a5115f3038bb4d4192594627 | [
"MIT"
] | null | null | null | uos3/configUp/utils.py | aliaksei135/telecommand-server | eb6f23f03ff75a18a5115f3038bb4d4192594627 | [
"MIT"
] | 1 | 2020-04-22T20:39:49.000Z | 2020-04-22T20:39:49.000Z | import struct
from .models import config
# struct format reference: https://docs.python.org/3/library/struct.html
# Trailing numbers in the comments are the field bit-widths from the SCF spec.
# NOTE(review): no byte-order prefix is given, so native order/alignment
# ('@') applies; if these bytes cross the radio link an explicit '<' or '>'
# is probably intended -- confirm against SCF v2.1 20190830.
CONFIG_BYTES_FORMAT_STR = ('?' # CF00 tx_enable 1
                           'B' # CF01 tx_interval 8
                           'B' # CF02 tx_interval_downlink 8
                           'B' # CF03 tx_datarate 4
                           'B' # CF04 tx_power 4
                           'B' # CF05 batt_overtemp 8
                           'B' # CF06 obc_overtemp 8
                           'B' # CF07 pa_overtemp 8
                           'f' # CF08 low_voltage_threshold 8
                           'f' # CF09 low_voltage_recovery 8
                           'H' # CF10 eps_health_acquisition_interval 16
                           'H' # CF11 check_health_acquisition_interval 16
                           'H' # CF12 imu_acquisition_interval 16
                           'B' # CF13 imu_sample_count 4
                           'H' # CF14 imu_sample_interval 8
                           'B' # CF15 imu_gyro_bandwidth 3
                           'B' # CF16 imu_gyro_measurement_range 2
                           'H' # CF17 gps_acquisition_interval 16
                           'B' # CF18 gps_sample_count 3
                           'H' # CF19 gps_sample_interval 8
                           'B' # CF20 image_acquisition_profile 2
                           '?' # CF21 power_rail_1 1
                           '?' # CF22 power_rail_3 1
                           '?' # CF23 power_rail_5 1
                           '?' # CF24 power_rail_6 1
                           '?' # CF25 imu_accel_enabled 1
                           '?' # CF26 imu_gyro_enabled 1
                           '?' # CF27 imu_magno_enabled 1
                           '?' # CF28 silent_flag 1
                           )

TELECOMMAND_BYTES_FORMAT_STR = ('L' # TC0 time 32
                                'B' # TC1 operational_mode 4
                                '?' # TC2 self_test 1
                                '?' # TC3 reset_power_rail_1 1
                                '?' # TC4 reset_power_rail_2 1
                                '?' # TC5 reset_power_rail_3 1
                                '?' # TC6 reset_power_rail_4 1
                                '?' # TC7 reset_power_rail_5 1
                                '?' # TC8 reset_power_rail_6 1
                                'H' # TC9 telemetry_go_silent 16
                                'L' # TC10 downlink_stop_time 32
                                'L' # TC11 image_acquisition_time 32
                                )


def config_to_binary(instance):
    """Serialise a config model instance to the SCF v2.1 20190830 byte layout.

    Parameters
    ----------
    instance : config
        Object carrying one attribute per TC/CF field listed below.

    Returns
    -------
    tuple of bytes
        ``(telecommand_bytes, config_bytes)`` packed with the module-level
        format strings.
    """
    # Attribute names in the exact order of the format strings above.
    telecommand_fields = (
        'time', 'operational_mode', 'self_test',
        'reset_power_rail_1', 'reset_power_rail_2', 'reset_power_rail_3',
        'reset_power_rail_4', 'reset_power_rail_5', 'reset_power_rail_6',
        'telemetry_go_silent', 'downlink_stop_time', 'image_acquisition_time',
    )
    config_fields = (
        'tx_enable', 'tx_interval', 'tx_interval_downlink', 'tx_datarate',
        'tx_power', 'batt_overtemp', 'obc_overtemp', 'pa_overtemp',
        'low_voltage_threshold', 'low_voltage_recovery',
        'eps_health_acquisition_interval', 'check_health_acquisition_interval',
        'imu_acquisition_interval', 'imu_sample_count', 'imu_sample_interval',
        'imu_gyro_bandwidth', 'imu_gyro_measurement_range',
        'gps_acquisition_interval', 'gps_sample_count', 'gps_sample_interval',
        'image_acquisition_profile', 'power_rail_1', 'power_rail_3',
        'power_rail_5', 'power_rail_6', 'imu_accel_enabled',
        'imu_gyro_enabled', 'imu_magno_enabled', 'silent_flag',
    )
    telecommand_bytes = struct.pack(
        TELECOMMAND_BYTES_FORMAT_STR,
        *(getattr(instance, name) for name in telecommand_fields))
    config_bytes = struct.pack(
        CONFIG_BYTES_FORMAT_STR,
        *(getattr(instance, name) for name in config_fields))
    return telecommand_bytes, config_bytes
def binary_to_config(telecommand_bytes, config_bytes):
    """Decode the SCF v2.1 20190830 byte layout back into a config instance.

    Parameters
    ----------
    telecommand_bytes : bytes
        Packed with TELECOMMAND_BYTES_FORMAT_STR.
    config_bytes : bytes
        Packed with CONFIG_BYTES_FORMAT_STR.

    Returns
    -------
    config
        A new (unsaved) config model instance with every field populated.
    """
    telecommands = struct.unpack(TELECOMMAND_BYTES_FORMAT_STR, telecommand_bytes)
    configuration = struct.unpack(CONFIG_BYTES_FORMAT_STR, config_bytes)
    # Field names in the exact unpack order of each format string.
    config_fields = (
        'tx_enable', 'tx_interval', 'tx_interval_downlink', 'tx_datarate',
        'tx_power', 'batt_overtemp', 'obc_overtemp', 'pa_overtemp',
        'low_voltage_threshold', 'low_voltage_recovery',
        'eps_health_acquisition_interval', 'check_health_acquisition_interval',
        'imu_acquisition_interval', 'imu_sample_count', 'imu_sample_interval',
        'imu_gyro_bandwidth', 'imu_gyro_measurement_range',
        'gps_acquisition_interval', 'gps_sample_count', 'gps_sample_interval',
        'image_acquisition_profile', 'power_rail_1', 'power_rail_3',
        'power_rail_5', 'power_rail_6', 'imu_accel_enabled',
        'imu_gyro_enabled', 'imu_magno_enabled', 'silent_flag',
    )
    telecommand_fields = (
        'time', 'operational_mode', 'self_test',
        'reset_power_rail_1', 'reset_power_rail_2', 'reset_power_rail_3',
        'reset_power_rail_4', 'reset_power_rail_5', 'reset_power_rail_6',
        'telemetry_go_silent', 'downlink_stop_time', 'image_acquisition_time',
    )
    field_values = dict(zip(config_fields, configuration))
    field_values.update(zip(telecommand_fields, telecommands))
    return config(**field_values)
| 48.745223 | 81 | 0.505815 |
11c36fa1b23517bfc79a5c416ec9eb94905bbf1d | 450 | py | Python | export_messages.py | Totskoz/time_counter | 137363c12ceefff7617ba443814bfd0ac336bbf1 | [
"FSFAP"
] | null | null | null | export_messages.py | Totskoz/time_counter | 137363c12ceefff7617ba443814bfd0ac336bbf1 | [
"FSFAP"
] | null | null | null | export_messages.py | Totskoz/time_counter | 137363c12ceefff7617ba443814bfd0ac336bbf1 | [
"FSFAP"
] | null | null | null | # Author: https://github.com/Gugu7264
import os
from discord import Intents
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv("dev.env")
client = commands.Bot(command_prefix=os.getenv("prefix"), intents=Intents.all())
@bot.command()
async def copy(ctx):
with open("file.txt", "w") as f:
async for message in ctx.history(limit=1000):
f.write(message.content + "\n")
await ctx.send("Done!")
| 21.428571 | 80 | 0.691111 |
56167d38f9283a0673c5644999af6e2ea1b39529 | 415 | py | Python | olc_webportalv2/cowbat/migrations/0014_sequencingrun_progress.py | OLC-Bioinformatics/olc_genomics_portal | d70ec669a3a49106f8290fff5dee089726259a23 | [
"MIT"
] | 3 | 2019-01-03T21:22:21.000Z | 2019-04-23T15:47:29.000Z | olc_webportalv2/cowbat/migrations/0014_sequencingrun_progress.py | OLC-Bioinformatics/olc_genomics_portal | d70ec669a3a49106f8290fff5dee089726259a23 | [
"MIT"
] | 49 | 2019-01-03T18:15:12.000Z | 2022-03-11T23:37:20.000Z | olc_webportalv2/cowbat/migrations/0014_sequencingrun_progress.py | OLC-Bioinformatics/olc_webportalv2 | d70ec669a3a49106f8290fff5dee089726259a23 | [
"MIT"
] | 58 | 2019-01-03T21:21:59.000Z | 2021-11-02T18:00:20.000Z | # Generated by Django 2.1.5 on 2019-09-06 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cowbat', '0013_auto_20190823_1932'),
]
operations = [
migrations.AddField(
model_name='sequencingrun',
name='progress',
field=models.CharField(default='Unprocessed', max_length=64),
),
]
| 21.842105 | 73 | 0.614458 |
a1719eda6c997d015daa0c5453cf88f63315b391 | 25,973 | py | Python | atnresilience/create_atn_db.py | TimKrash/airport-viz | 9a56ae2fd6c7dcc508ca722120e8d5860860d816 | [
"Unlicense"
] | null | null | null | atnresilience/create_atn_db.py | TimKrash/airport-viz | 9a56ae2fd6c7dcc508ca722120e8d5860860d816 | [
"Unlicense"
] | null | null | null | atnresilience/create_atn_db.py | TimKrash/airport-viz | 9a56ae2fd6c7dcc508ca722120e8d5860860d816 | [
"Unlicense"
] | null | null | null | import os
from datetime import datetime as datetime
from sqlalchemy import create_engine
import sqlite3
import pandas as pd
# print(os.getcwd())
import db_tools as DBTools
# if __name__ == "__main__":
# import db_tools as DBTools
# else:
# import atnresilience.db_tools as DBTools
class ATNLoader(DBTools.DBLoader):
    """Creates and populates the ``atn_performance`` table from BTS on-time
    performance CSVs (one file per month, named ``YYYYMM.csv`` in the raw
    data folder)."""

    def __init__(self, db_path=None, raw_path=None):
        """
        Initialize common parameters used for the database and inputs.

        If none given, they are defaulted to their expected locations within
        the repository (resolved relative to the PARENT of the current
        working directory):

        Default Database Location: /data/processed/atn_db.sqlite
        Default Raw Data Folder: /data/raw/

        Parameters
        ----------
        db_path: string
            Path to the database.
        raw_path: string
            Path to folder containing raw data (kept with a trailing
            separator, as import_data concatenates filenames onto it).
        """
        root_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
        if db_path:
            self.db_path = db_path
        else:
            self.db_path = os.path.join(root_dir, 'data', 'processed', 'atn_db.sqlite')
        if raw_path:
            self.raw_path = raw_path
        else:
            self.raw_path = os.path.join(root_dir, 'data', 'raw', '')

    def col_parse(self, cols):
        """
        Build the column-definition fragment of a CREATE TABLE statement.

        Each requested column name is looked up in the type groups below and
        appended as ``'name' TYPE`` to a comma-separated string.  Names not
        found in any group are reported and skipped.

        Parameters
        ----------
        cols : list
            Column names to include (must match the raw-data column names).

        Returns
        -------
        db_cols : string
            Comma-separated SQL column definitions (without the leading comma).
        """
        db_cols = ""
        # Column groups by intended SQL type.
        # NOTE(review): the *_not_null groups are emitted with "NULL" (not
        # "NOT NULL") in the branches below -- looks like a typo or a
        # deliberate relaxation; confirm before changing, since existing
        # tables/inserts may rely on it.  Also note int_null contains
        # duplicated entries ('Arr_Del_15', 'Div_Airport_2_ID') -- harmless
        # for membership tests but worth cleaning up.
        int_not_null = ['Quarter','Month','Day_of_Month','Day_Of_Week','Airline_ID','Origin_Airport_ID','Origin_Airport_Seq_ID','Origin_Market_ID','OriginWac','Destination_Airport_ID','DestAirportSeqID','Destination_Market_ID','Can_Status']
        int_null = ['Year','Flight_Number','Day_Of_Year','Origin_State_Fips','Destination_State_Fips','Dest_Wac','Dep_Delay','Pos_Dep_Delay','Dep_Del_15','Departure_Delay_Groups','Taxi_Out','Taxi_In','Arr_Delay','Pos_Arr_Delay','Arr_Del_15','Arrival_Delay_Minutes','Arr_Del_15','Arrival_Delay_Groups','Div_Status','Scheduled_Elapsed_Time','Actual_Elapsed_Time','Air_Time','Flights','Distance','Distance_Group','Carrier_Delay','Weather_Delay','Natl_Airspace_System_Delay','Security_Delay','Late_Aircraft_Delay','Total_Add_G_Time','Longest_Add_G_Time','Div_Airport_Landings','Div_Landing_Status','Div_Elapsed_Time','Div_Arrival_Delay','Div_Distance','Div_Airport_1_ID','Div_Airport_1_Seq_ID','Div_1_Total_G_Time','Div_1_Longest_G_Time','Div_Airport_2_ID','Div_Airport_2_ID','Div_2_Total_G_Time','Div_2_Longest_G_Time','Div_Airport_3_ID','Div_Airport_3_Seq_ID','Div_3_Total_G_Time','Div_3_Longest_G_Time','Div_Airport_4_ID','Div_Airport_4_Seq_ID','Div_4_Total_G_Time','Div_4_Longest_G_Time','Div_Airport_5_ID','Div_Airport_5_Seq_ID','Div_5_Total_G_Time','Div_5_Longest_G_Time','Combined_Arr_Delay']
        date_not_null = ['Flight_Date']
        time_not_null = ['Scheduled_Dep_Time','Scheduled_Arr_Time']
        var_10_null = ['Unique_Carrier_ID','Carrier','Origin_Airport_Code','Origin_State','Destination_Airport_Code','Actual_Dep_Time']
        var_10_not_null = ['Dest_State']
        var_45_null = ['Tail_Number','Origin_City_Name','Origin_State_Name','Dest_City_Name','Dest_State_Name','Dep_Time_Blk','Wheels_Off','Wheels_On','Actual_Arr_Time','Arr_Time_Blk','Can_Reason','First_Dep_Time','Div_Airport_1','Div_1_Wheels_On','Div_1_Wheels_Off','Div_1_Tail_Num','Div_Airport_2','Div_2_Wheels_On','Div_2_Wheels_Off','Div_2_Tail_Num','Div_Airport_3','Div_3_Wheels_On','Div_3_Wheels_Off','Div_3_Tail_Num','Div_Airport_4','Div_4_Wheels_On','Div_4_Wheels_Off','Div_4_Tail_Num','Div_Airport_5','Div_5_Wheels_On','Div_5_Wheels_Off','Div_5_Tail_Num']
        # Read the provided cols list and create the string for the db columns
        # while appending the data types.  If a name is given that is not a
        # column in the raw data, it is reported and skipped.
        for line in cols:
            if line in int_not_null:
                db_cols = db_cols + ",'" + line + "' INT NULL"
            elif line in int_null:
                db_cols = db_cols + ",'" + line + "' INT"
            elif line in date_not_null:
                db_cols = db_cols + ",'" + line + "' DATE NULL"
            elif line in time_not_null:
                db_cols = db_cols + ",'" + line + "' TIME NULL"
            elif line in var_10_null:
                db_cols = db_cols + ",'" + line + "' VARCHAR(10) NULL"
            elif line in var_10_not_null:
                db_cols = db_cols + ",'" + line + "' VARCHAR(10) NULL"
            elif line in var_45_null:
                db_cols = db_cols + ",'" + line + "' VARCHAR(45)"
            else:
                print("Column %s does not exist in data." %line)
        # [1:] because the first character is a leading comma we want to omit.
        return(db_cols[1:])

    def create_db(self):
        """
        Creates the table atn_performance in the database at the configured
        location if one does not exist.

        The column set is fixed (the project spec); rows are de-duplicated on
        (Flight_Date, Origin_Airport_ID, Unique_Carrier_ID, Flight_Number)
        with ON CONFLICT REPLACE, so re-importing a month overwrites rather
        than duplicates.

        Returns
        -------
        None.  If the table already exists, no action is taken.
        """
        # Columns for the database table based on the data used; does not
        # need to be changed.
        cols = ["Year", "Flight_Date", "Day_Of_Year", "Unique_Carrier_ID", "Airline_ID", "Tail_Number", "Flight_Number",
                "Origin_Airport_ID", "Origin_Market_ID", "Origin_Airport_Code", "Origin_State", "Destination_Airport_ID",
                "Destination_Market_ID", "Destination_Airport_Code", "Dest_State", "Scheduled_Dep_Time", "Actual_Dep_Time",
                "Dep_Delay", "Pos_Dep_Delay", "Scheduled_Arr_Time", "Actual_Arr_Time", "Arr_Delay", "Pos_Arr_Delay",
                "Combined_Arr_Delay", "Can_Status", "Can_Reason", "Div_Status", "Scheduled_Elapsed_Time",
                "Actual_Elapsed_Time", "Carrier_Delay", "Weather_Delay", "Natl_Airspace_System_Delay", "Security_Delay",
                "Late_Aircraft_Delay", "Div_Airport_Landings", "Div_Landing_Status", "Div_Elapsed_Time", "Div_Arrival_Delay",
                "Div_Airport_1_ID", "Div_1_Tail_Num", "Div_Airport_2_ID", "Div_2_Tail_Num", "Div_Airport_3_ID", "Div_3_Tail_Num",
                "Div_Airport_4_ID", "Div_4_Tail_Num", "Div_Airport_5_ID", "Div_5_Tail_Num"]
        db_cols = self.col_parse(cols)
        sql = '''
            CREATE TABLE IF NOT EXISTS atn_performance (
            %s,
            UNIQUE(Flight_Date, Origin_Airport_ID, Unique_Carrier_ID, Flight_Number) ON CONFLICT REPLACE
            )
            '''%(db_cols,)
        # Execute the SQL statement
        self.db_query(sql)

    def import_data(self, year):
        """
        Imports the data for a specified year (all 12 months) to the database.

        Reads ``<raw_path><YYYYMM>.csv`` for each month, keeps only the fixed
        column set below (tolerating the two BTS naming schemes, e.g.
        UNIQUE_CARRIER vs OP_UNIQUE_CARRIER), derives Combined_Arr_Delay,
        Day_Of_Year and Year, renames columns to the db schema and inserts
        month by month.

        Parameters
        ----------
        year: int
            Year of data to import.

        Returns
        -------
        None; progress is printed per month and at completion.
        """
        # Columns accepted from the raw csv (either BTS naming scheme).
        import_cols = ['FL_DATE', 'UNIQUE_CARRIER', 'OP_UNIQUE_CARRIER', 'AIRLINE_ID', 'OP_CARRIER_AIRLINE_ID',
                       'TAIL_NUM', 'FL_NUM', 'OP_CARRIER_FL_NUM', 'ORIGIN_AIRPORT_ID', 'ORIGIN_CITY_MARKET_ID', 'ORIGIN',
                       'ORIGIN_STATE_ABR', 'DEST_AIRPORT_ID', 'DEST_CITY_MARKET_ID', 'DEST', 'DEST_STATE_ABR', 'CRS_DEP_TIME',
                       'DEP_TIME', 'DEP_DELAY', 'DEP_DELAY_NEW', 'CRS_ARR_TIME', 'ARR_TIME', 'ARR_DELAY', 'ARR_DELAY_NEW', 'CANCELLED',
                       'CANCELLATION_CODE', 'DIVERTED', 'CRS_ELAPSED_TIME', 'ACTUAL_ELAPSED_TIME', 'CARRIER_DELAY', 'WEATHER_DELAY',
                       'NAS_DELAY','SECURITY_DELAY', 'LATE_AIRCRAFT_DELAY', 'DIV_AIRPORT_LANDINGS', 'DIV_REACHED_DEST', 'DIV_ACTUAL_ELAPSED_TIME',
                       'DIV_ARR_DELAY', 'DIV1_AIRPORT_ID', 'DIV1_TAIL_NUM', 'DIV2_AIRPORT_ID', 'DIV2_TAIL_NUM', 'DIV3_AIRPORT_ID', 'DIV3_TAIL_NUM',
                       'DIV4_AIRPORT_ID', 'DIV4_TAIL_NUM', 'DIV5_AIRPORT_ID', 'DIV5_TAIL_NUM']
        # Raw-column -> database-column rename map (both naming schemes map
        # onto the same db columns).
        import_dict = {'FL_DATE': 'Flight_Date',
                       'UNIQUE_CARRIER': 'Unique_Carrier_ID',
                       'OP_UNIQUE_CARRIER': 'Unique_Carrier_ID',
                       'AIRLINE_ID': 'Airline_ID',
                       'OP_CARRIER_AIRLINE_ID': 'Airline_ID',
                       'TAIL_NUM': 'Tail_Number',
                       'FL_NUM': 'Flight_Number',
                       'OP_CARRIER_FL_NUM': 'Flight_Number',
                       'ORIGIN_AIRPORT_ID': 'Origin_Airport_ID',
                       'ORIGIN_CITY_MARKET_ID': 'Origin_Market_ID',
                       'ORIGIN': 'Origin_Airport_Code',
                       'ORIGIN_STATE_ABR': 'Origin_State',
                       'DEST_AIRPORT_ID': 'Destination_Airport_ID',
                       'DEST_CITY_MARKET_ID': 'Destination_Market_ID',
                       'DEST': 'Destination_Airport_Code',
                       'DEST_STATE_ABR': 'Dest_State',
                       'CRS_DEP_TIME': 'Scheduled_Dep_Time',
                       'DEP_TIME': 'Actual_Dep_Time',
                       'DEP_DELAY': 'Dep_Delay',
                       'DEP_DELAY_NEW': 'Pos_Dep_Delay',
                       'CRS_ARR_TIME': 'Scheduled_Arr_Time',
                       'ARR_TIME': 'Actual_Arr_Time',
                       'ARR_DELAY': 'Arr_Delay',
                       'ARR_DELAY_NEW': 'Pos_Arr_Delay',
                       'CANCELLED': 'Can_Status',
                       'CANCELLATION_CODE': 'Can_Reason',
                       'DIVERTED': 'Div_Status',
                       'CRS_ELAPSED_TIME': 'Scheduled_Elapsed_Time',
                       'ACTUAL_ELAPSED_TIME': 'Actual_Elapsed_Time',
                       'CARRIER_DELAY': 'Carrier_Delay',
                       'WEATHER_DELAY': 'Weather_Delay',
                       'NAS_DELAY': 'Natl_Airspace_System_Delay',
                       'SECURITY_DELAY': 'Security_Delay',
                       'LATE_AIRCRAFT_DELAY': 'Late_Aircraft_Delay',
                       'DIV_AIRPORT_LANDINGS': 'Div_Airport_Landings',
                       'DIV_REACHED_DEST': 'Div_Landing_Status',
                       'DIV_ACTUAL_ELAPSED_TIME': 'Div_Elapsed_Time',
                       'DIV_ARR_DELAY': 'Div_Arrival_Delay',
                       'DIV1_AIRPORT_ID': 'Div_Airport_1_ID',
                       'DIV1_TAIL_NUM': 'Div_1_Tail_Num',
                       'DIV2_AIRPORT_ID': 'Div_Airport_2_ID',
                       'DIV2_TAIL_NUM': 'Div_2_Tail_Num',
                       'DIV3_AIRPORT_ID': 'Div_Airport_3_ID',
                       'DIV3_TAIL_NUM': 'Div_3_Tail_Num',
                       'DIV4_AIRPORT_ID': 'Div_Airport_4_ID',
                       'DIV4_TAIL_NUM': 'Div_4_Tail_Num',
                       'DIV5_AIRPORT_ID': 'Div_Airport_5_ID',
                       'DIV5_TAIL_NUM': 'Div_5_Tail_Num'}
        # Loop through the csv for each month of the year.  During each
        # iteration, derive Combined_Arr_Delay and normalise the CRS dep/arr
        # columns to the 0000 format, then insert the month into the db.
        for i in range(1,13):
            csv_import_cols = []
            month = str(year*100+i)  # e.g. 2018*100+3 -> "201803"
            csv_cols = list(pd.read_csv('%s%s.csv' %(self.raw_path,month)).columns.values)
            # Keep only the headers actually present in import_cols
            # (the csv is read twice: once for headers, once for data).
            for column in csv_cols:
                if column in import_cols:
                    csv_import_cols.append(column)
                else:
                    continue
            raw_file = '%s%s.csv' %(self.raw_path,month)
            import_df = pd.read_csv(raw_file, low_memory=False, usecols = csv_import_cols)
            # Effective arrival delay: the larger of normal and diverted delay.
            import_df['Combined_Arr_Delay'] = import_df[['ARR_DELAY', 'DIV_ARR_DELAY']].max(axis=1)
            if "CRS_DEP_TIME" in import_cols:
                # 2400 -> 0 (0000 is just the int literal 0); zfill renders "0000".
                import_df.loc[import_df.CRS_DEP_TIME == 2400, 'CRS_DEP_TIME'] = 0000
                import_df.CRS_DEP_TIME = import_df.CRS_DEP_TIME.astype(str).str.zfill(4)
            if "CRS_ARR_TIME" in import_cols:
                import_df.loc[import_df.CRS_ARR_TIME == 2400, 'CRS_ARR_TIME'] = 0000
                import_df.CRS_ARR_TIME = import_df.CRS_ARR_TIME.astype(str).str.zfill(4)
            #df_all = pd.concat([df_all,import_df],ignore_index=True)
            #df_all.append(import_df, ignore_index=True)
            import_df.rename(columns=import_dict, inplace=True) #change all the col names
            import_df['Day_Of_Year'] = pd.to_datetime(import_df.Flight_Date.astype(str) + ' ' + import_df.Scheduled_Dep_Time.astype(str)).dt.dayofyear
            import_df['Year'] = year
            # Load to database
            self.df_to_db('atn_performance', import_df)
            print("Finished inserting month %s to DB." %(i,))
        print("Finished inserting data for year %s" %(year,))
        #After the df_all dataframe is created fro the whole year, change the column names based on rename_cols
        #renamed_cols = ["Flight_Date", "Date_Time", "Unique_Carrier_ID", "Airline_ID", "Tail_Number", "Flight_Number", "Origin_Airport_ID", "Origin_Market_ID ", "Origin_Airport_Code", "Origin_State", "Destination_Airport_ID", "Destination_Market_ID", "Destination_Airport_Code", "Dest_State", "Scheduled_Dep_Time", "Actual_Dep_Time", "Dep_Delay", "Pos_Dep_Delay", "Scheduled_Arr_Time", "Actual_Arr_Time", "Arr_Delay", "Pos_Arr_Delay", "Can_Status", "Can_Reason", "Div_Status", "Scheduled_Elapsed_Time", "Actual_Elapsed_Time", "Carrier_Delay", "Weather_Delay", "Natl_Airspace_System_Delay","Security_Delay", "Late_Aircraft_Delay", "Div_Airport_Landings", "Div_Landing_Status", "Div_Elapsed_Time", "Div_Arrival_Delay", "Div_Airport_1_ID", "Div_1_Tail_#", "Div_Airport_2_ID", "Div_2_Tail_#", "Div_Airport_3_ID", "Div_3_Tail_#", "Div_Airport_4_ID", "Div_4_Tail_#", "Div Airport_5_ID", "Div_5_Tail_#"]
class CoordinateLoader(DBTools.DBLoader):
    """Creates and populates the ``airportCoords`` lookup table
    (IATA code -> latitude/longitude) from the openflights extended
    airport dataset."""

    def __init__(self, db_path=None, raw_path=None):
        """
        Initialize common parameters used for the coordinate database and inputs.
        If none given, they are defaulted to their expected locations within
        the repository (resolved relative to the PARENT of the current
        working directory):

        Default Database Location: /data/processed/atn_db.sqlite
        Default Raw Data Folder: /data/raw/

        Parameters
        ----------
        db_path: string
            Path to the database.
        raw_path: string
            Path to the folder containing raw data.
        """
        root_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
        if db_path:
            self.db_path = db_path
        else:
            self.db_path = os.path.join(root_dir, 'data', 'processed', 'atn_db.sqlite')
        if raw_path:
            self.raw_path = raw_path
        else:
            self.raw_path = os.path.join(root_dir, 'data', 'raw', '')

    def create_coords_table(self):
        """
        Creates the table airportCoords in the database at the configured
        location if one does not exist.

        Data can be downloaded from openflights: https://openflights.org/data.html
        The extended dataset is used as some airports that appear in BTS data
        are not in the "clean" set.  Rows are de-duplicated on IATA with
        ON CONFLICT REPLACE.

        Returns
        -------
        None.  If the table already exists, no action is taken.
        """
        # Column names match the header of the raw csv.
        query = '''
            CREATE TABLE IF NOT EXISTS airportCoords(
            IATA TEXT,
            lat DECIMAL,
            long DECIMAL,
            UNIQUE(IATA) ON CONFLICT REPLACE)
            '''
        self.db_query(query)

    def import_coords_data(self):
        """
        Imports the airport coordinate data (IATA, lat, long columns of
        ``<raw_path>/airport_data.csv``) into the airportCoords table.

        Returns
        -------
        None.
        """
        import_cols = ['IATA', 'lat', 'long']
        load_file_path = os.path.join(self.raw_path, 'airport_data.csv')
        coord_df = pd.read_csv(load_file_path, usecols = import_cols)
        self.df_to_db('airportCoords', coord_df)
class ACDataLoader(DBTools.DBLoader):
    """
    Create and load a database for US aircraft registration data from the
    FAA N-Number registry database (MASTER.txt, DEREG.txt, AcftRef.txt).
    """

    def __init__(self, db_path=None, raw_path=None):
        """
        Initialize common parameters used for the database and inputs.
        If none given, they are defaulted to their expected locations within
        the repository (resolved relative to the PARENT of the current
        working directory):

        Default Database Location: /data/processed/atn_db.sqlite
        Default Raw Data Folder: /data/raw/

        Parameters
        ----------
        db_path: string
            Path to the database.
        raw_path: string
            Path to folder containing raw data.
        """
        root_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
        if db_path:
            self.db_path = db_path
        else:
            self.db_path = os.path.join(root_dir, 'data', 'processed', 'atn_db.sqlite')
        if raw_path:
            self.raw_path = raw_path
        else:
            self.raw_path = os.path.join(root_dir, 'data', 'raw', '')

    def create_registry_table(self):
        """
        Creates the table nnum_master in the database at the configured
        location if one does not exist.

        The nnum_master table holds the releaseable-aircraft MASTER data of
        the FAA N-Number registry.  The FAA data can be obtained from:
        https://www.faa.gov/licenses_certificates/aircraft_certification/aircraft_registry/releasable_aircraft_download/

        NOTE(review): a NAME column is declared but import_current_nnum_data
        below does not read a name column, so it remains NULL -- confirm
        whether that is intended.

        Returns
        -------
        None.  If the table already exists, no action is taken.
        """
        query = '''
            CREATE TABLE IF NOT EXISTS nnum_master(
            N_NUMBER TEXT,
            NAME TEXT,
            MFR_CODE TEXT,
            SERIAL_NUM TEXT,
            ISSUE_DATE TEXT,
            UNIQUE(N_NUMBER) ON CONFLICT REPLACE
            )
            '''
        self.db_query(query)

    def create_dereg_table(self):
        """
        Creates the table nnum_dereg in the database at the configured
        location if one does not exist.

        The nnum_dereg table holds the de-registered aircraft (DEREG) data of
        the FAA N-Number registry; it mirrors nnum_master plus a CANCEL_DATE.
        The FAA data can be obtained from:
        https://www.faa.gov/licenses_certificates/aircraft_certification/aircraft_registry/releasable_aircraft_download/

        Returns
        -------
        None.  If the table already exists, no action is taken.
        """
        query = '''
            CREATE TABLE IF NOT EXISTS nnum_dereg(
            N_NUMBER TEXT,
            NAME TEXT,
            MFR_CODE TEXT,
            SERIAL_NUM TEXT,
            ISSUE_DATE TEXT,
            CANCEL_DATE TEXT,
            UNIQUE(N_NUMBER) ON CONFLICT REPLACE
            )
            '''
        self.db_query(query)

    def create_ac_ref_table(self):
        '''
        Creates the table ac_ref in the database at the configured location
        if one does not exist.

        The ac_ref table contains the data from ACTREF of the FAA N-Number
        registry: per aircraft type code, the manufacturer, model, weight
        class, speed, seat count and aircraft type.
        '''
        query = '''
            CREATE TABLE IF NOT EXISTS ac_ref(
            CODE TEXT,
            NO_SEATS INTEGER,
            MFR TEXT,
            MODEL TEXT,
            AC_WEIGHT TEXT,
            SPEED INT,
            TYPE_AC INT,
            UNIQUE(CODE) ON CONFLICT REPLACE
            )
            '''
        self.db_query(query)

    def import_current_nnum_data(self):
        """
        Imports the FAA n-number registry (MASTER.txt in the raw data folder)
        into the nnum_master table.

        N-numbers are stripped and prefixed with 'N'; issue dates are
        converted from YYYYMMDD to YYYY-MM-DD, with empty values filled as
        1990-01-01.
        """
        nnum_col_dict = {
            'N-Number' : 'N_NUMBER',
            'MFR MDL Code' : 'MFR_CODE',
            'Serial Number' : 'SERIAL_NUM',
            'Cert Issue Date' : 'ISSUE_DATE'
        }
        nnum_import_cols = ['N-Number', 'MFR MDL Code', 'Serial Number','Cert Issue Date']
        nnum_load_file_path = os.path.join(self.raw_path,'MASTER.txt')
        nnum_df = pd.read_csv(nnum_load_file_path,usecols=nnum_import_cols)
        nnum_df.rename(columns=nnum_col_dict,inplace=True)
        # Cleanup and prepend N to N number
        nnum_df['N_NUMBER'] = 'N' + nnum_df['N_NUMBER'].apply(lambda x: x.strip())
        # fill empty issue dates with Jan 1 1990
        # nnum_df['ISSUE_DATE'] = nnum_df['ISSUE_DATE'].apply(lambda x: '19900101' if x.strip() == '' else x)
        nnum_df['ISSUE_DATE'] = nnum_df['ISSUE_DATE'].apply(
            lambda x: '1990-01-01' if x.strip() == '' else datetime.strptime(x,'%Y%m%d').strftime('%Y-%m-%d'))
        # nnum_df['ISSUE_DATE'] = nnum_df['ISSUE_DATE'].apply(
        #     lambda x: datetime.strptime(x,'%Y%m%d').strftime('%Y-%m-%d'))
        self.df_to_db('nnum_master',nnum_df)

    def import_dereg_nnum_data(self):
        """
        Imports the FAA deregistered n-number registry (DEREG.txt in the raw
        data folder) into the nnum_dereg table.

        Empty issue dates are filled with 1900-01-01 and empty cancel dates
        with 1990-01-01; all dates are converted from YYYYMMDD to YYYY-MM-DD.
        NOTE(review): the empty-value fillers differ from
        import_current_nnum_data (1900 vs 1990) -- confirm that asymmetry is
        intentional.
        """
        nnum_col_dict = {
            'N-NUMBER' : 'N_NUMBER',
            'MFR-MDL-CODE' : 'MFR_CODE',
            'SERIAL-NUMBER' : 'SERIAL_NUM',
            'CERT-ISSUE-DATE' : 'ISSUE_DATE',
            'CANCEL-DATE' : 'CANCEL_DATE'
        }
        nnum_import_cols = ['N-NUMBER', 'MFR-MDL-CODE', 'SERIAL-NUMBER','CERT-ISSUE-DATE','CANCEL-DATE']
        nnum_load_file_path = os.path.join(self.raw_path,'DEREG.txt')
        nnum_df = pd.read_csv(nnum_load_file_path,usecols=nnum_import_cols)
        nnum_df.rename(columns=nnum_col_dict,inplace=True)
        # Cleanup
        nnum_df['N_NUMBER'] = 'N' + nnum_df['N_NUMBER'].apply(lambda x: x.strip())
        # Fill empty cells or convert to datetime format otherwise
        nnum_df['ISSUE_DATE'] = nnum_df['ISSUE_DATE'].apply(
            lambda x: '1900-01-01' if x.strip() == '' else datetime.strptime(x,'%Y%m%d').strftime('%Y-%m-%d'))
        nnum_df['CANCEL_DATE'] = nnum_df['CANCEL_DATE'].apply(
            lambda x: '1990-01-01' if x.strip() == '' else datetime.strptime(x,'%Y%m%d').strftime('%Y-%m-%d'))
        self.df_to_db('nnum_dereg',nnum_df)

    def import_acref_data(self):
        """
        Imports the FAA ACTREF data (AcftRef.txt in the raw data folder)
        into the ac_ref table.

        NOTE(review): unlike the other importers this delegates to
        self.csv_loader (inherited from DBTools.DBLoader) rather than
        read_csv + df_to_db -- behaviour depends on that helper.
        """
        acref_col_dict = {
            'CODE' : 'CODE',
            'NO-SEATS' : 'NO_SEATS',
            'MFR' : 'MFR',
            'MODEL' : 'MODEL',
            'AC-WEIGHT' : 'AC_WEIGHT',
            'SPEED' : 'SPEED',
            'TYPE-ACFT' : 'TYPE_AC'
        }
        acref_import_cols = ['CODE', 'NO-SEATS', 'MFR', 'MODEL', 'AC-WEIGHT','SPEED','TYPE-ACFT']
        acref_load_file_path = os.path.join(self.raw_path,'AcftRef.txt')
        self.csv_loader(acref_load_file_path,acref_import_cols,acref_col_dict,'ac_ref')
def main():
    """Command-line entry point; currently a no-op placeholder."""
# Run main() only when executed directly as a script, not on import.
if __name__ == "__main__":
    main()
7a46d83e076f7f70f0864eb6b75d7462a1145964 | 241 | py | Python | setup.py | deeplego/wl-graph-kernels | ea046737f91017380090cb8d061efc1a44fefb5e | [
"MIT"
] | 20 | 2019-05-29T20:57:36.000Z | 2021-08-21T09:32:34.000Z | setup.py | deeplego/wl-graph-kernels | ea046737f91017380090cb8d061efc1a44fefb5e | [
"MIT"
] | null | null | null | setup.py | deeplego/wl-graph-kernels | ea046737f91017380090cb8d061efc1a44fefb5e | [
"MIT"
] | 4 | 2019-05-13T08:08:28.000Z | 2021-08-31T14:31:14.000Z | from setuptools import setup, find_packages
from wlkernel import __version__
# Distribution metadata; the version is taken from wlkernel.__version__ so
# it is defined in exactly one place.
setup(
    name='wlkernel',
    version=__version__,
    description='Weisfeiler-Lehman kernel for RDF graphs',
    packages=find_packages(exclude=['tests']),
)
| 20.083333 | 58 | 0.746888 |
c8e1833c8f013907afb20925decef986ee144f05 | 28,233 | py | Python | flexget/plugins/clients/deluge.py | vxcamiloxv/Flexget | f18e53b59b768515d8e67464b8cc41bddfc00c33 | [
"MIT"
] | null | null | null | flexget/plugins/clients/deluge.py | vxcamiloxv/Flexget | f18e53b59b768515d8e67464b8cc41bddfc00c33 | [
"MIT"
] | null | null | null | flexget/plugins/clients/deluge.py | vxcamiloxv/Flexget | f18e53b59b768515d8e67464b8cc41bddfc00c33 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import base64
import re
import sys
import logging
import os
import time
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.pathscrub import pathscrub
from flexget.utils.template import RenderError
log = logging.getLogger('deluge')
class DelugePlugin(object):
    """Shared base for deluge plugins: daemon connection settings and client setup."""

    def on_task_start(self, task, config):
        """Fail early if we can't import/configure the deluge client."""
        self.setup_client(config)

    def setup_client(self, config):
        """Build and return a DelugeRPCClient from ``config`` (does not connect)."""
        try:
            from deluge_client import DelugeRPCClient
        except ImportError as e:
            log.debug('Error importing deluge-client: %s' % e)
            raise plugin.DependencyError('deluge', 'deluge-client',
                                         'deluge-client >=1.5 is required. `pip install deluge-client` to install.',
                                         log)
        config = self.prepare_config(config)
        is_local = config['host'] in ['localhost', '127.0.0.1']
        if is_local and not config.get('username'):
            # No username given for a local daemon: fall back to the
            # 'localclient' credentials from Deluge's own auth file.
            auth = self.get_localhost_auth()
            if auth and auth[0]:
                config['username'], config['password'] = auth
            else:
                raise plugin.PluginError('Unable to get local authentication info for Deluge. You may need to '
                                         'specify an username and password from your Deluge auth file.')
        return DelugeRPCClient(config['host'], config['port'], config['username'], config['password'],
                               decode_utf8=True)

    def prepare_config(self, config):
        """Fill in default host/port and return the (mutated) config dict."""
        for key, default in (('host', 'localhost'), ('port', 58846)):
            config.setdefault(key, default)
        return config

    @staticmethod
    def get_localhost_auth():
        """Return (username, password) for 'localclient' from Deluge's auth file, or None."""
        if sys.platform.startswith('win'):
            auth_file = os.path.join(os.getenv('APPDATA'), 'deluge', 'auth')
        else:
            auth_file = os.path.expanduser('~/.config/deluge/auth')
        if not os.path.isfile(auth_file):
            return None
        with open(auth_file) as auth:
            for raw_line in auth:
                entry = raw_line.strip()
                # Skip blank lines and comments.
                if not entry or entry.startswith('#'):
                    continue
                fields = entry.split(':')
                if fields[0] == 'localclient':
                    return fields[0], fields[1]
class InputDeluge(DelugePlugin):
    """Create entries for torrents in the deluge session."""

    # Fields we provide outside of the deluge_ prefixed namespace.
    # Values are either the flexget field name, or a (field name, formatter)
    # tuple where the formatter is applied to the raw deluge value.
    settings_map = {
        'name': 'title',
        'hash': 'torrent_info_hash',
        'num_peers': 'torrent_peers',
        'num_seeds': 'torrent_seeds',
        'total_size': ('content_size', lambda size: size / 1024 / 1024),
        'files': ('content_files', lambda file_dicts: [f['path'] for f in file_dicts])}

    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'config_path': {'type': 'string', 'format': 'path'},
                    'filter': {
                        'type': 'object',
                        'properties': {
                            'label': {'type': 'string'},
                            'state': {
                                'type': 'string',
                                'enum': ['active', 'downloading', 'seeding', 'queued', 'paused']
                            }
                        },
                        'additionalProperties': False
                    },
                },
                'additionalProperties': False
            }
        ]
    }

    def on_task_start(self, task, config):
        config = self.prepare_config(config)
        super(InputDeluge, self).on_task_start(task, config)

    def prepare_config(self, config):
        """Normalize the config: deluge expects lowercase labels and capitalized state names."""
        if isinstance(config, bool):
            config = {}
        if 'filter' in config:
            # Named filter_ to avoid shadowing the `filter` builtin.
            filter_ = config['filter']
            if 'label' in filter_:
                filter_['label'] = filter_['label'].lower()
            if 'state' in filter_:
                filter_['state'] = filter_['state'].capitalize()
        super(InputDeluge, self).prepare_config(config)
        return config

    def on_task_input(self, task, config):
        """Generates and returns a list of entries from the deluge daemon."""
        config = self.prepare_config(config)
        client = self.setup_client(config)
        client.connect()
        entries = self.generate_entries(client, config)
        client.disconnect()
        return entries

    def generate_entries(self, client, config):
        """Build one Entry per torrent in the daemon's session, applying any configured filter."""
        entries = []
        filter_ = config.get('filter', {})
        torrents = client.call('core.get_torrents_status', filter_ or {}, [])
        for torrent_hash, torrent_dict in torrents.items():
            # Make sure it has a url so no plugins crash
            entry = Entry(deluge_id=torrent_hash, url='')
            config_path = os.path.expanduser(config.get('config_path', ''))
            if config_path:
                torrent_path = os.path.join(config_path, 'state', torrent_hash + '.torrent')
                if os.path.isfile(torrent_path):
                    entry['location'] = torrent_path
                    if not torrent_path.startswith('/'):
                        torrent_path = '/' + torrent_path
                    entry['url'] = 'file://' + torrent_path
                else:
                    log.warning('Did not find torrent file at %s', torrent_path)
            for key, value in torrent_dict.items():
                # All fields provided by deluge get placed under the deluge_ namespace
                entry['deluge_' + key] = value
                # Some fields also get special handling
                if key in self.settings_map:
                    flexget_key = self.settings_map[key]
                    if isinstance(flexget_key, tuple):
                        flexget_key, format_func = flexget_key
                        value = format_func(value)
                    entry[flexget_key] = value
            entries.append(entry)
        return entries
class OutputDeluge(DelugePlugin):
    """Add the torrents directly to deluge, supporting custom save paths."""
    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'action': {'type': 'string', 'enum': ['add', 'remove', 'purge', 'pause', 'resume']},
                    'path': {'type': 'string'},
                    'move_completed_path': {'type': 'string'},
                    'label': {'type': 'string'},
                    'queue_to_top': {'type': 'boolean'},
                    'auto_managed': {'type': 'boolean'},
                    'max_up_speed': {'type': 'number'},
                    'max_down_speed': {'type': 'number'},
                    'max_connections': {'type': 'integer'},
                    'max_up_slots': {'type': 'integer'},
                    'ratio': {'type': 'number'},
                    'remove_at_ratio': {'type': 'boolean'},
                    'add_paused': {'type': 'boolean'},
                    'compact': {'type': 'boolean'},
                    'content_filename': {'type': 'string'},
                    'main_file_only': {'type': 'boolean'},
                    'main_file_ratio': {'type': 'number'},
                    'magnetization_timeout': {'type': 'integer'},
                    'keep_subs': {'type': 'boolean'},
                    'hide_sparse_files': {'type': 'boolean'},
                    'enabled': {'type': 'boolean'},
                    'container_directory': {'type': 'string'},
                },
                'additionalProperties': False
            }
        ]
    }

    def prepare_config(self, config):
        """Expand boolean shorthand into a dict and fill in defaults."""
        if isinstance(config, bool):
            config = {'enabled': config}
        super(OutputDeluge, self).prepare_config(config)
        config.setdefault('enabled', True)
        config.setdefault('action', 'add')
        config.setdefault('path', '')
        config.setdefault('move_completed_path', '')
        config.setdefault('label', '')
        config.setdefault('main_file_ratio', 0.90)
        config.setdefault('magnetization_timeout', 0)
        config.setdefault('keep_subs', True)  # does nothing without 'content_filename' or 'main_file_only' enabled
        config.setdefault('hide_sparse_files', False)  # does nothing without 'main_file_only' enabled
        return config

    def __init__(self):
        self.deluge_version = None
        # Maps flexget option names to the corresponding deluge torrent option names.
        self.options = {'max_up_speed': 'max_upload_speed', 'max_down_speed': 'max_download_speed',
                        'max_connections': 'max_connections', 'max_up_slots': 'max_upload_slots',
                        'auto_managed': 'auto_managed', 'ratio': 'stop_ratio', 'remove_at_ratio': 'remove_at_ratio',
                        'add_paused': 'add_paused', 'compact': 'compact_allocation'}

    @plugin.priority(120)
    def on_task_download(self, task, config):
        """
        Call download plugin to generate the temp files we will load into deluge
        then verify they are valid torrents
        """
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        # If the download plugin is not enabled, we need to call it to get our temp .torrent files
        if 'download' not in task.config:
            download = plugin.get('download', self)
            for entry in task.accepted:
                if entry.get('deluge_id'):
                    # The torrent is already loaded in deluge, we don't need to get anything
                    continue
                if config['action'] != 'add' and entry.get('torrent_info_hash'):
                    # If we aren't adding the torrent new, all we need is info hash
                    continue
                download.get_temp_file(task, entry, handle_magnets=True)

    @plugin.priority(135)
    def on_task_output(self, task, config):
        """Add torrents to deluge at exit."""
        config = self.prepare_config(config)
        client = self.setup_client(config)
        # don't add when learning
        if task.options.learn:
            return
        if not config['enabled'] or not (task.accepted or task.options.test):
            return

        client.connect()

        if task.options.test:
            log.debug('Test connection to deluge daemon successful.')
            client.disconnect()
            return

        # loop through entries to get a list of labels to add
        labels = set()
        for entry in task.accepted:
            label = entry.get('label', config.get('label'))
            if label and label.lower() != 'no label':
                try:
                    label = self._format_label(entry.render(entry.get('label', config.get('label'))))
                    log.debug('Rendered label: %s', label)
                except RenderError as e:
                    log.error('Error rendering label `%s`: %s', label, e)
                    continue
                labels.add(label)
        if labels:
            # Make sure the label plugin is available and enabled, then add appropriate labels
            enabled_plugins = client.call('core.get_enabled_plugins')
            label_enabled = 'Label' in enabled_plugins
            if not label_enabled:
                available_plugins = client.call('core.get_available_plugins')
                if 'Label' in available_plugins:
                    log.debug('Enabling label plugin in deluge')
                    label_enabled = client.call('core.enable_plugin', 'Label')
                else:
                    log.error('Label plugin is not installed in deluge')

            if label_enabled:
                d_labels = client.call('label.get_labels')
                for label in labels:
                    if label not in d_labels:
                        log.debug('Adding the label `%s` to deluge', label)
                        client.call('label.add', label)

        # add the torrents
        torrent_ids = client.call('core.get_session_state')
        for entry in task.accepted:
            # Generate deluge options dict for torrent add
            add_opts = {}
            try:
                path = entry.render(entry.get('path', config['path']))
                if path:
                    add_opts['download_location'] = pathscrub(os.path.expanduser(path))
            except RenderError as e:
                log.error('Could not set path for %s: %s', entry['title'], e)
            for fopt, dopt in self.options.items():
                value = entry.get(fopt, config.get(fopt))
                if value is not None:
                    add_opts[dopt] = value
                    if fopt == 'ratio':
                        add_opts['stop_at_ratio'] = True
            # Make another set of options, that get set after the torrent has been added
            modify_opts = {
                'queue_to_top': entry.get('queue_to_top', config.get('queue_to_top')),
                'main_file_only': entry.get('main_file_only', config.get('main_file_only', False)),
                'main_file_ratio': entry.get('main_file_ratio', config.get('main_file_ratio')),
                'hide_sparse_files': entry.get('hide_sparse_files', config.get('hide_sparse_files', True)),
                'keep_subs': entry.get('keep_subs', config.get('keep_subs', True)),
                'container_directory': config.get('container_directory', '')
            }
            try:
                label = entry.render(entry.get('label', config['label']))
                modify_opts['label'] = self._format_label(label)
            except RenderError as e:
                log.error('Error setting label for `%s`: %s', entry['title'], e)
            try:
                move_completed_path = entry.render(entry.get('move_completed_path', config['move_completed_path']))
                modify_opts['move_completed_path'] = pathscrub(os.path.expanduser(move_completed_path))
            except RenderError as e:
                log.error('Error setting move_completed_path for %s: %s', entry['title'], e)
            try:
                content_filename = entry.get('content_filename', config.get('content_filename', ''))
                modify_opts['content_filename'] = pathscrub(entry.render(content_filename))
            except RenderError as e:
                log.error('Error setting content_filename for %s: %s', entry['title'], e)

            torrent_id = entry.get('deluge_id') or entry.get('torrent_info_hash')
            torrent_id = torrent_id and torrent_id.lower()
            if torrent_id in torrent_ids:
                log.info('%s is already loaded in deluge, setting options', entry['title'])
                # Entry has a deluge id, verify the torrent is still in the deluge session and apply options
                # Since this is already loaded in deluge, we may also need to change the path
                modify_opts['path'] = add_opts.pop('download_location', None)
                client.call('core.set_torrent_options', [torrent_id], add_opts)
                self._set_torrent_options(client, torrent_id, entry, modify_opts)
            elif config['action'] != 'add':
                log.warning('Cannot %s %s, because it is not loaded in deluge.', config['action'], entry['title'])
                continue
            else:
                magnet, filedump = None, None
                if entry.get('url', '').startswith('magnet:'):
                    magnet = entry['url']
                else:
                    if not os.path.exists(entry['file']):
                        entry.fail('Downloaded temp file \'%s\' doesn\'t exist!' % entry['file'])
                        del (entry['file'])
                        return
                    with open(entry['file'], 'rb') as f:
                        # NOTE(review): base64.encodestring was removed in Python 3.9;
                        # base64.encodebytes is the py3 replacement. Kept as-is here
                        # because this file still supports py2 (builtins import).
                        filedump = base64.encodestring(f.read())

                log.verbose('Adding %s to deluge.', entry['title'])
                added_torrent = None
                if magnet:
                    added_torrent = client.call('core.add_torrent_magnet', magnet, add_opts)
                    if config.get('magnetization_timeout'):
                        timeout = config['magnetization_timeout']
                        log.verbose('Waiting %d seconds for "%s" to magnetize', timeout, entry['title'])
                        for _ in range(timeout):
                            time.sleep(1)
                            try:
                                # Poll the id returned by add_torrent_magnet; the entry's own
                                # torrent_id may be None for magnets without a known info hash.
                                status = client.call('core.get_torrent_status', added_torrent, ['files'])
                            except Exception as err:
                                log.error('wait_for_metadata Error: %s', err)
                                break
                            if status.get('files'):
                                log.info('"%s" magnetization successful', entry['title'])
                                break
                        else:
                            log.warning('"%s" did not magnetize before the timeout elapsed, '
                                        'file list unavailable for processing.', entry['title'])
                else:
                    try:
                        added_torrent = client.call('core.add_torrent_file', entry['title'], filedump, add_opts)
                    except Exception as e:
                        log.info('%s was not added to deluge! %s', entry['title'], e)
                        entry.fail('Could not be added to deluge')
                if not added_torrent:
                    log.error('There was an error adding %s to deluge.' % entry['title'])
                else:
                    self._set_torrent_options(client, added_torrent, entry, modify_opts)
            if config['action'] in ('remove', 'purge'):
                client.call('core.remove_torrent', torrent_id, config['action'] == 'purge')
            elif config['action'] == 'pause':
                client.call('core.pause_torrent', [torrent_id])
            elif config['action'] == 'resume':
                client.call('core.resume_torrent', [torrent_id])

        client.disconnect()

    def on_task_learn(self, task, config):
        """ Make sure all temp files are cleaned up when entries are learned """
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get('download', self)
            download.cleanup_temp_files(task)

    def on_task_abort(self, task, config):
        """Make sure normal cleanup tasks still happen on abort."""
        self.on_task_learn(task, config)

    def _format_label(self, label):
        """Makes a string compliant with deluge label naming rules"""
        # "No Label" is a special identifier to unset a label
        if label.lower() == 'no label':
            return 'No Label'
        # Raw string: '\w' is a regex escape, not a Python string escape.
        return re.sub(r'[^\w-]+', '_', label.lower())

    def _set_torrent_options(self, client, torrent_id, entry, opts):
        """Gets called when a torrent was added to the daemon. Applies label, queue
        position, move-completed path, file renames/priorities and container
        directory according to ``opts``."""
        log.info('%s successfully added to deluge.', entry['title'])
        entry['deluge_id'] = torrent_id
        if opts.get('move_completed_path'):
            client.call('core.set_torrent_move_completed', torrent_id, True)
            client.call('core.set_torrent_move_completed_path', torrent_id, opts['move_completed_path'])
            log.debug('%s move on complete set to %s', entry['title'], opts['move_completed_path'])
        if opts.get('label'):
            client.call('label.set_torrent', torrent_id, opts['label'])
        if opts.get('queue_to_top') is not None:
            if opts['queue_to_top']:
                client.call('core.queue_top', [torrent_id])
                log.debug('%s moved to top of queue', entry['title'])
            else:
                client.call('core.queue_bottom', [torrent_id])
                log.debug('%s moved to bottom of queue', entry['title'])

        status_keys = ['files', 'total_size', 'save_path', 'move_on_completed_path', 'move_on_completed', 'progress']
        status = client.call('core.get_torrent_status', torrent_id, status_keys)

        # Determine where the file should be
        move_now_path = None
        if opts.get('move_completed_path'):
            if status['progress'] == 100:
                move_now_path = opts['move_completed_path']
            else:
                # Deluge will unset the move completed option if we move the storage, forgo setting proper
                # path, in favor of leaving proper final location.
                log.debug('Not moving storage for %s, as this will prevent move_completed_path.', entry['title'])
        elif opts.get('path'):
            move_now_path = opts['path']

        if move_now_path and os.path.normpath(move_now_path) != os.path.normpath(status['save_path']):
            log.debug('Moving storage for %s to %s', entry['title'], move_now_path)
            client.call('core.move_storage', [torrent_id], move_now_path)

        big_file_name = ''
        if opts.get('content_filename') or opts.get('main_file_only'):
            # find a file that makes up more than main_file_ratio (default: 90%) of the total size
            main_file = None
            for file in status['files']:
                if file['size'] > (status['total_size'] * opts.get('main_file_ratio')):
                    main_file = file
                    break

            def file_exists(filename):
                # Checks the download path as well as the move completed path for existence of the file
                if os.path.exists(os.path.join(status['save_path'], filename)):
                    return True
                elif status.get('move_on_completed') and status.get('move_on_completed_path'):
                    if os.path.exists(os.path.join(status['move_on_completed_path'], filename)):
                        return True
                else:
                    return False

            def unused_name(name):
                # If on local computer, tries appending a (#) suffix until a unique filename is found
                if client.host in ['127.0.0.1', 'localhost']:
                    counter = 2
                    while file_exists(name):
                        name = ''.join([os.path.splitext(name)[0],
                                        " (", str(counter), ')',
                                        os.path.splitext(name)[1]])
                        counter += 1
                else:
                    log.debug('Cannot ensure content_filename is unique when adding to a remote deluge daemon.')
                return name

            def rename(file, new_name):
                # Renames a file in torrent
                client.call('core.rename_files', torrent_id, [(file['index'], new_name)])
                log.debug('File %s in %s renamed to %s', file['path'], entry['title'], new_name)

            if main_file is not None:
                # proceed with renaming only if such a big file is found

                # find the subtitle file
                keep_subs = opts.get('keep_subs')
                sub_file = None
                if keep_subs:
                    sub_exts = [".srt", ".sub"]
                    for file in status['files']:
                        ext = os.path.splitext(file['path'])[1]
                        if ext in sub_exts:
                            sub_file = file
                            break

                # check for single file torrents so we dont add unnecessary folders
                # (fixed: the original `is not ("" or "/")` compared identity against
                # "/" only, which was always true for computed strings)
                top_files_dir = "/"
                if os.path.dirname(main_file['path']) not in ("", "/"):
                    # check for top folder in user config
                    if opts.get('content_filename') and os.path.dirname(opts['content_filename']) != "":
                        top_files_dir = os.path.dirname(opts['content_filename']) + "/"
                    else:
                        top_files_dir = os.path.dirname(main_file['path']) + "/"

                if opts.get('content_filename'):
                    # rename the main file
                    big_file_name = (top_files_dir +
                                     os.path.basename(opts['content_filename']) +
                                     os.path.splitext(main_file['path'])[1])
                    big_file_name = unused_name(big_file_name)
                    rename(main_file, big_file_name)

                    # rename subs along with the main file
                    if sub_file is not None and keep_subs:
                        sub_file_name = (os.path.splitext(big_file_name)[0] +
                                         os.path.splitext(sub_file['path'])[1])
                        rename(sub_file, sub_file_name)

                if opts.get('main_file_only'):
                    # download only the main file (and subs)
                    file_priorities = [1 if f == main_file or f == sub_file and keep_subs else 0
                                       for f in status['files']]
                    client.call('core.set_torrent_file_priorities', torrent_id, file_priorities)

                    if opts.get('hide_sparse_files'):
                        # hide the other sparse files that are not supposed to download but are created anyway
                        # http://dev.deluge-torrent.org/ticket/1827
                        # Made sparse files behave better with deluge http://flexget.com/ticket/2881
                        sparse_files = [f for f in status['files']
                                        if f != main_file and (f != sub_file or not keep_subs)]
                        rename_pairs = [(f['index'],
                                         top_files_dir + ".sparse_files/" + os.path.basename(f['path']))
                                        for f in sparse_files]
                        client.call('core.rename_files', torrent_id, rename_pairs)
            else:
                log.warning('No files in "%s" are > %d%% of content size, no files renamed.', entry['title'],
                            opts.get('main_file_ratio') * 100)

        container_directory = pathscrub(entry.render(entry.get('container_directory',
                                                               opts.get('container_directory', ''))))
        if container_directory:
            if big_file_name:
                folder_structure = big_file_name.split(os.sep)
            elif len(status['files']) > 0:
                folder_structure = status['files'][0]['path'].split(os.sep)
            else:
                folder_structure = []
            if len(folder_structure) > 1:
                log.verbose('Renaming Folder %s to %s', folder_structure[0], container_directory)
                client.call('core.rename_folder', torrent_id, folder_structure[0], container_directory)
            else:
                log.debug('container_directory specified however the torrent %s does not have a directory structure; '
                          'skipping folder rename', entry['title'])
@event('plugin.register')
def register_plugin():
    """Register both deluge plugins with flexget's plugin registry."""
    for plugin_class, plugin_name in ((InputDeluge, 'from_deluge'), (OutputDeluge, 'deluge')):
        plugin.register(plugin_class, plugin_name, api_ver=2)
| 48.344178 | 118 | 0.533489 |
cf6c421c8bd1e1c3830f0d0e47cd7f4d4eb08d07 | 102 | py | Python | buildout/build.py | Hejtman/astro-empires | c334e9aa8b9c6eab183f9b0fd98eb042f7b7ab79 | [
"MIT"
] | null | null | null | buildout/build.py | Hejtman/astro-empires | c334e9aa8b9c6eab183f9b0fd98eb042f7b7ab79 | [
"MIT"
] | null | null | null | buildout/build.py | Hejtman/astro-empires | c334e9aa8b9c6eab183f9b0fd98eb042f7b7ab79 | [
"MIT"
] | null | null | null | import json
from game import Game
game = Game(85, 4, 4, 3, 1, 1, 'J14:54:16:41')
game.save('data')
| 12.75 | 46 | 0.627451 |
a1fdc3690ad865dd91536275f04e0ca895b3ae1f | 739 | py | Python | gammapy/scripts/tests/test_all.py | joleroi/gammapy | c4e0c4bd74c79d30e0837559d18b7a1a269f70d9 | [
"BSD-3-Clause"
] | null | null | null | gammapy/scripts/tests/test_all.py | joleroi/gammapy | c4e0c4bd74c79d30e0837559d18b7a1a269f70d9 | [
"BSD-3-Clause"
] | null | null | null | gammapy/scripts/tests/test_all.py | joleroi/gammapy | c4e0c4bd74c79d30e0837559d18b7a1a269f70d9 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.tests.helper import pytest
from ...utils.scripts import get_all_main_functions
# Mapping of {script name: main() callable} discovered from the scripts package.
SCRIPTS = get_all_main_functions()
NAMES = sorted(SCRIPTS.keys())
@pytest.mark.parametrize("name", NAMES)
def test_help(name):
    """Test that --help works for all scripts."""
    # main = SCRIPTS[name].resolve()
    main = SCRIPTS[name]
    # A script presumably exits via SystemExit after printing help (argparse
    # behavior — confirm); we only check that it exits rather than crashes.
    with pytest.raises(SystemExit) as exc:
        main(['--help'])
    # TODO: how to assert that it ran OK?
    # Assert exit code or what was printed to sys.stdout?
    # print(exc.value)
    # assert exc.value == SystemExit(0)
0cc981f379c03f59861f1c601da5a2e1adc407ca | 193 | py | Python | versions/extra_editions/Aquaman/documentation/gen_a_quick_passwd.py | flipchan/LayerProx | 345e6905b8f36b3a11e096d6c3a73ea0ebf69b24 | [
"Apache-2.0"
] | 13 | 2016-09-28T23:59:25.000Z | 2021-06-28T00:55:08.000Z | extra_editions/Aquaman/documentation/gen_a_quick_passwd.py | flipchan/LayerProx | 345e6905b8f36b3a11e096d6c3a73ea0ebf69b24 | [
"Apache-2.0"
] | 1 | 2016-10-11T17:40:33.000Z | 2016-10-11T17:40:33.000Z | versions/extra_editions/Aquaman/documentation/gen_a_quick_passwd.py | flipchan/LayerProx | 345e6905b8f36b3a11e096d6c3a73ea0ebf69b24 | [
"Apache-2.0"
] | 1 | 2016-10-10T16:18:41.000Z | 2016-10-10T16:18:41.000Z | from os import urandom
import base64
mynumber = 81 #change this if needed
myl = base64.b64encode(urandom(mynumber))
print 'ur generated key with ' + str(mynumber) + 'chars is: ' + str(myl)
| 21.444444 | 73 | 0.720207 |
50f34754a53d360aea5e567f709d9137869e1a13 | 215 | py | Python | Tools/grmm/lib/jython/Lib/test/bugs/pr235.py | arne-cl/codra-rst-parser | a03631aace2146da2fed0c0c8f0a3fe1c8c5483d | [
"Apache-2.0"
] | 8 | 2016-11-24T09:38:31.000Z | 2021-04-23T13:04:48.000Z | Tools/grmm/lib/jython/Lib/test/bugs/pr235.py | arne-cl/codra-rst-parser | a03631aace2146da2fed0c0c8f0a3fe1c8c5483d | [
"Apache-2.0"
] | 1 | 2019-01-16T00:58:12.000Z | 2019-12-30T14:33:16.000Z | Tools/grmm/lib/jython/Lib/test/bugs/pr235.py | arne-cl/codra-rst-parser | a03631aace2146da2fed0c0c8f0a3fe1c8c5483d | [
"Apache-2.0"
] | 4 | 2018-12-04T12:21:05.000Z | 2021-02-05T08:00:14.000Z | # PR#235, JPython crashes (i.e. uncaught Java exception) under strange
# (illegal) input.
bogus = '''\
def f(x, z, x):
    pass

f(y=1)
'''

try:
    # Duplicate parameter 'x' is illegal; compile() must raise SyntaxError
    # (the original bug was an uncaught Java exception in JPython).
    compile(bogus, '<string>', 'exec')
except SyntaxError:
    pass
87551a19d18c033111d6b47a06ccc05e2ab9a537 | 46 | py | Python | test/declare.py | CHUNHUNGFAN/Course_Social_Network_Novel | fe85ffe6ba7213e4b5a7878554184f1728899694 | [
"MIT"
] | null | null | null | test/declare.py | CHUNHUNGFAN/Course_Social_Network_Novel | fe85ffe6ba7213e4b5a7878554184f1728899694 | [
"MIT"
] | 5 | 2021-03-10T12:17:57.000Z | 2022-02-27T01:52:58.000Z | test/declare.py | CHUNHUNGFAN/Course_Social_Network_Novel | fe85ffe6ba7213e4b5a7878554184f1728899694 | [
"MIT"
] | null | null | null | #%%
a = [[0]]*8
print(a)
print(a[0][0])
# %%
| 6.571429 | 14 | 0.369565 |
035bfb65e0a828f8b236e090904c6c55c7396c2b | 10,214 | py | Python | pictobot/keyboards.py | baychimo/pictobot | 05b42e7e5875e10aa911f546889b5d6d2ea87ac5 | [
"MIT"
] | 1 | 2022-03-27T20:44:25.000Z | 2022-03-27T20:44:25.000Z | pictobot/keyboards.py | baychimo/pictobot | 05b42e7e5875e10aa911f546889b5d6d2ea87ac5 | [
"MIT"
] | 16 | 2021-06-01T23:43:58.000Z | 2022-02-13T09:53:38.000Z | pictobot/keyboards.py | baychimo/pictobot | 05b42e7e5875e10aa911f546889b5d6d2ea87ac5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from settings.base import _
from pictograms import *
from collections import OrderedDict
###############################################################################
# INLINE KEYBOARDS                                                            #
# Nested dicts describing the bot's inline-keyboard hierarchy.                #
# Order does not matter here, except inside the 'layout' OrderedDicts,        #
# which fix the on-screen button order.                                       #
###############################################################################
# Each top-level entry describes one inline keyboard:
#   'layout' -- OrderedDict mapping a button label to a callback key, which
#               is either another keyboard ('KB_*', for navigation) or a
#               pictogram key (a leaf entry of the `pictograms` dict).
#   'text'   -- caption displayed with the keyboard.
keyboards = {
    # Dict describing main inline keyboard : categories of pictograms
    'KB_MAIN': {
        'layout': OrderedDict([
            (_('Emergency') + ' / ' + _('Health'), 'KB_EMERGENCY'),
            (_('Food') + ' / ' + _('Water'), 'KB_FOOD_WATER'),
            (_('Hygiene') + ' / ' + _('Sanitation'), 'KB_HYGIENE'),
            (_('Transport'), 'KB_TRANSPORT'),
            (_('Accommodation'), 'KB_ACCOMMODATION'),
            (_('Useful Services'), 'KB_USEFUL_SERVICES'),
            (_('More') + ' >>', 'KB_MORE')
        ]),
        'text': _('Select category')
    },
    # Dict describing 2nd level keyboard : food / water category
    'KB_FOOD_WATER': {
        'layout': OrderedDict([
            (pictograms['DRINKING_WATER']['title'], 'DRINKING_WATER'),
            (pictograms['MARKET']['title'], 'MARKET'),
            (pictograms['SUPERMARKET']['title'], 'SUPERMARKET'),
            (pictograms['RESTAURANT']['title'], 'RESTAURANT'),
            (pictograms['FOOD']['title'], 'FOOD'),
            (_('More') + ' >>', 'KB_FOOD_WATER_2'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Food') + ' / ' + _('Water')
    },
    # Dict describing 3rd level keyboard : food / water category
    'KB_FOOD_WATER_2': {
        'layout': OrderedDict([
            (pictograms['WATER_BOTTLE']['title'], 'WATER_BOTTLE'),
            (pictograms['FAST_FOOD']['title'], 'FAST_FOOD'),
            (pictograms['BAR']['title'], 'BAR'),
            (pictograms['COFFEE']['title'], 'COFFEE'),
            ('<< ' + _('Back'), 'KB_FOOD_WATER')
        ]),
        'text': _('Category') + ' :: ' +
                _('Food') + ' / ' + _('Water') + ' [2]'
    },
    # Dict describing 2nd level keyboard : emergency / health category
    'KB_EMERGENCY': {
        'layout': OrderedDict([
            (pictograms['FIRST_AID']['title'], 'FIRST_AID'),
            (pictograms['AMBULANCE']['title'], 'AMBULANCE'),
            (pictograms['DOCTOR']['title'], 'DOCTOR'),
            (pictograms['HOSPITAL']['title'], 'HOSPITAL'),
            (pictograms['POLICE']['title'], 'POLICE'),
            (_('More') + ' >>', 'KB_EMERGENCY_2'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Emergency') + ' / ' + _('Health')
    },
    # Dict describing 3rd level keyboard : emergency / health category
    'KB_EMERGENCY_2': {
        'layout': OrderedDict([
            (pictograms['DENTIST']['title'], 'DENTIST'),
            (pictograms['MEDICINE']['title'], 'MEDICINE'),
            (pictograms['BAND_AID']['title'], 'BAND_AID'),
            (pictograms['SHELTER']['title'], 'SHELTER'),
            ('<< ' + _('Back'), 'KB_EMERGENCY')
        ]),
        'text': _('Category') + ' :: ' + _('Emergency') + ' / ' + _('Health') +
                ' [2]'
    },
    # Dict describing 2nd level keyboard : hygiene / sanitation category
    'KB_HYGIENE': {
        'layout': OrderedDict([
            (pictograms['TOILETS']['title'], 'TOILETS'),
            (pictograms['SHOWER']['title'], 'SHOWER'),
            (pictograms['SOAP']['title'], 'SOAP'),
            (pictograms['TOILET_PAPER']['title'], 'TOILET_PAPER'),
            (pictograms['WASHING_MACHINE']['title'], 'WASHING_MACHINE'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Hygiene') + ' / ' + _('Sanitation')
    },
    # Dict describing 2nd level keyboard : transport category
    'KB_TRANSPORT': {
        'layout': OrderedDict([
            (pictograms['TAXI']['title'], 'TAXI'),
            (pictograms['BUS']['title'], 'BUS'),
            (pictograms['TRAIN']['title'], 'TRAIN'),
            (pictograms['CAR_RENTAL']['title'], 'CAR_RENTAL'),
            (pictograms['AIRPLANE']['title'], 'AIRPLANE'),
            (_('More') + ' >>', 'KB_TRANSPORT_2'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Transport')
    },
    # Dict describing 3rd level keyboard : transport category
    'KB_TRANSPORT_2': {
        'layout': OrderedDict([
            (pictograms['GROUND_TRANSPORTATION']['title'],
             'GROUND_TRANSPORTATION'
             ),
            (pictograms['FERRY_BOAT']['title'], 'FERRY_BOAT'),
            (pictograms['SAIL_BOAT']['title'], 'SAIL_BOAT'),
            (pictograms['VAN']['title'], 'VAN'),
            (pictograms['MOTORBIKE']['title'], 'MOTORBIKE'),
            ('<< ' + _('Back'), 'KB_TRANSPORT')
        ]),
        'text': _('Category') + ' :: ' + _('Transport') + ' [2]'
    },
    # Dict describing 2nd level keyboard : accommodation category
    'KB_ACCOMMODATION': {
        'layout': OrderedDict([
            (pictograms['HOTEL']['title'], 'HOTEL'),
            (pictograms['HOSTEL']['title'], 'HOSTEL'),
            (pictograms['CAMPING']['title'], 'CAMPING'),
            (pictograms['ROOM_SERVICE']['title'], 'ROOM_SERVICE'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Accommodation')
    },
    # Dict describing 2nd level keyboard : useful services category
    'KB_USEFUL_SERVICES': {
        'layout': OrderedDict([
            (pictograms['ATM']['title'], 'ATM'),
            (pictograms['BANK']['title'], 'BANK'),
            (pictograms['JUSTICE']['title'], 'JUSTICE'),
            (pictograms['HAIRDRESSER']['title'], 'HAIRDRESSER'),
            (pictograms['GAS_STATION']['title'], 'GAS_STATION'),
            (_('More') + ' >>', 'KB_USEFUL_SERVICES_2'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Category') + ' :: ' + _('Useful Services')
    },
    # Dict describing 3rd level keyboard : useful services category
    'KB_USEFUL_SERVICES_2': {
        'layout': OrderedDict([
            (pictograms['PHOTOGRAPHER']['title'], 'PHOTOGRAPHER'),
            (pictograms['MUSEUM']['title'], 'MUSEUM'),
            (pictograms['INTERNET_ACCESS']['title'], 'INTERNET_ACCESS'),
            ('<< ' + _('Back'), 'KB_USEFUL_SERVICES')
        ]),
        'text': _('Category') + ' :: ' + _('Useful Services') + ' [2]'
    },
    # Dict describing 2nd level keyboard : other / more category
    'KB_MORE': {
        'layout': OrderedDict([
            (_('Useful Items'), 'KB_USEFUL_ITEMS'),
            (_('Nature'), 'KB_NATURE'),
            (_('Sports'), 'KB_SPORTS'),
            (_('Religion'), 'KB_RELIGION'),
            (_('Alien contact'), 'KB_ALIEN'),
            ('<< ' + _('Back'), 'KB_MAIN')
        ]),
        'text': _('Select category')
    },
    # Dict describing 3rd level keyboard : useful items category
    'KB_USEFUL_ITEMS': {
        'layout': OrderedDict([
            (pictograms['ADAPTER_PLUG']['title'], 'ADAPTER_PLUG'),
            (pictograms['CHARGER']['title'], 'CHARGER'),
            (pictograms['SUNGLASSES']['title'], 'SUNGLASSES'),
            (pictograms['BEACH_UMBRELLA']['title'], 'BEACH_UMBRELLA'),
            (pictograms['TOOLS']['title'], 'TOOLS'),
            (_('More') + ' >>', 'KB_USEFUL_ITEMS_2'),
            ('<< ' + _('Back'), 'KB_MORE')
        ]),
        'text': _('Category') + ' :: ' + _('Useful Items')
    },
    # Dict describing 4th level keyboard : useful items category
    'KB_USEFUL_ITEMS_2': {
        'layout': OrderedDict([
            (pictograms['HAMMER']['title'], 'HAMMER'),
            (pictograms['SCREWDRIVER']['title'], 'SCREWDRIVER'),
            (pictograms['WRENCH']['title'], 'WRENCH'),
            (pictograms['UMBRELLA']['title'], 'UMBRELLA'),
            (pictograms['PENCIL']['title'], 'PENCIL'),
            ('<< ' + _('Back'), 'KB_USEFUL_ITEMS')
        ]),
        'text': _('Category') + ' :: ' + _('Useful Items') + ' [2]'
    },
    # Dict describing 3rd level keyboard : nature category
    'KB_NATURE': {
        'layout': OrderedDict([
            (pictograms['PARK']['title'], 'PARK'),
            (pictograms['BEACH']['title'], 'BEACH'),
            (pictograms['LAKE']['title'], 'LAKE'),
            (pictograms['OCEAN']['title'], 'OCEAN'),
            ('<< ' + _('Back'), 'KB_MORE')
        ]),
        'text': _('Category') + ' :: ' + _('Nature')
    },
    # Dict describing 3rd level keyboard : sports category
    'KB_SPORTS': {
        'layout': OrderedDict([
            (pictograms['HIKE']['title'], 'HIKE'),
            (pictograms['CYCLING']['title'], 'CYCLING'),
            (pictograms['SKYDIVING']['title'], 'SKYDIVING'),
            (pictograms['SCUBA_DIVING']['title'], 'SCUBA_DIVING'),
            (pictograms['SWIMMING']['title'], 'SWIMMING'),
            (pictograms['INDOOR_SWIMMING']['title'], 'INDOOR_SWIMMING'),
            ('<< ' + _('Back'), 'KB_MORE')
        ]),
        'text': _('Category') + ' :: ' + _('Sports')
    },
    # Dict describing 3rd level keyboard : religion category
    'KB_RELIGION': {
        'layout': OrderedDict([
            (pictograms['CHURCH']['title'], 'CHURCH'),
            (pictograms['MOSQUE']['title'], 'MOSQUE'),
            (pictograms['HINDU_TEMPLE']['title'], 'HINDU_TEMPLE'),
            (pictograms['BUDDHIST_TEMPLE']['title'], 'BUDDHIST_TEMPLE'),
            (pictograms['SYNAGOGUE']['title'], 'SYNAGOGUE'),
            (pictograms['CHAPEL']['title'], 'CHAPEL'),
            ('<< ' + _('Back'), 'KB_MORE')
        ]),
        'text': _('Category') + ' :: ' + _('Religion')
    },
    # Dict describing 3rd level keyboard : alien contact category
    'KB_ALIEN': {
        'layout': OrderedDict([
            (pictograms['TRIANGLE']['title'], 'TRIANGLE'),
            (pictograms['BORG']['title'], 'BORG'),
            ('<< ' + _('Back'), 'KB_MORE')
        ]),
        'text': _('Category') + ' :: ' + _('Alien contact')
    }
}
| 43.83691 | 79 | 0.488643 |
934bfb8f20d5e5ce4e8df9808021234f616da4a7 | 2,104 | py | Python | asl-api/engine/tensor/custom_CNN.py | ooawagaeri/orbital-asl-application | 4e65306652edd3217eaa097ebe63506a403b2475 | [
"MIT"
] | 2 | 2021-05-29T17:35:13.000Z | 2021-05-29T17:35:14.000Z | asl-api/engine/tensor/custom_CNN.py | ooawagaeri/orbital-asl-application | 4e65306652edd3217eaa097ebe63506a403b2475 | [
"MIT"
] | 1 | 2021-06-18T08:50:26.000Z | 2021-06-18T08:50:26.000Z | asl-api/engine/tensor/custom_CNN.py | ooawagaeri/orbital-asl-application | 4e65306652edd3217eaa097ebe63506a403b2475 | [
"MIT"
] | null | null | null | """
custom_CNN.py
Used to perform / extract features from img via convolution operation(s)
"""
import torch.nn as nn
import torch.nn.functional as func
import joblib
import os
class CustomCNN(nn.Module):
    """Small four-stage convolutional classifier.

    Each stage applies conv -> ReLU -> 2x2 max pool; the final feature map
    is globally average-pooled to a 128-vector and classified by a
    two-layer fully connected head.

    Attributes:
        conv1..conv4 : torch.nn.Conv2d convolution layers of the four stages
        fc1, fc2     : torch.nn.Linear classifier head (128 -> 256 -> lb_len)
        pool         : torch.nn.MaxPool2d shared 2x2 max-pooling layer
    """

    def __init__(self, lb_len):
        """Build the network.

        Parameters:
            lb_len (int): number of output classes (size of the label set).
        """
        super(CustomCNN, self).__init__()
        # Layers are created in the original order so that parameter
        # initialisation order and state_dict keys stay identical.
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 128, 5)
        self.fc1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, lb_len)
        # Max pooling is stateless, so one layer is shared by every stage.
        # It halves the spatial resolution, reducing dimensionality.
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        """Run a batch of images through the network.

        Parameters:
            x (torch.Tensor): input batch of shape (batch, 3, H, W).

        Returns:
            torch.Tensor: raw class scores of shape (batch, lb_len).
        """
        # conv -> ReLU -> pool, four times
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.pool(func.relu(stage(x)))
        bs, _, _, _ = x.shape
        # Collapse each 128-channel feature map to one value per channel.
        x = func.adaptive_avg_pool2d(x, 1).reshape(bs, -1)
        x = func.relu(self.fc1(x))
        return self.fc2(x)
| 31.402985 | 85 | 0.601711 |
b74cdb1fdf63f335bfc817a5f880078206c37ca4 | 1,575 | py | Python | metalfi/src/data/meta/importance/dropcolumn.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 2 | 2019-12-05T07:57:14.000Z | 2019-12-05T13:02:08.000Z | metalfi/src/data/meta/importance/dropcolumn.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 31 | 2019-12-05T15:14:47.000Z | 2020-12-04T14:37:46.000Z | metalfi/src/data/meta/importance/dropcolumn.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 1 | 2020-12-04T13:40:11.000Z | 2020-12-04T13:40:11.000Z | from pandas import DataFrame
from rfpimp import *
from sklearn.preprocessing import StandardScaler
from metalfi.src.data.meta.importance.featureimportance import FeatureImportance
class DropColumnImportance(FeatureImportance):
    """Leave-one-feature-out ("drop column") feature importance.

    Delegates the actual importance computation to rfpimp's
    ``dropcol_importances`` / ``oob_dropcol_importances``, collecting one
    importance table per configured model into ``self._feature_importances``.
    """

    def __init__(self, dataset):
        super(DropColumnImportance, self).__init__(dataset)
        # identifier suffix for this importance method ("leave one feature out")
        self._name = "_LOFO"

    def calculateScores(self):
        """Compute drop-column importances for all configured model families."""
        for model in self._linear_models:
            self._feature_importances.append(self.dropcolImportance(model, self._target))

        # NOTE(review): the loop variable `model` is unused here -- a fresh
        # RandomForestClassifier is built per entry instead, presumably
        # because the OOB variant needs oob_score=True; confirm intended.
        for model in self._tree_models:
            new_model = RandomForestClassifier(oob_score=True, n_estimators=100, n_jobs=4, random_state=115)
            self._feature_importances.append(self.oobDropcolImportance(new_model, self._target))

        for model in self._kernel_models:
            self._feature_importances.append(self.dropcolImportance(model, self._target))

    def dropcolImportance(self, model, target):
        """Drop-column importance on standardized features.

        Fits *model* on the scaled feature matrix (target column removed)
        and returns rfpimp's drop-column importance table.
        """
        sc = StandardScaler()
        # the target column is dropped twice: once for the scaled values,
        # once to recover the original column labels
        X = DataFrame(data=sc.fit_transform(self._data_frame.drop(target, axis=1)),
                      columns=self._data_frame.drop(target, axis=1).columns)
        y = self._data_frame[target]
        model.fit(X, y)
        imp = dropcol_importances(model, X, y)
        #plot_importances(imp).view()
        return imp

    def oobDropcolImportance(self, model, target):
        """Out-of-bag drop-column importance (unscaled features).

        *model* is expected to expose an OOB score (constructed with
        oob_score=True by the caller).
        """
        X = self._data_frame.drop(target, axis=1)
        y = self._data_frame[target]
        model.fit(X, y)
        imp = oob_dropcol_importances(model, X, y)
        #plot_importances(imp).view()
        return imp
| 35.795455 | 108 | 0.688889 |
b6a55027b8fcf91423641f6a6b1afc44ae39060f | 23,744 | py | Python | lale/sklearn_compat.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/sklearn_compat.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/sklearn_compat.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import sklearn.base
import lale.operators as Ops
from lale.pretty_print import hyperparams_to_string
from lale.search.PGO import remove_defaults_dict
from lale.util.Visitor import Visitor, accept
# We support an argument encoding schema intended to be a
# conservative extension to sklearn's encoding schema
# sklearn uses __ to separate elements in a hierarchy
# (pipeline's have operators that have keys)
# Since we support richer computation graphs, we need to extend this encoding
# to support it. Graphs that could be represented in sklearn
# should be encoded identically
# our encoding scheme:
# * __ separates nested components (as-in sklearn)
# * ? is the discriminant (choice made) for a choice
# * ? is also a prefix for the nested parts of the chosen branch
# * x@n In a pipeline, if multiple components have identical names,
# ** everything but the first are suffixed with a number (starting with 1)
# ** indicating which one we are talking about.
# ** For example, given (x >> y >> x), we would treat this much the same as
# ** (x >> y >> x@1)
# * $ is used in the rare case that sklearn would expect the key of an object,
# ** but we allow (and have) a non-object schema. In that case,
# ** $ is used as the key. This should only happen at the top level,
# ** since nested occurrences should be removed.
# * # is a structure indicator, and the value should be one of 'list', 'tuple', or 'dict'
# * n is used to represent the nth component in an array or tuple
# This method (and the to_lale() method on the returned value)
# are the only ones intended to be exported
def make_sklearn_compat(
    op: Union[Ops.Operator, "SKlearnCompatWrapper", Any]
) -> "SKlearnCompatWrapper":
    """Top level function for providing compatibility with sklearn operations.

    Returns a wrapper around the provided sklearn operator graph which can
    be passed to sklearn methods such as clone and GridSearchCV.  If *op*
    is already wrapped it is returned unchanged.  The wrapper may modify
    the wrapped lale operator/pipeline as part of providing compatibility
    with these methods.  After the sklearn operation is complete,
    SKlearnCompatWrapper.to_lale() can be called to recover the wrapped
    lale operator for future use.
    """
    if isinstance(op, SKlearnCompatWrapper):
        return op
    return SKlearnCompatWrapper.make_wrapper(Ops.wrap_operator(op))
def sklearn_compat_clone(impl: Any) -> Any:
    """Clone *impl* with sklearn semantics (``clone(..., safe=False)``).

    ``None`` is passed through unchanged, without importing sklearn.
    """
    if impl is None:
        return None

    from sklearn.base import clone

    return clone(impl, safe=False)
def clone_lale(op: Ops.Operator) -> Ops.Operator:
    """Deep-copy a lale operator, cloning wrapped impls via sklearn_compat_clone."""
    return op._lale_clone(sklearn_compat_clone)
class WithoutGetParams(object):
    """Forwarding shim around a lale operator.

    Attribute access is proxied to the wrapped object, EXCEPT
    ``get_params`` and a few pickling/repr dunders, for which
    AttributeError is raised deliberately -- presumably so sklearn
    utilities do not treat the wrapped operator as a plain estimator
    (TODO confirm).
    """

    def __init__(self, base):
        # the single wrapped object (an Ops.Operator or another shim)
        self._base = base
        assert self._base != self

    def __getattr__(self, name):
        # This is needed because in python copy skips calling the __init__ method,
        # so self._base may not exist yet; asking for it must fail cleanly
        # here instead of recursing through __getattr__ forever.
        if name == "_base":
            raise AttributeError
        if name == "get_params":
            raise AttributeError
        if name in ["__getstate__", "__setstate__", "__repr__"]:
            raise AttributeError
        else:
            # everything else is forwarded to the wrapped object
            return getattr(self._base, name)

    @classmethod
    def clone_wgp(cls, obj: "WithoutGetParams") -> "WithoutGetParams":
        # unwrap any number of nested shims, clone the underlying operator,
        # and re-wrap the clone in a single fresh shim
        while isinstance(obj, WithoutGetParams):
            obj = obj._base
        assert isinstance(obj, Ops.Operator)
        return WithoutGetParams(clone_lale(obj))

    def __str__(self):
        # tolerate a missing _base (see __getattr__ note about copy)
        b = getattr(self, "_base", None)
        s: str
        if b is None:
            s = ""
        else:
            s = str(b)
        return f"WGP<{s}>"
def partition_sklearn_params(
    d: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]:
    """Split sklearn-encoded parameters into top-level and nested parts.

    Keys without a ``__`` separator are returned unchanged in the first
    dict.  A key ``"group__rest"`` (split on the FIRST ``__`` only) is
    filed under ``result[group][rest]`` in the second dict.  Duplicate
    entries within a bucket are rejected by assertion.
    """
    main_parts: Dict[str, Any] = {}
    sub_parts: Dict[str, Dict[str, Any]] = {}
    for key, value in d.items():
        group, sep, rest = key.partition("__")
        if not sep:
            assert key not in main_parts
            main_parts[key] = value
        else:
            bucket = sub_parts.setdefault(group, {})
            assert rest not in bucket
            bucket[rest] = value
    return (main_parts, sub_parts)
def partition_sklearn_choice_params(d: Dict[str, Any]) -> Tuple[int, Dict[str, Any]]:
    """Split an encoded choice's parameters into (discriminant, branch params).

    The discriminant entry (key ``?``) must be present and selects which
    choice branch was taken; every other key has its choice prefix
    stripped via unnest_choice.
    """
    assert discriminant_name in d
    chosen_index = int(d[discriminant_name])
    branch_params: Dict[str, Any] = {
        unnest_choice(k): v for k, v in d.items() if k != discriminant_name
    }
    return (chosen_index, branch_params)
# Stand-in key used when sklearn expects a named (object) key but the lale
# search space has a non-object schema at that position (see the encoding
# description at the top of this file).
DUMMY_SEARCH_SPACE_GRID_PARAM_NAME: str = "$"
# "?" both marks the discriminant entry of an encoded choice and prefixes
# the parameters of the chosen branch.
discriminant_name: str = "?"
choice_prefix: str = "?"
# "#" introduces structure metadata; its value names the container kind.
structure_type_name: str = "#"
structure_type_list: str = "list"
structure_type_tuple: str = "tuple"
structure_type_dict: str = "dict"
def get_name_and_index(name: str) -> Tuple[str, int]:
    """ given a name of the form "name@i", returns (name, i)
        if given a name of the form "name", returns (name, 0)
    """
    base, sep, index = name.partition("@")
    return (base, int(index)) if sep else (base, 0)
def make_degen_indexed_name(name, index):
    """Always produce the explicit "name@index" form, even for index 0."""
    return "{}@{}".format(name, index)
def make_indexed_name(name, index):
    """Index 0 keeps the bare name; later duplicates get an "@index" suffix."""
    return name if index == 0 else f"{name}@{index}"
def make_array_index_name(index, is_tuple: bool = False):
    """Key for the *index*-th element of an encoded list ("#i") or tuple ("##i")."""
    return ("##" if is_tuple else "#") + str(index)
def is_numeric_structure(structure_type: str):
    """True for positionally indexed structures (list/tuple), False for dict."""
    if structure_type in ("list", "tuple"):
        return True
    if structure_type == "dict":
        return False
    assert False, f"Unknown structure type {structure_type} found"
def set_structured_params(k, params: Dict[str, Any], hyper_parent):
    """Apply the encoded *params* to entry *k* of *hyper_parent*, in place.

    *hyper_parent* is the containing collection: a dict keyed by name or a
    list keyed by integer index.  If *params* carries a ``#`` structure
    marker it describes a nested list/tuple/dict value that is rebuilt
    element by element; otherwise *params* configures a nested operator
    stored at ``hyper_parent[k]``.
    """
    # need to handle the different encoding schemes used
    if params is None:
        return None
    if structure_type_name in params:
        # this is a structured type
        structure_type = params[structure_type_name]
        type_params, sub_params = partition_sklearn_params(params)
        # fetch the existing value for k, if any, as the starting point
        hyper = None
        if isinstance(hyper_parent, dict):
            hyper = hyper_parent.get(k, None)
        elif isinstance(hyper_parent, list) and k < len(hyper_parent):
            hyper = hyper_parent[k]
        if hyper is None:
            hyper = {}
        elif isinstance(hyper, tuple):
            # to make it mutable
            hyper = list(hyper)
        del type_params[structure_type_name]
        actual_key: Union[str, int]
        # simple (leaf) values for this structure
        for elem_key, elem_value in type_params.items():
            if elem_value is not None:
                if not isinstance(hyper, dict):
                    # positional container: keys are stringified indices
                    assert is_numeric_structure(structure_type)
                    actual_key = int(elem_key)
                    hyper[actual_key] = elem_value
                else:
                    actual_key = elem_key
                    hyper[actual_key] = elem_value
        # recursively apply nested parameter groups
        for elem_key, elem_params in sub_params.items():
            if not isinstance(hyper, dict):
                assert is_numeric_structure(structure_type)
                actual_key = int(elem_key)
            else:
                actual_key = elem_key
            set_structured_params(actual_key, elem_params, hyper)
        if isinstance(hyper, dict) and is_numeric_structure(structure_type):
            # convert the index-keyed dict back into a positional sequence,
            # filling gaps with None
            # NOTE(review): range(max_key) stops BEFORE max_key, so the
            # element with the highest index is dropped; range(max_key + 1)
            # looks intended -- confirm against the encoding's index base.
            max_key = max(map(int, hyper.keys()))
            hyper = [hyper.get(str(x), None) for x in range(max_key)]
        if structure_type == "tuple":
            hyper = tuple(hyper)
        hyper_parent[k] = hyper
    else:
        # if it is not a structured parameter
        # then it must be a nested higher order operator
        sub_op = hyper_parent[k]
        if isinstance(sub_op, list):
            if len(sub_op) == 1:
                sub_op = sub_op[0]
            else:
                # multiple alternatives: the discriminant picks the branch
                (disc, chosen_params) = partition_sklearn_choice_params(params)
                assert 0 <= disc and disc < len(sub_op)
                sub_op = sub_op[disc]
                params = chosen_params
        trainable_sub_op = set_operator_params(sub_op, **params)
        hyper_parent[k] = trainable_sub_op
def set_operator_params(op: Ops.Operator, **impl_params) -> Ops.TrainableOperator:
    """Apply sklearn-style set_params arguments to a lale operator.

    May return a new operator, in which case the old one should be
    overwritten by the caller.  Individual ops have their hyperparams
    merged and re-set; pipelines route nested ``name__param`` arguments to
    their steps (with ``name@i`` disambiguating duplicate step names);
    operator choices are replaced by the branch selected via the ``?``
    discriminant.
    """
    if isinstance(op, Ops.PlannedIndividualOp):
        main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
        hyper = op._hyperparams
        if hyper is None:
            hyper = {}
        # we set the sub params first
        for sub_key, sub_params in partitioned_sub_params.items():
            set_structured_params(sub_key, sub_params, hyper)

        # we have now updated any nested operators
        # (if this is a higher order operator)
        # and can work on the main operator
        all_params = {**main_params, **hyper}
        return op.set_params(**all_params)
    elif isinstance(op, Ops.BasePipeline):
        steps = op.steps()
        main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
        assert not main_params, f"Unexpected non-nested arguments {main_params}"
        found_names: Dict[str, int] = {}
        step_map: Dict[Ops.Operator, Ops.Operator] = {}
        for s in steps:
            name = s.name()
            name_index = 0
            params: Dict[str, Any] = {}
            if name in found_names:
                # a later duplicate: look up its params under "name@i"
                name_index = found_names[name] + 1
                found_names[name] = name_index
                uname = make_indexed_name(name, name_index)
                if uname in partitioned_sub_params:
                    params = partitioned_sub_params[uname]
            else:
                # first occurrence: accept either the degenerate "name@0"
                # form or the plain name (but not both)
                found_names[name] = 0
                uname = make_degen_indexed_name(name, 0)
                if uname in partitioned_sub_params:
                    params = partitioned_sub_params[uname]
                    assert name not in partitioned_sub_params
                elif name in partitioned_sub_params:
                    params = partitioned_sub_params[name]
            new_s = set_operator_params(s, **params)
            if s != new_s:
                step_map[s] = new_s
        # make sure that no parameters were passed in for operations
        # that are not actually part of this pipeline
        for k in partitioned_sub_params.keys():
            n, i = get_name_and_index(k)
            assert n in found_names and i <= found_names[n]
        if step_map:
            # splice the replaced steps into the pipeline graph
            op._subst_steps(step_map)
            if not isinstance(op, Ops.TrainablePipeline):
                # As a result of choices made, we may now be a TrainableIndividualOp
                ret = Ops.make_pipeline_graph(op.steps(), op.edges(), ordered=True)
                if not isinstance(ret, Ops.TrainableOperator):
                    assert False
                return ret
            else:
                return op
        else:
            assert isinstance(op, Ops.TrainableOperator)
            return op
    elif isinstance(op, Ops.OperatorChoice):
        choices = op.steps()
        choice_index: int
        # NOTE(review): `choice_params` is declared but never assigned;
        # the code below uses `chosen_params` instead.
        choice_params: Dict[str, Any]
        if len(choices) == 1:
            choice_index = 0
            chosen_params = impl_params
        else:
            (choice_index, chosen_params) = partition_sklearn_choice_params(impl_params)

        assert 0 <= choice_index and choice_index < len(choices)
        choice: Ops.Operator = choices[choice_index]
        new_step = set_operator_params(choice, **chosen_params)
        # we remove the OperatorChoice, replacing it with the branch that was taken
        return new_step
    else:
        assert False, f"Not yet supported operation of type: {op.__class__.__name__}"
class SKlearnCompatWrapper(object):
    """Adapter that lets a lale operator behave like an sklearn estimator.

    The wrapped operator is held behind a WithoutGetParams shim in
    ``_base``.  get_params/set_params translate between sklearn's flat
    ``a__b`` parameter naming and lale's structure, unknown attributes are
    proxied to the underlying operator, and to_lale() recovers the wrapped
    lale operator.
    """

    _base: WithoutGetParams
    # This is used to trick clone into leaving us alone
    _old_params_for_clone: Optional[Dict[str, Any]]

    @classmethod
    def make_wrapper(cls, base: Ops.Operator):
        """Wrap *base* (idempotent; adds the WithoutGetParams shim if needed)."""
        b: Any = base
        if isinstance(base, SKlearnCompatWrapper):
            return base
        elif not isinstance(base, WithoutGetParams):
            b = WithoutGetParams(base)
        return cls(__lale_wrapper_init_base=b)

    def __init__(self, **kwargs):
        if "__lale_wrapper_init_base" in kwargs:
            # if we are being called by make_wrapper
            # then we don't need to make a copy
            self._base = kwargs["__lale_wrapper_init_base"]
            self._old_params_for_clone = None
        else:
            # otherwise, we are part of a get_params/init clone
            # and we need to make a copy
            self.init_params_internal(**kwargs)
        assert self._base != self

    def init_params_internal(self, **kwargs):
        # clone path: copy the wrapped operator and remember the original
        # kwargs so the first get_params() can satisfy sklearn's clone check
        op = kwargs["__lale_wrapper_base"]
        self._base = WithoutGetParams.clone_wgp(op)
        self._old_params_for_clone = kwargs

    def get_params_internal(self, out: Dict[str, Any]):
        # expose the shim-wrapped operator as our single parameter
        out["__lale_wrapper_base"] = self._base

    def set_params_internal(self, **impl_params):
        self._base = impl_params["__lale_wrapper_base"]
        assert self._base != self

    def fixup_params_internal(self, **params):
        # hook for subclasses; identity here
        return params

    def to_lale(self) -> Ops.Operator:
        """Unwrap all shims and return the underlying lale operator."""
        cur: Any = self
        assert cur is not None
        assert cur._base is not None
        cur = cur._base
        while isinstance(cur, WithoutGetParams):
            cur = cur._base
        assert isinstance(cur, Ops.Operator)
        return cur

    # sklearn calls __repr__ instead of __str__
    def __repr__(self):
        op = self.to_lale()
        if isinstance(op, Ops.TrainableIndividualOp):
            name = op.name()
            hyps = ""
            hps = op.hyperparams()
            if hps is not None:
                hyps = hyperparams_to_string(hps)
            return name + "(" + hyps + ")"
        else:
            return super().__repr__()

    def __getattribute__(self, name):
        """ Try proxying unknown attributes to the underlying operator

            getattribute is used instead of getattr to ensure that the
            correct underlying error is thrown in case
            a property (such as classes_) throws an AttributeError
        """
        # This is needed because in python copy skips calling the __init__ method
        try:
            return super(SKlearnCompatWrapper, self).__getattribute__(name)
        except AttributeError as e:
            if name == "_base":
                raise AttributeError
            try:
                return getattr(self._base, name)
            except AttributeError:
                # re-raise the ORIGINAL error, not the proxy's
                raise e

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        # TODO: We currently ignore deep
        out: Dict[str, Any] = {}
        if self._old_params_for_clone is not None:
            # lie to clone to make it happy (one-shot: cleared after use)
            params = self._old_params_for_clone
            self._old_params_for_clone = None
            return params
        else:
            self.get_params_internal(out)
        return out

    def fit(self, X, y=None, **fit_params):
        """Forward fit to the wrapped operator (no-op if it has no fit)."""
        if hasattr(self._base, "fit"):
            filtered_params = remove_defaults_dict(fit_params)
            return self._base.fit(X, y=y, **filtered_params)
        else:
            pass

    def set_params(self, **impl_params):
        """Apply sklearn-style parameters to the wrapped operator, in place."""
        if "__lale_wrapper_base" in impl_params:
            self.set_params_internal(**impl_params)
        else:
            # walk down to the innermost operator, remembering the last shim
            cur: Union[WithoutGetParams, Ops.Operator] = self._base
            assert self != cur
            assert cur is not None
            prev: WithoutGetParams = cur  # Note that this assignment is spurious, since the loop will always run at least once
            while isinstance(cur, WithoutGetParams):
                assert cur != cur._base
                prev = cur
                cur = cur._base
            assert isinstance(cur, Ops.Operator)
            fixed_params = self.fixup_params_internal(**impl_params)
            new_s = set_operator_params(cur, **fixed_params)
            if not isinstance(new_s, Ops.TrainableOperator):
                assert False
            if new_s != cur:
                # set_operator_params returned a replacement operator
                prev._base = new_s
        return self

    def get_defaults(self) -> Dict[str, Any]:
        """Flat dict of default hyperparameter settings of the wrapped operator."""
        return DefaultsVisitor.run(self.to_lale())

    def _final_individual_op(self) -> Optional[Ops.IndividualOp]:
        # follow pipelines to their last step; None if that is not an
        # individual operator
        op: Optional[Ops.Operator] = self.to_lale()
        while op is not None and isinstance(op, Ops.BasePipeline):
            op = op.get_last()
        if op is not None and not isinstance(op, Ops.IndividualOp):
            op = None
        return op

    @property
    def _final_estimator(self) -> Any:
        """The sklearn estimator at the end of the wrapped graph, or "passthrough"."""
        op: Optional[Ops.IndividualOp] = self._final_individual_op()
        model = None
        if op is not None:
            # if fit was called, we want to use trained result
            # even if the code uses the original operator
            # since sklearn assumes that fit mutates the operator
            if hasattr(op, "_trained"):
                tr_op: Any = op._trained
                assert isinstance(tr_op, Ops.TrainedIndividualOp)
                op = tr_op
            if hasattr(op, "_impl"):
                impl = op._impl_instance()
                if hasattr(impl, "_wrapped_model"):
                    model = impl._wrapped_model
                elif isinstance(impl, sklearn.base.BaseEstimator):
                    model = impl
        return "passthrough" if model is None else model

    # The properties below forward sklearn's estimator introspection
    # attributes to the final estimator.

    @property
    def classes_(self):
        return self._final_estimator.classes_

    @property
    def n_classes_(self):
        return self._final_estimator.n_classes_

    @property
    def _estimator_type(self):
        return self._final_estimator._estimator_type

    @property
    def _get_tags(self):
        return self._final_estimator._get_tags

    @property
    def coef_(self):
        return self._final_estimator.coef_

    @property
    def feature_importances_(self):
        return self._final_estimator.feature_importances_

    def get_param_ranges(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """Returns two dictionaries, ranges and cat_idx, for hyperparameters.

        The ranges dictionary has two kinds of entries. Entries for
        numeric and Boolean hyperparameters are tuples of the form
        (min, max, default). Entries for categorical hyperparameters
        are lists of their values.

        The cat_idx dictionary has (min, max, default) entries of indices
        into the corresponding list of values.

        Warning: ignores side constraints and unions."""
        op: Optional[Ops.IndividualOp] = self._final_individual_op()
        if op is None:
            raise ValueError("This pipeline does not end with an individual operator")
        else:
            return op.get_param_ranges()

    def get_param_dist(self, size=10) -> Dict[str, List[Any]]:
        """Returns a dictionary for discretized hyperparameters.

        Each entry is a list of values. For continuous hyperparameters,
        it returns up to `size` uniformly distributed values.

        Warning: ignores side constraints, unions, and distributions."""
        op: Optional[Ops.IndividualOp] = self._final_individual_op()
        if op is None:
            raise ValueError("This pipeline does not end with an individual operator")
        else:
            return op.get_param_dist(size=size)

    # sklearn compatibility
    # @property
    # def _final_estimator(self):
    #     lale_op = self.to_lale()
    #     if lale_op is _
    #     estimator = self.steps[-1][1]
    #     return 'passthrough' if estimator is None else estimator
class DefaultsVisitor(Visitor):
    """Visitor that collects the default hyperparameter settings of a
    (possibly nested) lale operator as a flat sklearn-style dict.
    """

    @classmethod
    def run(cls, op: Ops.Operator) -> Dict[str, Any]:
        """Entry point: visit *op* and return its defaults."""
        visitor = cls()
        return accept(op, visitor)

    def __init__(self):
        super(DefaultsVisitor, self).__init__()

    def visitIndividualOp(self, op: Ops.IndividualOp) -> Dict[str, Any]:
        # leaf case: the operator reports its own defaults
        return op.get_defaults()

    # all individual-op flavours share the same handling
    visitPlannedIndividualOp = visitIndividualOp
    visitTrainableIndividualOp = visitIndividualOp
    visitTrainedIndividualOp = visitIndividualOp

    def visitPipeline(self, op: Ops.PlannedPipeline) -> Dict[str, Any]:
        # defaults of each step, nested under the step's name
        # NOTE(review): duplicate step names are not disambiguated here
        # (no "name@i" suffix as elsewhere in this module), so later steps
        # with the same name overwrite earlier ones -- confirm intended.
        defaults_list: Iterable[Dict[str, Any]] = (
            nest_HPparams(s.name(), accept(s, self)) for s in op.steps()
        )

        defaults: Dict[str, Any] = {}
        for d in defaults_list:
            defaults.update(d)

        return defaults

    visitPlannedPipeline = visitPipeline
    visitTrainablePipeline = visitPipeline
    visitTrainedPipeline = visitPipeline

    def visitOperatorChoice(self, op: Ops.OperatorChoice) -> Dict[str, Any]:
        # merge the (un-nested) defaults of every choice branch
        defaults_list: Iterable[Dict[str, Any]] = (accept(s, self) for s in op.steps())

        defaults: Dict[str, Any] = {}
        for d in defaults_list:
            defaults.update(d)

        return defaults
# Auxiliary functions

V = TypeVar("V")


def nest_HPparam(name: str, key: str):
    """Nest *key* under *name* using sklearn's ``__`` separator."""
    if key == DUMMY_SEARCH_SPACE_GRID_PARAM_NAME:
        # we can get rid of the dummy now, since we have a name for it
        return name
    return name + "__" + key


def nest_HPparams(name: str, grid: Dict[str, V]) -> Dict[str, V]:
    """Nest every key of *grid* under *name* (see nest_HPparam)."""
    return {(nest_HPparam(name, k)): v for k, v in grid.items()}


def nest_all_HPparams(name: str, grids: List[Dict[str, V]]) -> List[Dict[str, V]]:
    """ Given the name of an operator in a pipeline, this transforms every key(parameter name) in the grids
        to use the operator name as a prefix (separated by __). This is the convention in scikit-learn pipelines.
    """
    return [nest_HPparams(name, grid) for grid in grids]
def nest_choice_HPparam(key: str):
    """Nest *key* under a choice by prepending the ``?`` prefix."""
    return choice_prefix + key


def nest_choice_HPparams(grid: Dict[str, V]) -> Dict[str, V]:
    """Apply nest_choice_HPparam to every key of *grid*."""
    return {(nest_choice_HPparam(k)): v for k, v in grid.items()}


def nest_choice_all_HPparams(grids: List[Dict[str, V]]) -> List[Dict[str, V]]:
    """ this transforms every key(parameter name) in the grids
        to be nested under a choice, using a ? as a prefix. This is the convention in scikit-learn pipelines.
    """
    return [nest_choice_HPparams(grid) for grid in grids]


def unnest_choice(k: str) -> str:
    """Inverse of nest_choice_HPparam: strip the leading ``?`` prefix."""
    assert k.startswith(choice_prefix)
    return k[len(choice_prefix) :]


def unnest_HPparams(k: str) -> List[str]:
    """Split an sklearn-style nested key into its ``__``-separated parts."""
    return k.split("__")
OpType = TypeVar("OpType", bound=Ops.Operator)


def clone_op(op: OpType, name: Optional[str] = None) -> OpType:
    """ Clone any operator, optionally renaming the copy.
    """
    from sklearn.base import clone

    # round-trip through the sklearn wrapper so sklearn's clone() semantics
    # apply, then unwrap back to a lale operator
    nop = clone(make_sklearn_compat(op)).to_lale()
    if name:
        nop._set_name(name)
    return nop
| 35.438806 | 128 | 0.628833 |
7c5c46b9c330f3b3fd09e8a030201b58fbccdbe2 | 1,760 | py | Python | nexxT/tests/interface/test_dataSample.py | ifm/nexxT | e697763a940f054287e8d3dc59105377de0ae2f0 | [
"Apache-2.0"
] | 5 | 2020-05-03T10:52:14.000Z | 2022-03-02T10:32:33.000Z | nexxT/tests/interface/test_dataSample.py | ifm/nexxT | e697763a940f054287e8d3dc59105377de0ae2f0 | [
"Apache-2.0"
] | 32 | 2020-05-18T15:49:00.000Z | 2022-02-22T20:10:56.000Z | nexxT/tests/interface/test_dataSample.py | ifm/nexxT | e697763a940f054287e8d3dc59105377de0ae2f0 | [
"Apache-2.0"
] | 2 | 2020-03-21T15:04:46.000Z | 2021-03-01T15:42:49.000Z | # SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#
import logging
import math
import platform
import time
import pytest
from nexxT.interface import DataSample
logging.getLogger(__name__).debug("executing test_dataSample.py")
def test_basic():
    """A DataSample hands out copies: mutating one view must not leak back."""
    sample = DataSample(b"Hello", "String", 38)
    assert sample.getContent().data() == b'Hello'
    # Obtain a content buffer and overwrite it in place.
    view = sample.getContent()
    view[:] = b'a' * view.size()
    assert view.data() == b'aaaaa'
    # The sample's original payload is unaffected by that mutation.
    assert sample.getContent().data() == b'Hello'
@pytest.mark.skipif(platform.system() == "Windows" and platform.release() == "7",
                    reason="windows 10 or higher, windows 7 seems to have millisecond resolution on timestamps.")
def test_currentTime():
    """Check accuracy and resolution of DataSample.currentTime().

    Samples the clock in a tight loop for ~3 seconds, comparing each
    reading against time.time_ns() and tracking the smallest observed
    tick between two distinct readings.
    """
    shortestDelta = math.inf
    ts = time.time()
    lastT = DataSample.currentTime()
    # Conversion factor from nanoseconds to DataSample timestamp units.
    factor = round(DataSample.TIMESTAMP_RES / 1e-9)
    deltas = []
    while time.time() - ts < 3:
        t = DataSample.currentTime()
        # assert that the implementation is consistent with time.time()
        # (deviation recorded in seconds for the average check below)
        deltas.append(abs(t - (time.time_ns() // factor))*DataSample.TIMESTAMP_RES)
        if t != lastT:
            # Track the smallest step between two distinct readings.
            shortestDelta = min(t - lastT, shortestDelta)
            lastT = t
    # make sure that the average delta is smaller than 1 millisecond
    assert sum(deltas)/len(deltas) < 1e-3
    shortestDelta = shortestDelta * DataSample.TIMESTAMP_RES
    # we want at least 10 microseconds resolution
    print("shortestDelta: %s" % shortestDelta)
    assert shortestDelta <= 1e-5
if __name__ == "__main__":
test_basic()
test_currentTime() | 33.846154 | 113 | 0.680114 |
f5f8b23bc92ba5faf8f13226381035c99561cf36 | 3,844 | py | Python | scrapers/scrape_so.py | BrianG4/covid_19 | 409258c515a7610afe8ce4d21df862e28ddfcb6a | [
"CC-BY-4.0"
] | null | null | null | scrapers/scrape_so.py | BrianG4/covid_19 | 409258c515a7610afe8ce4d21df862e28ddfcb6a | [
"CC-BY-4.0"
] | null | null | null | scrapers/scrape_so.py | BrianG4/covid_19 | 409258c515a7610afe8ce4d21df862e28ddfcb6a | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from bs4 import BeautifulSoup
import scrape_common as sc
# --- Part 1: weekly situation report PDF -------------------------------
# Locate the newest situation-report PDF linked from the canton page and
# extract tested/isolated/quarantined counts from its text layout.
base_url = 'https://corona.so.ch'
url = f'{base_url}/bevoelkerung/daten/woechentlicher-situationsbericht/'
d = sc.download(url, silent=True)
soup = BeautifulSoup(d, 'html.parser')
pdf_url = soup.find(href=re.compile(r'\.pdf$')).get('href')
pdf_url = f'{base_url}{pdf_url}'
content = sc.pdfdownload(pdf_url, layout=True, silent=True)
"""
Hospitalisationen im Kanton Anzahl Personen in Isolation davon Kontakte in Quarantäne Anzahl zusätzlicher Personen in Quarantäne nach Rückkehr aus Risikoland Re- Wert***
6 (6) 120 (71) 280 (189) 388 (280) 1.46 (1.1)
"""
rows = []
# The PDF sometimes hyphenates/spaces words ("S tand", "Tes ts"), hence \s?.
date = sc.find(r'S\s?tand: (\d+\.\d+\.20\d{2})', content)
number_of_tests = sc.find(r'PCR-Tes\s?ts\sTotal\s+(\d+\'?\d+)\s', content).replace('\'', '')
# Groups: 1=isolated, 2=quarantined contacts, 3=quarantined after travel.
res = re.search(r'Hospitalisationen im Kanton.*\d+ \(\d+\)\s+(\d+) \(\d+\)\s+(\d+) \(\d+\)\s+(\d+) \(\d+\)\s+\d\.\d+ \(\d\.\d+\)', content, re.DOTALL)
if res is not None:
    data = sc.DayData(canton='SO', url=pdf_url)
    data.datetime = date
    data.tested = number_of_tests
    data.isolated = res[1]
    data.quarantined = res[2]
    data.quarantine_riskareatravel = res[3]
    rows.append(data)
# --- Part 2: daily case/death/hospitalization table --------------------
url = f"{base_url}/index.php?id=27979"
d = sc.download(url, silent=True)
d = d.replace(" ", " ")
soup = BeautifulSoup(d, 'html.parser')
data_table = soup.find('h2', text=re.compile("Situation Kanton Solothurn")).find_next("table")
if data_table:
    # Map each cell to its column by position, using the header row names.
    headers = [cell.string for cell in data_table.find('tr').find_all('th')]
    for row in data_table.find_all('tr'):
        data = sc.DayData(canton='SO', url=url)
        col_num = 0
        tmp_date = None
        tmp_time = None
        for cell in row.find_all(['td']):
            if headers[col_num] == 'Datum':
                tmp_date = cell.string.strip()
            elif headers[col_num] == 'Zeit':
                tmp_time = cell.string.strip()
            elif headers[col_num] == 'Bestätigte Fälle (kumuliert)':
                data.cases = cell.string.strip()
            elif headers[col_num] == 'Todesfälle (kumuliert)':
                data.deaths = cell.string.strip()
            elif headers[col_num] == 'Im Kanton Hospitalisierte Personen':
                data.hospitalized = cell.string.strip()
            col_num += 1
        # Rows with a date range ("bis ...") are aggregates; skip them.
        if data and tmp_date and tmp_time and not tmp_date.startswith('bis '):
            data.datetime = f"{tmp_date} {tmp_time}".strip()
            rows.append(data)
else:
    # if the table is not there (it vanished on 2020-05-20) fallback to main page
    url = "https://corona.so.ch/"
    d = sc.download(url, silent=True)
    soup = BeautifulSoup(d, 'html.parser')
    title = soup.find('strong', text=re.compile("Situation Kanton Solothurn"))
    data_list = title.find_parent("div").find_all('li')
    date_str = sc.find('Stand\s*(.+)\s*Uhr', title.string)
    data = sc.DayData(canton='SO', url=url)
    for item in data_list:
        content = "".join([str(s) for s in item.contents])
        if not item:
            continue
        if 'Anzahl positiv getesteter Erkrankungsfälle' in content:
            data.cases = sc.find('.*:.*?(\d+)\s*.*', content).strip()
            continue
        if 'Verstorbene Personen' in content:
            data.deaths = sc.find('.*:.*?(\d+)\s*.*', content).strip()
            continue
        if 'hospitalisierte Personen' in content and not 'weniger als' in content:
            data.hospitalized = sc.find('.*:.*?(\d+)\s*.*', content).strip()
            continue
    rows.append(data)
# --- Output: print collected records separated by dashed lines ---------
is_first = True
# skip first row
for row in rows:
    if not is_first:
        print('-' * 10)
    is_first = False
    print(row)
| 39.22449 | 173 | 0.590271 |
d79d3823f2c685ea195f168e283e682f2950590b | 46 | py | Python | huffman/__init__.py | nicktimko/huffman | bfad004ce7951750cc4536ae4466f87afa0f5e5d | [
"MIT"
] | 18 | 2017-03-07T20:00:04.000Z | 2022-03-09T00:22:35.000Z | huffman/__init__.py | nicktimko/huffman | bfad004ce7951750cc4536ae4466f87afa0f5e5d | [
"MIT"
] | 1 | 2018-04-20T14:26:29.000Z | 2018-04-20T14:33:30.000Z | huffman/__init__.py | nicktimko/huffman | bfad004ce7951750cc4536ae4466f87afa0f5e5d | [
"MIT"
] | 5 | 2017-03-30T07:23:19.000Z | 2022-02-01T20:10:08.000Z | from .huffman import *
__version__ = "0.1.2"
| 11.5 | 22 | 0.673913 |
23056e74bc614f0ca5296193fab6f615f544f87d | 19,776 | py | Python | networks/FlowNet2.py | huent189/fast_blind_video_consistency | 0c0c138d60f4b6215c0d7b40d6c58a14b028bcc5 | [
"MIT"
] | null | null | null | networks/FlowNet2.py | huent189/fast_blind_video_consistency | 0c0c138d60f4b6215c0d7b40d6c58a14b028bcc5 | [
"MIT"
] | null | null | null | networks/FlowNet2.py | huent189/fast_blind_video_consistency | 0c0c138d60f4b6215c0d7b40d6c58a14b028bcc5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
try:
from networks.resample2d_package.resample2d import Resample2d
from networks.channelnorm_package.channelnorm import ChannelNorm
from networks import FlowNetC
from networks import FlowNetS
from networks import FlowNetSD
from networks import FlowNetFusion
from networks.submodules import *
except:
from .networks.resample2d_package.resample2d import Resample2d
from .networks.channelnorm_package.channelnorm import ChannelNorm
from .networks import FlowNetC
from .networks import FlowNetS
from .networks import FlowNetSD
from .networks import FlowNetFusion
from .networks.submodules import *
'Parameter count = 162,518,834'
class MyDict(dict):
    # dict subclass whose instances accept attribute assignment (they have a
    # __dict__); used by FlowNet2.__init__ below as a stand-in args namespace
    # (args.rgb_max, args.fp16, args.grads) when no args object is supplied.
    pass
class FlowNet2(nn.Module):
def __init__(self, args=None, batchNorm=False, div_flow = 20.):
super(FlowNet2,self).__init__()
if args is None:
args = MyDict()
args.rgb_max = 1
args.fp16 = False
args.grads = {}
self.batchNorm = batchNorm
self.div_flow = div_flow
self.rgb_max = args.rgb_max
self.args = args
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
if args.fp16:
self.resample1 = nn.Sequential(
tofp32(),
Resample2d(),
tofp16())
else:
self.resample1 = Resample2d()
# Block (FlowNetS1)
self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
if args.fp16:
self.resample2 = nn.Sequential(
tofp32(),
Resample2d(),
tofp16())
else:
self.resample2 = Resample2d()
# Block (FlowNetS2)
self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
# Block (FlowNetSD)
self.flownets_d = FlowNetSD.FlowNetSD(args, batchNorm=self.batchNorm)
self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
self.upsample4 = nn.Upsample(scale_factor=4, mode='nearest')
if args.fp16:
self.resample3 = nn.Sequential(
tofp32(),
Resample2d(),
tofp16())
else:
self.resample3 = Resample2d()
if args.fp16:
self.resample4 = nn.Sequential(
tofp32(),
Resample2d(),
tofp16())
else:
self.resample4 = Resample2d()
# Block (FLowNetFusion)
self.flownetfusion = FlowNetFusion.FlowNetFusion(args, batchNorm=self.batchNorm)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
def init_deconv_bilinear(self, weight):
f_shape = weight.size()
heigh, width = f_shape[-2], f_shape[-1]
f = np.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([heigh, width])
for x in range(width):
for y in range(heigh):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
min_dim = min(f_shape[0], f_shape[1])
weight.data.fill_(0.)
for i in range(min_dim):
weight.data[i,i,:,:] = torch.from_numpy(bilinear)
return
def forward(self, img1, img2):
sz = img1.size()
img1 = img1.view(sz[0], sz[1], 1, sz[2], sz[3] )
img2 = img2.view(sz[0], sz[1], 1, sz[2], sz[3] )
inputs = torch.cat((img1, img2), dim=2)
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
x = torch.cat((x1,x2), dim = 1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)
# warp img1 to img0; magnitude of diff between img0 and and warped_img1,
resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)
# warp img1 to img0 using flownets1; magnitude of diff between img0 and and warped_img1
resampled_img1 = self.resample2(x[:,3:,:,:], flownets1_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag
concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets2
flownets2_flow2 = self.flownets_2(concat2)[0]
flownets2_flow = self.upsample4(flownets2_flow2 * self.div_flow)
norm_flownets2_flow = self.channelnorm(flownets2_flow)
diff_flownets2_flow = self.resample4(x[:,3:,:,:], flownets2_flow)
# if not diff_flownets2_flow.volatile:
# diff_flownets2_flow.register_hook(save_grad(self.args.grads, 'diff_flownets2_flow'))
diff_flownets2_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownets2_flow))
# if not diff_flownets2_img1.volatile:
# diff_flownets2_img1.register_hook(save_grad(self.args.grads, 'diff_flownets2_img1'))
# flownetsd
flownetsd_flow2 = self.flownets_d(x)[0]
flownetsd_flow = self.upsample3(flownetsd_flow2 / self.div_flow)
norm_flownetsd_flow = self.channelnorm(flownetsd_flow)
diff_flownetsd_flow = self.resample3(x[:,3:,:,:], flownetsd_flow)
# if not diff_flownetsd_flow.volatile:
# diff_flownetsd_flow.register_hook(save_grad(self.args.grads, 'diff_flownetsd_flow'))
diff_flownetsd_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownetsd_flow))
# if not diff_flownetsd_img1.volatile:
# diff_flownetsd_img1.register_hook(save_grad(self.args.grads, 'diff_flownetsd_img1'))
# concat img1 flownetsd, flownets2, norm_flownetsd, norm_flownets2, diff_flownetsd_img1, diff_flownets2_img1
concat3 = torch.cat((x[:,:3,:,:], flownetsd_flow, flownets2_flow, norm_flownetsd_flow, norm_flownets2_flow, diff_flownetsd_img1, diff_flownets2_img1), dim=1)
flownetfusion_flow = self.flownetfusion(concat3)
# if not flownetfusion_flow.volatile:
# flownetfusion_flow.register_hook(save_grad(self.args.grads, 'flownetfusion_flow'))
return flownetfusion_flow
class FlowNet2C(FlowNetC.FlowNetC):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2C,self).__init__(args, batchNorm=batchNorm, div_flow=20)
self.rgb_max = args.rgb_max
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
# FlownetC top input stream
out_conv1a = self.conv1(x1)
out_conv2a = self.conv2(out_conv1a)
out_conv3a = self.conv3(out_conv2a)
# FlownetC bottom input stream
out_conv1b = self.conv1(x2)
out_conv2b = self.conv2(out_conv1b)
out_conv3b = self.conv3(out_conv2b)
# Merge streams
out_corr = self.corr(out_conv3a, out_conv3b) # False
out_corr = self.corr_activation(out_corr)
# Redirect top input stream and concatenate
out_conv_redir = self.conv_redir(out_conv3a)
in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1)
# Merged conv layers
out_conv3_1 = self.conv3_1(in_conv3_1)
out_conv4 = self.conv4_1(self.conv4(out_conv3_1))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2S(FlowNetS.FlowNetS):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2S,self).__init__(args, input_channels = 6, batchNorm=batchNorm)
self.rgb_max = args.rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)
out_conv1 = self.conv1(x)
out_conv2 = self.conv2(out_conv1)
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2SD(FlowNetSD.FlowNetSD):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2SD,self).__init__(args, batchNorm=batchNorm)
self.rgb_max = args.rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)
out_conv0 = self.conv0(x)
out_conv1 = self.conv1_1(self.conv1(out_conv0))
out_conv2 = self.conv2_1(self.conv2(out_conv1))
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
out_interconv5 = self.inter_conv5(concat5)
flow5 = self.predict_flow5(out_interconv5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
out_interconv4 = self.inter_conv4(concat4)
flow4 = self.predict_flow4(out_interconv4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
out_interconv3 = self.inter_conv3(concat3)
flow3 = self.predict_flow3(out_interconv3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
out_interconv2 = self.inter_conv2(concat2)
flow2 = self.predict_flow2(out_interconv2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2CS(nn.Module):
    """FlowNetC followed by a single FlowNetS refinement stage.

    forward() expects a stacked frame pair (the second image is warped by
    the FlowNetC flow and the residual is fed to FlowNetS) and returns the
    upsampled refined flow.
    """
    def __init__(self, args, batchNorm=False, div_flow = 20.):
        super(FlowNet2CS,self).__init__()
        self.batchNorm = batchNorm
        self.div_flow = div_flow
        self.rgb_max = args.rgb_max
        self.args = args

        self.channelnorm = ChannelNorm()

        # First Block (FlowNetC)
        self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

        if args.fp16:
            self.resample1 = nn.Sequential(
                tofp32(),
                Resample2d(),
                tofp16())
        else:
            self.resample1 = Resample2d()

        # Block (FlowNetS1)
        self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    # Fixed: use the in-place init variants (uniform_,
                    # xavier_uniform_); the non-underscore forms are
                    # deprecated/removed in modern PyTorch, and this now
                    # matches FlowNet2.__init__ in this file.
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)

            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
                # init_deconv_bilinear(m.weight)

    def forward(self, inputs):
        # Normalize by the per-sample mean over both frames, then flatten
        # the pair into a 6-channel tensor.
        rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
        x = (inputs - rgb_mean) / self.rgb_max
        x1 = x[:,:,0,:,:]
        x2 = x[:,:,1,:,:]
        x = torch.cat((x1,x2), dim = 1)

        # flownetc
        flownetc_flow2 = self.flownetc(x)[0]
        flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)

        # warp img1 to img0; magnitude of diff between img0 and and warped_img1,
        resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow)
        diff_img0 = x[:,:3,:,:] - resampled_img1
        norm_diff_img0 = self.channelnorm(diff_img0)

        # concat img0, img1, img1->img0, flow, diff-mag ;
        concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)

        # flownets1
        flownets1_flow2 = self.flownets_1(concat1)[0]
        flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)

        return flownets1_flow
class FlowNet2CSS(nn.Module):
    """FlowNetC followed by two cascaded FlowNetS refinement stages.

    forward() expects a stacked frame pair and returns the upsampled flow
    from the second FlowNetS stage.
    """
    def __init__(self, args, batchNorm=False, div_flow = 20.):
        super(FlowNet2CSS,self).__init__()
        self.batchNorm = batchNorm
        self.div_flow = div_flow
        self.rgb_max = args.rgb_max
        self.args = args

        self.channelnorm = ChannelNorm()

        # First Block (FlowNetC)
        self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

        if args.fp16:
            self.resample1 = nn.Sequential(
                tofp32(),
                Resample2d(),
                tofp16())
        else:
            self.resample1 = Resample2d()

        # Block (FlowNetS1)
        self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
        if args.fp16:
            self.resample2 = nn.Sequential(
                tofp32(),
                Resample2d(),
                tofp16())
        else:
            self.resample2 = Resample2d()

        # Block (FlowNetS2)
        self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    # Fixed: use the in-place init variants (uniform_,
                    # xavier_uniform_); the non-underscore forms are
                    # deprecated/removed in modern PyTorch, and this now
                    # matches FlowNet2.__init__ in this file.
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)

            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
                # init_deconv_bilinear(m.weight)

    def forward(self, inputs):
        # Normalize by the per-sample mean over both frames, then flatten
        # the pair into a 6-channel tensor.
        rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
        x = (inputs - rgb_mean) / self.rgb_max
        x1 = x[:,:,0,:,:]
        x2 = x[:,:,1,:,:]
        x = torch.cat((x1,x2), dim = 1)

        # flownetc
        flownetc_flow2 = self.flownetc(x)[0]
        flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)

        # warp img1 to img0; magnitude of diff between img0 and and warped_img1,
        resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow)
        diff_img0 = x[:,:3,:,:] - resampled_img1
        norm_diff_img0 = self.channelnorm(diff_img0)

        # concat img0, img1, img1->img0, flow, diff-mag ;
        concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)

        # flownets1
        flownets1_flow2 = self.flownets_1(concat1)[0]
        flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)

        # warp img1 to img0 using flownets1; magnitude of diff between img0 and and warped_img1
        resampled_img1 = self.resample2(x[:,3:,:,:], flownets1_flow)
        diff_img0 = x[:,:3,:,:] - resampled_img1
        norm_diff_img0 = self.channelnorm(diff_img0)

        # concat img0, img1, img1->img0, flow, diff-mag
        concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1)

        # flownets2
        flownets2_flow2 = self.flownets_2(concat2)[0]
        flownets2_flow = self.upsample3(flownets2_flow2 * self.div_flow)

        return flownets2_flow
88e5569d75f3f38543cad1824c5bc597eb4ec6fe | 1,336 | py | Python | cwlab/__main__.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | cwlab/__main__.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | cwlab/__main__.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import argparse
def main(argv=None):
    """Command-line entry point for CWLab.

    Parameters
    ----------
    argv : list of str, optional
        Arguments to parse.  Defaults to ``sys.argv[1:]`` (the argparse
        default), so existing callers are unaffected; passing a list makes
        the CLI unit-testable.
    """
    parser = argparse.ArgumentParser(
        prog="cwlab",
        description='CWLab: A platform-agnostic, cloud-ready framework for simplified'
                    ' deployment of the Common Workflow Language using a graphical web interface '
    )
    parser.add_argument(
        '--version', '-v',
        help="Return version of cwlab.",
        action='store_true'
    )
    subparser = parser.add_subparsers(
        help="CWLab sub-commands",
        dest='subcommand'
    )
    parser_up = subparser.add_parser(
        "up",
        help="Start the webserver."
    )
    parser_up.add_argument(
        '-c', '--config',
        help="Specify the path to a custom config file."
    )
    # Note: previously this rebound ``parser_up``; the subparser needs no
    # extra arguments, so the return value is simply dropped.
    subparser.add_parser(
        "print_config",
        help="Get an example config. Typical usage: cwlab print_config > example_config.yaml"
    )
    args = parser.parse_args(argv)

    if args.version:
        from . import __version__
        print(f"cwlab {__version__}")
        # Fixed: --version short-circuits; previously a sub-command given
        # alongside -v would still be executed after printing the version.
        return

    if args.subcommand == "up":
        from . import create_app
        create_app(config_file=args.config, webapp=True)
    elif args.subcommand == "print_config":
        from .utils import output_example_config
        output_example_config()

if __name__ == "__main__":
    main()
732a71f4e594c1de558c63c9322c0ef78b556ae4 | 13,689 | py | Python | wbb/modules/greetings.py | stylishsuryaa/WilliamButcherBotp | 56f0a49239c4e6daf9e004360b31b92254e5dcb1 | [
"MIT"
] | null | null | null | wbb/modules/greetings.py | stylishsuryaa/WilliamButcherBotp | 56f0a49239c4e6daf9e004360b31b92254e5dcb1 | [
"MIT"
] | null | null | null | wbb/modules/greetings.py | stylishsuryaa/WilliamButcherBotp | 56f0a49239c4e6daf9e004360b31b92254e5dcb1 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import os
from datetime import datetime
from random import shuffle
from pyrogram import filters
from pyrogram.errors.exceptions.bad_request_400 import (ChatAdminRequired,
UserNotParticipant)
from pyrogram.types import (Chat, ChatPermissions, InlineKeyboardButton,
InlineKeyboardMarkup, Message, User)
from wbb import SUDOERS, WELCOME_DELAY_KICK_SEC, app
from wbb.core.decorators.errors import capture_err
from wbb.core.decorators.permissions import adminsOnly
from wbb.core.keyboard import ikb
from wbb.utils.dbfunctions import (captcha_off, captcha_on, del_welcome,
get_captcha_cache, get_welcome,
has_solved_captcha_once, is_captcha_on,
is_gbanned_user, save_captcha_solved,
set_welcome, update_captcha_cache)
from wbb.utils.filter_groups import welcome_captcha_group
from wbb.utils.functions import extract_text_and_keyb, generate_captcha
__MODULE__ = "Greetings"
__HELP__ = """
/captcha [ENABLE|DISABLE] - Enable/Disable captcha.
/set_welcome - Reply this to a message containing correct
format for a welcome message, check end of this message.
/del_welcome - Delete the welcome message.
/get_welcome - Get the welcome message.
**SET_WELCOME ->**
The format should be something like below.
```
**Hi** {name} Welcome to {chat}
~ #This separater (~) should be there between text and buttons, remove this comment also
button=[Duck, https://duckduckgo.com]
button2=[Github, https://github.com]
```
**NOTES ->**
for /rules, you can do /filter rules to a message
containing rules of your groups whenever a user
sends /rules, he'll get the message
"""
# In-memory cache of pending captcha challenges; mirrored to the database
# via update_captcha_cache() elsewhere so state survives bot restarts.
answers_dicc = []
loop = asyncio.get_running_loop()
async def get_initial_captcha_cache():
    # Repopulate the module-level cache from the database at startup.
    global answers_dicc
    answers_dicc = await get_captcha_cache()
    return answers_dicc
# Schedule the initial cache load on the already-running event loop.
loop.create_task(get_initial_captcha_cache())
@app.on_message(filters.new_chat_members, group=welcome_captcha_group)
@capture_err
async def welcome(_, message: Message):
global answers_dicc
# Get cached answers from mongodb in case of bot's been restarted or crashed.
answers_dicc = await get_captcha_cache()
# Mute new member and send message with button
if not await is_captcha_on(message.chat.id):
return
for member in message.new_chat_members:
try:
if member.id in SUDOERS:
continue # ignore sudo users
if await is_gbanned_user(member.id):
await message.chat.kick_member(member.id)
await message.reply_text(
f"{member.mention} was globally banned, and got removed,"
+ " if you think this is a false gban, you can appeal"
+ " for this ban in support chat."
)
continue
if member.is_bot:
continue # ignore bots
# Ignore user if he has already solved captcha in this group
# someday
if await has_solved_captcha_once(message.chat.id, member.id):
continue
await message.chat.restrict_member(member.id, ChatPermissions())
text = (
f"{(member.mention())} Are you human?\n"
f"Solve this captcha in {WELCOME_DELAY_KICK_SEC} "
"seconds and 4 attempts or you'll be kicked."
)
except ChatAdminRequired:
return
# Generate a captcha image, answers and some wrong answers
captcha = generate_captcha()
captcha_image = captcha[0]
captcha_answer = captcha[1]
wrong_answers = captcha[2] # This consists of 8 wrong answers
correct_button = InlineKeyboardButton(
f"{captcha_answer}",
callback_data=f"pressed_button {captcha_answer} {member.id}",
)
temp_keyboard_1 = [correct_button] # Button row 1
temp_keyboard_2 = [] # Botton row 2
temp_keyboard_3 = []
for i in range(2):
temp_keyboard_1.append(
InlineKeyboardButton(
f"{wrong_answers[i]}",
callback_data=f"pressed_button {wrong_answers[i]} {member.id}",
)
)
for i in range(2, 5):
temp_keyboard_2.append(
InlineKeyboardButton(
f"{wrong_answers[i]}",
callback_data=f"pressed_button {wrong_answers[i]} {member.id}",
)
)
for i in range(5, 8):
temp_keyboard_3.append(
InlineKeyboardButton(
f"{wrong_answers[i]}",
callback_data=f"pressed_button {wrong_answers[i]} {member.id}",
)
)
shuffle(temp_keyboard_1)
keyboard = [temp_keyboard_1, temp_keyboard_2, temp_keyboard_3]
shuffle(keyboard)
verification_data = {
"chat_id": message.chat.id,
"user_id": member.id,
"answer": captcha_answer,
"keyboard": keyboard,
"attempts": 0,
}
keyboard = InlineKeyboardMarkup(keyboard)
# Append user info, correct answer and
answers_dicc.append(verification_data)
# keyboard for later use with callback query
button_message = await message.reply_photo(
photo=captcha_image,
caption=text,
reply_markup=keyboard,
quote=True,
)
os.remove(captcha_image)
# Save captcha answers etc in mongodb in case bot gets crashed or restarted.
await update_captcha_cache(answers_dicc)
asyncio.create_task(
kick_restricted_after_delay(
WELCOME_DELAY_KICK_SEC, button_message, member
)
)
await asyncio.sleep(0.5)
async def send_welcome_message(chat: Chat, user_id: int):
    """Send this chat's configured welcome message, if one is stored.

    Substitutes the ``{chat}`` and ``{name}`` placeholders and attaches
    the inline keyboard encoded in the stored template (text and buttons
    separated by ``~``, per the module help text).
    """
    raw_text = await get_welcome(chat.id)
    if not raw_text:
        # No welcome message configured for this chat.
        return
    text, keyb = extract_text_and_keyb(ikb, raw_text)
    if "{chat}" in text:
        text = text.replace("{chat}", chat.title)
    if "{name}" in text:
        # Resolve the new member's mention lazily, only when needed.
        text = text.replace("{name}", (await app.get_users(user_id)).mention)
    await app.send_message(
        chat.id,
        text=text,
        reply_markup=keyb,
        disable_web_page_preview=True,
    )
@app.on_callback_query(filters.regex("pressed_button"))
async def callback_query_welcome_button(_, callback_query):
    """After the new member presses the correct button,
    set his permissions to chat permissions,
    delete button message and join message.

    Fixes over the previous version:
    - `correct_answer`/`keyboard` were read without verifying that a matching
      cache entry was found, raising UnboundLocalError for stale buttons
      (e.g. after a restart that lost the in-memory cache).
    - entries were removed from `answers_dicc` while iterating over it, which
      skips elements; a single entry is now located first and removed safely.
    """
    global answers_dicc
    data = callback_query.data
    pressed_user_id = callback_query.from_user.id
    pending_user_id = int(data.split(None, 2)[2])
    button_message = callback_query.message
    answer = data.split(None, 2)[1]
    # Locate the pending verification for this user in this chat.
    entry = None
    for candidate in answers_dicc:
        if (
            candidate["user_id"] == pending_user_id
            and candidate["chat_id"] == button_message.chat.id
        ):
            entry = candidate
            break
    if entry is None:
        # No cached captcha for this button; nothing sensible to verify.
        return await callback_query.answer("This captcha has expired.")
    correct_answer = entry["answer"]
    keyboard = entry["keyboard"]
    if pending_user_id != pressed_user_id:
        return await callback_query.answer("This is not for you")
    if answer != correct_answer:
        await callback_query.answer("Yeah, It's Wrong.")
        # Kick on the fourth wrong press (attempts 0..2 are tolerated).
        if entry["attempts"] >= 3:
            answers_dicc.remove(entry)
            await button_message.chat.kick_member(pending_user_id)
            await asyncio.sleep(1)
            await button_message.chat.unban_member(pending_user_id)
            await button_message.delete()
            await update_captcha_cache(answers_dicc)
            return
        entry["attempts"] += 1
        # Reshuffle both the rows and the row order so the layout changes
        # on every retry.
        shuffle(keyboard[0])
        shuffle(keyboard[1])
        shuffle(keyboard[2])
        shuffle(keyboard)
        return await button_message.edit(
            text=button_message.caption.markdown,
            reply_markup=InlineKeyboardMarkup(keyboard),
        )
    await callback_query.answer("Captcha passed successfully!")
    await button_message.chat.unban_member(pending_user_id)
    await button_message.delete()
    # Drop the solved entry and persist the remaining cache.
    answers_dicc.remove(entry)
    await update_captcha_cache(answers_dicc)
    chat = callback_query.message.chat
    # Save this verification in db, so we don't have to
    # send captcha to this user when he joins again.
    await save_captcha_solved(chat.id, pending_user_id)
    return await send_welcome_message(chat, pending_user_id)
async def kick_restricted_after_delay(
    delay, button_message: Message, user: User
):
    """If the new member is still restricted after the delay, delete
    button message and join message and then kick him.

    Fixes over the previous version:
    - cache entries were matched on ``user_id`` only, so a user with pending
      captchas in several chats had *all* of them purged; the chat id is now
      matched as well.
    - the list was mutated with ``remove()`` while being iterated, which can
      skip elements; the cache is now rebuilt with a comprehension.
    """
    global answers_dicc
    await asyncio.sleep(delay)
    join_message = button_message.reply_to_message
    group_chat = button_message.chat
    user_id = user.id
    await join_message.delete()
    await button_message.delete()
    if answers_dicc:
        remaining = [
            i
            for i in answers_dicc
            if not (
                i["user_id"] == user_id and i["chat_id"] == group_chat.id
            )
        ]
        # Persist only when something was actually removed.
        if len(remaining) != len(answers_dicc):
            answers_dicc = remaining
            await update_captcha_cache(answers_dicc)
    await _ban_restricted_user_until_date(group_chat, user_id, duration=delay)
async def _ban_restricted_user_until_date(
    group_chat, user_id: int, duration: int
):
    """Temporarily ban *user_id* from *group_chat* if they are still restricted.

    Called after the captcha timeout: a member who never solved the captcha
    stays in the restricted state, so ban them for another *duration* seconds.
    Users who already left the chat are silently ignored.
    """
    try:
        member = await group_chat.get_member(user_id)
        # NOTE(review): compares status against the string "restricted" —
        # confirm this matches the installed Pyrogram version (v2 uses
        # ChatMemberStatus enum members instead of strings).
        if member.status == "restricted":
            until_date = int(datetime.utcnow().timestamp() + duration)
            await group_chat.kick_member(user_id, until_date=until_date)
    except UserNotParticipant:
        # The user left (or was removed) before the timeout fired.
        pass
@app.on_message(filters.command("captcha") & ~filters.private)
@adminsOnly("can_restrict_members")
async def captcha_state(_, message):
    """Enable or disable the captcha requirement for new members."""
    usage = "**Usage:**\n/captcha [ENABLE|DISABLE]"
    if len(message.command) != 2:
        await message.reply_text(usage)
        return
    chat_id = message.chat.id
    requested = message.text.split(None, 1)[1].strip().lower()
    if requested == "enable":
        await captcha_on(chat_id)
        await message.reply_text("Enabled Captcha For New Users.")
    elif requested == "disable":
        await captcha_off(chat_id)
        await message.reply_text("Disabled Captcha For New Users.")
    else:
        await message.reply_text(usage)
# WELCOME MESSAGE
@app.on_message(filters.command("set_welcome") & ~filters.private)
@adminsOnly("can_change_info")
async def set_welcome_func(_, message):
    """Store the replied-to text as this chat's welcome message template."""
    usage = "You need to reply to a text, check the Greetings module in /help"
    # Collapsed the two duplicated guards: we need a reply and it must
    # carry plain text.
    if not message.reply_to_message or not message.reply_to_message.text:
        await message.reply_text(usage)
        return
    chat_id = message.chat.id
    raw_text = message.reply_to_message.text.markdown
    # Validate the template (text plus optional inline keyboard) before saving.
    if not extract_text_and_keyb(ikb, raw_text):
        # Fix: corrected the user-facing typo "formating" -> "formatting".
        return await message.reply_text("Wrong formatting, check help section.")
    await set_welcome(chat_id, raw_text)
    await message.reply_text("Welcome message has been successfully set.")
@app.on_message(filters.command("del_welcome") & ~filters.private)
@adminsOnly("can_change_info")
async def del_welcome_func(_, message):
    """Remove the stored welcome message for this chat."""
    await del_welcome(message.chat.id)
    await message.reply_text("Welcome message has been deleted.")
@app.on_message(filters.command("get_welcome") & ~filters.private)
@adminsOnly("can_change_info")
async def get_welcome_func(_, message):
    """Preview the stored welcome message, then echo its raw template."""
    chat = message.chat
    stored = await get_welcome(chat.id)
    if not stored:
        return await message.reply_text("No welcome message set.")
    sender = message.from_user
    if not sender:
        return await message.reply_text(
            "You're anon, can't send welcome message."
        )
    await send_welcome_message(chat, sender.id)
    # Strip backticks from the template so it renders inside a code span.
    escaped = stored.replace("`", "")
    await message.reply_text(f"`{escaped}`")
| 34.568182 | 88 | 0.646797 |
6fc170f289ac14925bc9a39a3675375f2a0fb1ed | 873 | bzl | Python | rules/expand_template/hello.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 572 | 2015-09-02T20:26:41.000Z | 2022-03-30T07:43:22.000Z | rules/expand_template/hello.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 158 | 2015-08-31T20:21:50.000Z | 2022-03-20T20:13:14.000Z | rules/expand_template/hello.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 408 | 2015-08-31T20:05:14.000Z | 2022-03-28T02:36:44.000Z | """Generate a file using a template.
It is much more memory-efficient to use a template file than creating the whole
content during the analysis phase.
"""
# Label of the template file to use.
_TEMPLATE = "//expand_template:hello.cc"
def hello(**kwargs):
    # Macro wrapper: derive the generated source file name ("<name>.cc")
    # from the target name before delegating to the underlying rule.
    _hello(
        source_file = "{name}.cc".format(**kwargs),
        **kwargs
    )
def _hello_impl(ctx):
    # Expand the template at execution time, substituting the `firstname`
    # attribute into the {FIRSTNAME} placeholder of the template file.
    ctx.actions.expand_template(
        template = ctx.file._template,
        output = ctx.outputs.source_file,
        substitutions = {
            "{FIRSTNAME}": ctx.attr.firstname,
        },
    )
# Rule that generates a C++ source file from the template. `source_file` is a
# declared output attribute, so downstream targets can depend on it directly;
# `_template` is private (leading underscore) and fixed to _TEMPLATE.
_hello = rule(
    implementation = _hello_impl,
    attrs = {
        "firstname": attr.string(mandatory = True),
        "_template": attr.label(
            default = Label(_TEMPLATE),
            allow_single_file = True,
        ),
        "source_file": attr.output(mandatory = True),
    },
)
| 24.25 | 79 | 0.608247 |
a077448c1de73d0fb41bc622fda12dc019930bdf | 8,789 | py | Python | pitop/keyboard/vendor/pynput/keyboard/_win32.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 28 | 2020-11-24T08:02:58.000Z | 2022-02-27T18:37:33.000Z | pitop/keyboard/vendor/pynput/keyboard/_win32.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 263 | 2020-11-10T14:35:10.000Z | 2022-03-31T12:35:13.000Z | pitop/keyboard/vendor/pynput/keyboard/_win32.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 1 | 2022-01-31T22:48:35.000Z | 2022-01-31T22:48:35.000Z | # coding=utf-8
# pynput
# Copyright (C) 2015-2018 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The keyboard implementation for *Windows*."""
# pylint: disable=C0111
# The documentation is extracted from the base classes
# pylint: disable=R0903
# We implement stubs
import contextlib
import ctypes
import enum
from ctypes import wintypes
import pynput._util.win32_vks as VK
import six
from pynput._util import AbstractListener
from pynput._util.win32 import (
INPUT,
KEYBDINPUT,
INPUT_union,
KeyTranslator,
ListenerMixin,
SendInput,
SystemHook,
VkKeyScan,
)
from . import _base
class KeyCode(_base.KeyCode):
    """A key code with Win32-specific serialisation for ``SendInput``."""
    def _parameters(self, is_press):
        """The parameters to pass to ``SendInput`` to generate this key.
        :param bool is_press: Whether to generate a press event.
        :return: all arguments to pass to ``SendInput`` for this key
        :rtype: dict
        """
        if self.vk:
            # An explicit virtual key code was supplied; send it directly.
            vk = self.vk
            scan = 0
            flags = 0
        else:
            res = VkKeyScan(self.char)
            if (res >> 8) & 0xFF == 0:
                # The character maps to a plain virtual key (high byte of
                # VkKeyScan holds the required modifier state; zero = none).
                vk = res & 0xFF
                scan = 0
                flags = 0
            else:
                # No direct VK mapping; send the raw code point as a
                # UNICODE scan code instead.
                vk = 0
                scan = ord(self.char)
                flags = KEYBDINPUT.UNICODE
        return dict(
            dwFlags=flags | (KEYBDINPUT.KEYUP if not is_press else 0),
            wVk=vk,
            wScan=scan,
        )
class Key(enum.Enum):
    """The known special keys, each backed by its Win32 virtual key code."""
    # Modifier keys (left/right variants plus the generic alias).
    alt = KeyCode.from_vk(VK.MENU)
    alt_l = KeyCode.from_vk(VK.LMENU)
    alt_r = KeyCode.from_vk(VK.RMENU)
    alt_gr = KeyCode.from_vk(VK.RMENU)
    backspace = KeyCode.from_vk(VK.BACK)
    caps_lock = KeyCode.from_vk(VK.CAPITAL)
    cmd = KeyCode.from_vk(VK.LWIN)
    cmd_l = KeyCode.from_vk(VK.LWIN)
    cmd_r = KeyCode.from_vk(VK.RWIN)
    ctrl = KeyCode.from_vk(VK.CONTROL)
    ctrl_l = KeyCode.from_vk(VK.LCONTROL)
    ctrl_r = KeyCode.from_vk(VK.RCONTROL)
    delete = KeyCode.from_vk(VK.DELETE)
    down = KeyCode.from_vk(VK.DOWN)
    end = KeyCode.from_vk(VK.END)
    enter = KeyCode.from_vk(VK.RETURN)
    esc = KeyCode.from_vk(VK.ESCAPE)
    # Function keys.
    f1 = KeyCode.from_vk(VK.F1)
    f2 = KeyCode.from_vk(VK.F2)
    f3 = KeyCode.from_vk(VK.F3)
    f4 = KeyCode.from_vk(VK.F4)
    f5 = KeyCode.from_vk(VK.F5)
    f6 = KeyCode.from_vk(VK.F6)
    f7 = KeyCode.from_vk(VK.F7)
    f8 = KeyCode.from_vk(VK.F8)
    f9 = KeyCode.from_vk(VK.F9)
    f10 = KeyCode.from_vk(VK.F10)
    f11 = KeyCode.from_vk(VK.F11)
    f12 = KeyCode.from_vk(VK.F12)
    f13 = KeyCode.from_vk(VK.F13)
    f14 = KeyCode.from_vk(VK.F14)
    f15 = KeyCode.from_vk(VK.F15)
    f16 = KeyCode.from_vk(VK.F16)
    f17 = KeyCode.from_vk(VK.F17)
    f18 = KeyCode.from_vk(VK.F18)
    f19 = KeyCode.from_vk(VK.F19)
    f20 = KeyCode.from_vk(VK.F20)
    # Navigation keys.
    home = KeyCode.from_vk(VK.HOME)
    left = KeyCode.from_vk(VK.LEFT)
    page_down = KeyCode.from_vk(VK.NEXT)
    page_up = KeyCode.from_vk(VK.PRIOR)
    right = KeyCode.from_vk(VK.RIGHT)
    shift = KeyCode.from_vk(VK.LSHIFT)
    shift_l = KeyCode.from_vk(VK.LSHIFT)
    shift_r = KeyCode.from_vk(VK.RSHIFT)
    space = KeyCode.from_vk(VK.SPACE, char=" ")
    tab = KeyCode.from_vk(VK.TAB)
    up = KeyCode.from_vk(VK.UP)
    insert = KeyCode.from_vk(VK.INSERT)
    menu = KeyCode.from_vk(VK.APPS)
    num_lock = KeyCode.from_vk(VK.NUMLOCK)
    pause = KeyCode.from_vk(VK.PAUSE)
    print_screen = KeyCode.from_vk(VK.SNAPSHOT)
    scroll_lock = KeyCode.from_vk(VK.SCROLL)
class Controller(_base.Controller):
    """A keyboard controller for *Windows* that injects key events through
    the ``SendInput`` API.

    The redundant ``__init__`` override (it only delegated to ``super()``)
    has been removed; the base-class initialiser is inherited unchanged.
    """
    _KeyCode = KeyCode
    _Key = Key
    def _handle(self, key, is_press):
        """Serialise *key* into a ``KEYBDINPUT`` structure and inject it.

        :param key: The key to press or release.
        :param bool is_press: Whether this is a press event.
        """
        SendInput(
            1,
            ctypes.byref(
                INPUT(
                    type=INPUT.KEYBOARD,
                    value=INPUT_union(ki=KEYBDINPUT(**key._parameters(is_press))),
                )
            ),
            ctypes.sizeof(INPUT),
        )
class Listener(ListenerMixin, _base.Listener):
    """A keyboard listener backed by a low-level Windows keyboard hook
    (``WH_KEYBOARD_LL``)."""
    #: The Windows hook ID for low level keyboard events, ``WH_KEYBOARD_LL``
    _EVENTS = 13
    _WM_KEYDOWN = 0x0100
    _WM_KEYUP = 0x0101
    _WM_SYSKEYDOWN = 0x0104
    _WM_SYSKEYUP = 0x0105
    # A bit flag attached to messages indicating that the payload is an actual
    # UTF-16 character code
    _UTF16_FLAG = 0x1000
    # A special virtual key code designating unicode characters
    _VK_PACKET = 0xE7
    #: The messages that correspond to a key press
    _PRESS_MESSAGES = (_WM_KEYDOWN, _WM_SYSKEYDOWN)
    #: The messages that correspond to a key release
    _RELEASE_MESSAGES = (_WM_KEYUP, _WM_SYSKEYUP)
    #: A mapping from keysym to special key
    _SPECIAL_KEYS = {key.value.vk: key for key in Key}
    _HANDLED_EXCEPTIONS = (SystemHook.SuppressException,)
    class _KBDLLHOOKSTRUCT(ctypes.Structure):
        """Contains information about a low-level keyboard event passed to a
        ``WH_KEYBOARD_LL`` hook procedure, ``LowLevelKeyboardProc``."""
        _fields_ = [
            ("vkCode", wintypes.DWORD),
            ("scanCode", wintypes.DWORD),
            ("flags", wintypes.DWORD),
            ("time", wintypes.DWORD),
            ("dwExtraInfo", ctypes.c_void_p),
        ]
    #: A pointer to a :class:`KBDLLHOOKSTRUCT`
    _LPKBDLLHOOKSTRUCT = ctypes.POINTER(_KBDLLHOOKSTRUCT)
    def __init__(self, *args, **kwargs):
        super(Listener, self).__init__(*args, **kwargs)
        self._translator = KeyTranslator()
        # Optional caller-supplied predicate that can suppress events early;
        # defaults to accepting everything.
        self._event_filter = self._options.get("event_filter", lambda msg, data: True)
    def _convert(self, code, msg, lpdata):
        # Runs inside the hook callback: turn the raw hook payload into a
        # (message, code) tuple for _process, or None to suppress the event.
        if code != SystemHook.HC_ACTION:
            return
        data = ctypes.cast(lpdata, self._LPKBDLLHOOKSTRUCT).contents
        is_packet = data.vkCode == self._VK_PACKET
        # Suppress further propagation of the event if it is filtered
        if self._event_filter(msg, data) is False:
            return None
        elif is_packet:
            # VK_PACKET events carry a UTF-16 code unit in scanCode; tag the
            # message so _process decodes it as a character.
            return (msg | self._UTF16_FLAG, data.scanCode)
        else:
            return (msg, data.vkCode)
    @AbstractListener._emitter
    def _process(self, wparam, lparam):
        msg = wparam
        vk = lparam
        # If the key has the UTF-16 flag, we treat it as a unicode character,
        # otherwise convert the event to a KeyCode; this may fail, and in that
        # case we pass None
        is_utf16 = msg & self._UTF16_FLAG
        if is_utf16:
            msg = msg ^ self._UTF16_FLAG
            scan = vk
            key = KeyCode.from_char(six.unichr(scan))
        else:
            try:
                key = self._event_to_key(msg, vk)
            except OSError:
                key = None
        if msg in self._PRESS_MESSAGES:
            self.on_press(key)
        elif msg in self._RELEASE_MESSAGES:
            self.on_release(key)
    # pylint: disable=R0201
    @contextlib.contextmanager
    def _receive(self):
        """An empty context manager; we do not need to fake keyboard events."""
        yield
    # pylint: enable=R0201
    def _event_to_key(self, msg, vk):
        """Converts an :class:`_KBDLLHOOKSTRUCT` to a :class:`KeyCode`.
        :param msg: The message received.
        :param vk: The virtual key code to convert.
        :return: a :class:`pynput.keyboard.KeyCode`
        :raises OSError: if the message and data could not be converted
        """
        # We must always call self._translate to keep the keyboard state up to
        # date
        key = KeyCode(**self._translate(vk, msg in self._PRESS_MESSAGES))
        # If the virtual key code corresponds to a Key value, we prefer that
        if vk in self._SPECIAL_KEYS:
            return self._SPECIAL_KEYS[vk]
        else:
            return key
    def _translate(self, vk, is_press):
        """Translates a virtual key code to a parameter list passable to
        :class:`pynput.keyboard.KeyCode`.
        :param int vk: The virtual key code.
        :param bool is_press: Whether this is a press event.
        :return: a parameter list to the :class:`pynput.keyboard.KeyCode`
        constructor
        """
        return self._translator(vk, is_press)
| 30.947183 | 86 | 0.631926 |
1ce576985b5ba0605d70bc72bd302c1c63879004 | 602 | py | Python | test/test_analysis.py | EvilPsyCHo/Deep-Time-Series-Prediction | f6a6da060bb3f7d07f2a61967ee6007e9821064e | [
"Apache-2.0"
] | 334 | 2019-11-01T01:39:18.000Z | 2022-03-31T08:10:17.000Z | test/test_analysis.py | luxixiang/Deep-Time-Series-Prediction | f6a6da060bb3f7d07f2a61967ee6007e9821064e | [
"Apache-2.0"
] | 8 | 2019-12-30T08:01:32.000Z | 2021-12-06T05:27:29.000Z | test/test_analysis.py | luxixiang/Deep-Time-Series-Prediction | f6a6da060bb3f7d07f2a61967ee6007e9821064e | [
"Apache-2.0"
] | 57 | 2020-01-13T13:20:15.000Z | 2022-03-31T08:10:20.000Z | # encoding: utf-8
"""
@author : zhirui zhou
@contact: evilpsycho42@gmail.com
@time : 2020/4/27 15:11
"""
import pytest
from deepseries.analysis import SeriesAnalysisModel
import numpy as np
def test_analysis_model():
    """Smoke-test SeriesAnalysisModel plotting and validity bookkeeping."""
    series = np.random.rand(4, 500) + 1e-3
    series[0, 0] = np.nan
    series[0, 1] = 0
    model = SeriesAnalysisModel(series)
    model.plot_valid()
    model.get_trend(365).plot_trend()
    model.plot_trend(0)
    model.get_autocorr(np.arange(1, 300)).plot_autocorr()
    # Exactly the two invalid points (the NaN and the zero) should be masked.
    assert model.mask.sum() == 2
    assert model.valid_lens[0] == 498
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    test_analysis_model()
| 20.758621 | 57 | 0.671096 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.