hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf57eccff8c0900e10bd7343f2504c637582180 | 2,408 | py | Python | pyhamtools/dxcluster.py | Falk2704/pyhamtools | 2d8c2b978746cf0dd7cfab206a5de7367c7aa922 | [
"MIT"
] | null | null | null | pyhamtools/dxcluster.py | Falk2704/pyhamtools | 2d8c2b978746cf0dd7cfab206a5de7367c7aa922 | [
"MIT"
] | null | null | null | pyhamtools/dxcluster.py | Falk2704/pyhamtools | 2d8c2b978746cf0dd7cfab206a5de7367c7aa922 | [
"MIT"
] | null | null | null | __author__ = 'dh1tw'
import re
import pytz
from datetime import datetime
from time import strptime, mktime
from pyhamtools.consts import LookupConventions as const
UTC = pytz.UTC
def decode_char_spot(raw_string):
"""Chop Line from DX-Cluster into pieces and return a dict with the spot data"""
data = {}
# Spotter callsign
    if re.match(r'[A-Za-z0-9\/]+[:$]', raw_string[6:15]):
        data[const.SPOTTER] = re.sub(':', '', re.match(r'[A-Za-z0-9\/]+[:$]', raw_string[6:15]).group(0))
    else:
        raise ValueError("could not decode spotter callsign")
    if re.search(r'[0-9\.]{5,12}', raw_string[10:25]):
        data[const.FREQUENCY] = float(re.search(r'[0-9\.]{5,12}', raw_string[10:25]).group(0))
    else:
        raise ValueError("could not decode frequency")
    data[const.DX] = re.sub(r'[^A-Za-z0-9\/]+', '', raw_string[26:38])
    data[const.COMMENT] = re.sub(r'[^\sA-Za-z0-9\.,;\#\+\-!\?\$\(\)@\/]+', ' ', raw_string[39:69]).strip()
    data[const.TIME] = datetime.now(UTC)
return data
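# Illustrative use of decode_char_spot() with a hypothetical cluster line
# (column positions follow the string slices assumed above):
#
#   line = "DX de DH1TW:    14205.0   ZD9XF         up 5, loud        1433Z"
#   spot = decode_char_spot(line)
#   # spot[const.SPOTTER] -> 'DH1TW', spot[const.FREQUENCY] -> 14205.0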
def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["raw_spot"] = raw_string
return data
def decode_pc61_message(raw_string):
"""Decode PC61 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["ip"] = spot[8]
data["raw_spot"] = raw_string
return data
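# Both PC11 and PC61 payloads are caret-separated; a hypothetical PC61 line
# to illustrate the field positions indexed above:
#
#   raw = "PC61^14205.0^ZD9XF^01-Jan-2020^1433Z^loud^DH1TW^node^127.0.0.1^"
#   spot = decode_pc61_message(raw)
#   # spot[const.DX] -> 'ZD9XF', spot["ip"] -> '127.0.0.1'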
def decode_pc23_message(raw_string):
""" Decode PC23 Message which usually contains WCY """
data = {}
wcy = raw_string.split("^")
    data[const.R] = int(wcy[1])
    data[const.expk] = int(wcy[2])
    data[const.CALLSIGN] = wcy[3]
    data[const.A] = wcy[4]
    data[const.SFI] = wcy[5]
    data[const.K] = wcy[6]
    data[const.AURORA] = wcy[7]
    # The original reused wcy[7] for both the aurora flag and the node;
    # assuming node and ip occupy the next two fields instead.
    data["node"] = wcy[8]
    data["ip"] = wcy[9]
    data["raw_data"] = raw_string
    return data
| 29.365854 | 106 | 0.606312 |
acf5800e9b5368f78a3d40605eb6e6d5ebad9a0d | 5,996 | py | Python | tests/unit/h_matchers/matcher/web/request_test.py | hypothesis/h-matcher | 40a9a5577c33295f3ce651338df05a6814554568 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/h_matchers/matcher/web/request_test.py | hypothesis/h-matcher | 40a9a5577c33295f3ce651338df05a6814554568 | [
"BSD-2-Clause"
] | 14 | 2019-10-31T17:24:09.000Z | 2021-09-10T14:08:23.000Z | tests/unit/h_matchers/matcher/web/request_test.py | hypothesis/h-matchers | 2544f2de107585f3964137d497a2349b689b1816 | [
"BSD-2-Clause"
] | null | null | null | from urllib.parse import urlparse
import pytest
from pyramid.request import Request as PyramidRequest
from pyramid.testing import DummyRequest
from pytest import param
from requests import PreparedRequest, Request
from h_matchers import Any
from h_matchers.matcher.collection import AnyMapping
from h_matchers.matcher.web.request import AnyRequest
# We have a lot of fixtures going on in this file
# pylint: disable=too-many-arguments
class TestAnyRequest:
@pytest.mark.parametrize(
"request_method,matcher_method,matches",
(
("POST", None, True),
("GET", None, True),
("POST", "POST", True),
("GET", "GET", True),
("PoSt", "pOsT", True),
("POST", "GET", False),
("POST", Any.string(), True),
("POST", Any.string.containing("OS"), True),
("GET", Any.string.containing("OS"), False),
),
)
def test_can_match_method(
self, request_method, matcher_method, matches, make_request, is_fluent
):
request = make_request(method=request_method)
matcher = (
AnyRequest.with_method(matcher_method)
if is_fluent
else AnyRequest(method=matcher_method)
)
assert (request == matcher) is matches
@pytest.mark.parametrize(
"request_url,matcher_url,matches",
(
("http://example.com/", None, True),
("http://example.com/", "http://example.com", True),
("http://EXAMPLE.COM", "http://example.com/", True),
("http://example.com/path", "http://example.com/path", True),
("http://not.example.com/", "http://example.com/", False),
("http://example.com/", Any.url(host="example.com"), True),
("http://not.example.com/", Any.url(host="example.com"), False),
),
)
def test_can_match_url(
self, request_url, matcher_url, matches, make_request, is_fluent
):
request = make_request(url=request_url)
matcher = (
AnyRequest.with_url(matcher_url)
if is_fluent
else AnyRequest(url=matcher_url)
)
assert (request == matcher) is matches
@pytest.mark.parametrize(
"header_matcher,matches",
(
param({"A": "a", "B": "b", "C": "c"}, False, id="Super set"),
param({"A": "a", "B": "b"}, True, id="Exact match"),
param({"A": "a"}, False, id="Sub set"),
param(AnyMapping(), True, id="Matcher"),
(None, True),
),
)
def test_can_match_headers(self, make_request, header_matcher, matches, is_fluent):
headers = {"A": "a", "B": "b"}
request = make_request(headers=headers)
if is_fluent:
matcher = AnyRequest.with_headers(header_matcher)
else:
matcher = AnyRequest(headers=header_matcher)
assert (request == matcher) is matches
@pytest.mark.parametrize(
"header_matcher,matches",
(
param({"A": "a", "B": "b", "C": "c"}, False, id="Super set"),
param({"A": "a", "B": "b"}, True, id="Exact match"),
param({"A": "a"}, True, id="Sub set"),
param(AnyMapping(), True, id="Matcher"),
(None, True),
),
)
def test_can_match_with_a_subset_of_headers(
self, make_request, header_matcher, matches
):
headers = {"A": "a", "B": "b"}
request = make_request(headers=headers)
matcher = AnyRequest.containing_headers(header_matcher)
assert (request == matcher) is matches
@pytest.fixture
def make_request(self, request_class, default_params):
def make_request(**params):
params = dict(default_params, **params)
return RequestBuilder.build(class_=request_class, params=params)
return make_request
@pytest.fixture
def default_params(self):
return {"url": "http://example.com/", "method": "GET", "headers": {}}
@pytest.fixture(
params=(
param(Request, id="requests.Request"),
param(PreparedRequest, id="requests.PreparedRequest"),
param(PyramidRequest, id="pyramid.request.Request"),
param(DummyRequest, id="pyramid.testing.DummyRequest"),
)
)
def request_class(self, request):
# Note that `request` in this context is a pytest thing
return request.param
@pytest.fixture(params=[True, False], ids=["fluent", "init"])
def is_fluent(self, request):
return request.param
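# The parametrised `is_fluent` fixture above drives both construction styles;
# illustratively, these two matchers assert the same thing:
#
#   AnyRequest.with_method("POST")   # fluent style
#   AnyRequest(method="POST")        # constructor-kwargs style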
class RequestBuilder:
@classmethod
def build(cls, class_, params):
# Requests objects
if issubclass(class_, Request):
return Request(**params)
if issubclass(class_, PreparedRequest):
return Request(**params).prepare()
# Pyramid objects
environ = cls._make_environ(**params)
if issubclass(class_, PyramidRequest):
return PyramidRequest(environ)
if issubclass(class_, DummyRequest):
            # Pass environ by keyword: DummyRequest's first positional
            # parameter is `params`, not `environ`.
            request = DummyRequest(
                environ=environ, url=params["url"], headers=params["headers"]
            )
request.method = params["method"]
return request
raise NotImplementedError(
f"Don't know how to build '{class_}'"
) # pragma: no cover
@classmethod
def _make_environ(cls, method, url, headers):
# https://www.python.org/dev/peps/pep-0333/#environ-variables
url_parts = urlparse(url)
environ = {
"SERVER_PROTOCOL": "HTTP/1.0",
"wsgi.url_scheme": url_parts.scheme,
"REQUEST_METHOD": method,
"HTTP_HOST": url_parts.hostname,
"PATH_INFO": url_parts.path,
"QUERY_STRING": url_parts.query,
}
for key, value in headers.items():
environ[f"HTTP_{key.upper()}"] = value
return environ
| 31.893617 | 87 | 0.577552 |
acf58016afb3f81a665582d69fba4896d9f13b0e | 4,121 | py | Python | tests/test_version.py | facelessuser/coloraide | c273cb652f75941b95ad8ddc8becc9873b97f085 | [
"MIT"
] | 30 | 2020-10-11T05:47:51.000Z | 2022-03-22T06:05:33.000Z | tests/test_version.py | nisancigokmen/coloraide | 2707323cbd440e8e75fd58dd4092a5d036f07bd6 | [
"MIT"
] | 139 | 2020-10-20T15:28:57.000Z | 2022-03-31T23:44:18.000Z | tests/test_version.py | nisancigokmen/coloraide | 2707323cbd440e8e75fd58dd4092a5d036f07bd6 | [
"MIT"
] | 3 | 2021-08-29T13:25:12.000Z | 2021-12-22T19:58:11.000Z | """Version tests."""
import unittest
from coloraide.__meta__ import Version, parse_version
class TestVersion(unittest.TestCase):
"""Test versions."""
def test_version_output(self):
"""Test that versions generate proper strings."""
assert Version(1, 0, 0, "final")._get_canonical() == "1.0"
assert Version(1, 2, 0, "final")._get_canonical() == "1.2"
assert Version(1, 2, 3, "final")._get_canonical() == "1.2.3"
assert Version(1, 2, 0, "alpha", pre=4)._get_canonical() == "1.2a4"
assert Version(1, 2, 0, "beta", pre=4)._get_canonical() == "1.2b4"
assert Version(1, 2, 0, "candidate", pre=4)._get_canonical() == "1.2rc4"
assert Version(1, 2, 0, "final", post=1)._get_canonical() == "1.2.post1"
assert Version(1, 2, 3, ".dev-alpha", pre=1)._get_canonical() == "1.2.3a1.dev0"
assert Version(1, 2, 3, ".dev")._get_canonical() == "1.2.3.dev0"
assert Version(1, 2, 3, ".dev", dev=1)._get_canonical() == "1.2.3.dev1"
assert Version(0, 1, 0, "final")._get_canonical() == "0.1.0"
def test_version_comparison(self):
"""Test that versions compare proper."""
assert Version(1, 0, 0, "final") < Version(1, 2, 0, "final")
assert Version(1, 2, 0, "alpha", pre=4) < Version(1, 2, 0, "final")
assert Version(1, 2, 0, "final") < Version(1, 2, 0, "final", post=1)
assert Version(1, 2, 3, ".dev-beta", pre=2) < Version(1, 2, 3, "beta", pre=2)
assert Version(1, 2, 3, ".dev") < Version(1, 2, 3, ".dev-beta", pre=2)
assert Version(1, 2, 3, ".dev") < Version(1, 2, 3, ".dev", dev=1)
def test_version_parsing(self):
"""Test version parsing."""
assert parse_version(
Version(1, 0, 0, "final")._get_canonical()
) == Version(1, 0, 0, "final")
assert parse_version(
Version(1, 2, 0, "final")._get_canonical()
) == Version(1, 2, 0, "final")
assert parse_version(
Version(1, 2, 3, "final")._get_canonical()
) == Version(1, 2, 3, "final")
assert parse_version(
Version(1, 2, 0, "alpha", pre=4)._get_canonical()
) == Version(1, 2, 0, "alpha", pre=4)
assert parse_version(
Version(1, 2, 0, "beta", pre=4)._get_canonical()
) == Version(1, 2, 0, "beta", pre=4)
assert parse_version(
Version(1, 2, 0, "candidate", pre=4)._get_canonical()
) == Version(1, 2, 0, "candidate", pre=4)
assert parse_version(
Version(1, 2, 0, "final", post=1)._get_canonical()
) == Version(1, 2, 0, "final", post=1)
assert parse_version(
Version(1, 2, 3, ".dev-alpha", pre=1)._get_canonical()
) == Version(1, 2, 3, ".dev-alpha", pre=1)
assert parse_version(
Version(1, 2, 3, ".dev")._get_canonical()
) == Version(1, 2, 3, ".dev")
assert parse_version(
Version(1, 2, 3, ".dev", dev=1)._get_canonical()
) == Version(1, 2, 3, ".dev", dev=1)
def test_asserts(self):
"""Test asserts."""
with self.assertRaises(ValueError):
Version("1", "2", "3")
with self.assertRaises(ValueError):
Version(1, 2, 3, 1)
with self.assertRaises(ValueError):
Version("1", "2", "3")
with self.assertRaises(ValueError):
Version(1, 2, 3, "bad")
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha")
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha", pre=1, dev=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha", pre=1, post=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, ".dev-alpha")
with self.assertRaises(ValueError):
Version(1, 2, 3, ".dev-alpha", pre=1, post=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, pre=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, dev=1)
with self.assertRaises(ValueError):
parse_version('bad&version')
| 43.378947 | 87 | 0.549139 |
acf5818786246b9365ebf7fff1c27c45fe29d1ec | 4,983 | py | Python | densenet/models/one_d.py | rikuhuttunen/densenet_1d | fb7660fca9de7cd326d7e7201a1b64a1325baed9 | [
"MIT"
] | 1 | 2021-01-20T07:13:23.000Z | 2021-01-20T07:13:23.000Z | densenet/models/one_d.py | rikuhuttunen/densenet_1d | fb7660fca9de7cd326d7e7201a1b64a1325baed9 | [
"MIT"
] | null | null | null | densenet/models/one_d.py | rikuhuttunen/densenet_1d | fb7660fca9de7cd326d7e7201a1b64a1325baed9 | [
"MIT"
] | null | null | null | """
models/one_d.py
Author: Ankit Gupta
Implementations of the core DenseNet model
This module contains helper functions that define a DenseNet computational graph in Keras.
Note that these functions are not immediately usable for classification, as the outputs
are not softmaxed, and the functions have not been wrapped in keras.models.Model objects.
"""
from tensorflow.keras.layers import Conv1D, BatchNormalization, Activation, MaxPooling1D, GlobalAveragePooling1D
from densenet.blocks.one_d import dense_block, transition_block, squeeze_excite_block
def DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling,
se=False):
def f(x):
x = Conv1D(
initial_filters,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling1D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
if se:
x = squeeze_excite_block(x)
# Add all but the last dense block
for block_size in block_sizes[:-1]:
x = dense_block(
k,
block_size,
conv_kernel_width,
bottleneck_size,
se=se)(x)
x = transition_block(
pool_size=transition_pool_size,
stride=transition_pool_stride,
theta=theta,
se=se)(x)
# Add the last dense block
final_block_size = block_sizes[-1]
x = dense_block(
k,
final_block_size,
conv_kernel_width,
bottleneck_size)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
if use_global_pooling:
x = GlobalAveragePooling1D()(x)
return x
return f
def DenseNet121(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 24, 16]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
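# A minimal sketch of wrapping one of these graph functions in a Keras model,
# assuming a hypothetical input of 1024 single-channel samples (the
# hyperparameter values below are illustrative placeholders, not
# recommendations):
#
#   from tensorflow.keras import Input, Model
#   inputs = Input(shape=(1024, 1))
#   outputs = DenseNet121(
#       k=32, conv_kernel_width=3, bottleneck_size=4, transition_pool_size=2,
#       transition_pool_stride=2, theta=0.5, initial_conv_width=7,
#       initial_stride=2, initial_filters=64, initial_pool_width=3,
#       initial_pool_stride=2, use_global_pooling=True)(inputs)
#   model = Model(inputs, outputs)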
def DenseNet169(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 32, 32]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
def DenseNet201(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 48, 32]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
def DenseNet264(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 64, 48]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
| 25.953125 | 112 | 0.598836 |
acf582f0a75fe2c7d33d76483d7fa8b7b0c5a51a | 1,074 | py | Python | selfservice-api/src/selfservice_api/schemas/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 2 | 2020-07-03T18:18:34.000Z | 2021-03-08T10:25:50.000Z | selfservice-api/src/selfservice_api/schemas/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 312 | 2020-01-10T23:00:08.000Z | 2022-03-29T22:07:00.000Z | selfservice-api/src/selfservice_api/schemas/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 2 | 2020-03-26T05:10:20.000Z | 2021-02-05T19:22:56.000Z | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This exports all of the schemas used by the application."""
from .contact_us import ContactUsSchema
from .oidc_config import OIDCConfigSchema
from .project import ProjectSchema
from .team import TeamSchema
from .technical_req import (TechnicalReqPackageSchema, TechnicalReqRequestSchema, # noqa: I001
TechnicalReqResponseSchema, TechnicalReqTestAccountSchema) # noqa: I001
from .test_account import TestAccountSchema
from .user import UserSchema
| 44.75 | 100 | 0.775605 |
acf58317b81c4353eb906fd6026d7f9778c9af95 | 648 | py | Python | examples/user-api-django-app/api/views.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | 8 | 2017-11-24T12:07:02.000Z | 2020-04-27T03:27:58.000Z | examples/user-api-django-app/api/views.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | 19 | 2017-11-14T23:35:31.000Z | 2022-03-08T22:50:02.000Z | examples/user-api-django-app/api/views.py | stphivos/bg-kube | 3936c2899f76b2f58df7a5f38fac741c638562cf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.http import HttpResponse
from rest_framework import viewsets
from .serializers import UserSerializer
from .permissions import IsAuthenticatedOrCreateOnly
def ready(_):
return HttpResponse('OK')
def live(_):
return HttpResponse('OK')
class UserViewSet(viewsets.ModelViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
http_method_names = ['get', 'post', 'patch']
permission_classes = [IsAuthenticatedOrCreateOnly]
def get_object(self):
return self.request.user
| 22.344828 | 54 | 0.751543 |
acf58434f673ca00331d1181eb2c594f2bb0e884 | 2,857 | py | Python | tests/core/action_test.py | analogue/Tron | 9e09f8e76ab9f9aea61501c4d20bccef37d5bf0b | [
"Apache-2.0"
] | null | null | null | tests/core/action_test.py | analogue/Tron | 9e09f8e76ab9f9aea61501c4d20bccef37d5bf0b | [
"Apache-2.0"
] | null | null | null | tests/core/action_test.py | analogue/Tron | 9e09f8e76ab9f9aea61501c4d20bccef37d5bf0b | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from tron.config.schema import ConfigAction
from tron.config.schema import ConfigConstraint
from tron.config.schema import ConfigParameter
from tron.config.schema import ConfigVolume
from tron.core.action import Action
class TestAction:
@pytest.mark.parametrize('disk', [600., None])
def test_from_config_full(self, disk):
config = ConfigAction(
name="ted",
command="do something",
node="first",
executor="ssh",
cpus=1,
mem=100,
disk=disk, # default: 1024.0
constraints=[
ConfigConstraint(
attribute='pool',
operator='LIKE',
value='default',
),
],
docker_image='fake-docker.com:400/image',
docker_parameters=[
ConfigParameter(
key='test',
value=123,
),
],
env={'TESTING': 'true'},
extra_volumes=[
ConfigVolume(
host_path='/tmp',
container_path='/nail/tmp',
mode='RO',
),
],
trigger_downstreams=True,
triggered_by=["foo.bar"],
)
new_action = Action.from_config(config)
assert new_action.name == config.name
assert new_action.command == config.command
assert new_action.node_pool is None
assert new_action.executor == config.executor
assert new_action.cpus == config.cpus
assert new_action.mem == config.mem
assert new_action.disk == (600. if disk else 1024.)
assert new_action.constraints == {('pool', 'LIKE', 'default')}
assert new_action.docker_image == config.docker_image
assert new_action.docker_parameters == {('test', 123)}
assert new_action.env == config.env
assert new_action.extra_volumes == {('/nail/tmp', '/tmp', 'RO')}
assert new_action.trigger_downstreams is True
assert new_action.triggered_by == ['foo.bar']
def test_from_config_none_values(self):
config = ConfigAction(
name="ted",
command="do something",
node="first",
executor="ssh",
)
new_action = Action.from_config(config)
assert new_action.name == config.name
assert new_action.command == config.command
assert new_action.executor == config.executor
assert new_action.constraints == set()
assert new_action.docker_image is None
assert new_action.docker_parameters == set()
assert new_action.env == {}
assert new_action.extra_volumes == set()
| 35.271605 | 72 | 0.570179 |
acf58443136d4c96f86334cec6054185351f8563 | 6,857 | py | Python | toolkit/radar_preprocess/util/mmwave_bag.py | MAPS-Lab/OdomBydVision | a6e1a91dce0001bd377080ad228529f6184b7130 | [
"MIT"
] | null | null | null | toolkit/radar_preprocess/util/mmwave_bag.py | MAPS-Lab/OdomBydVision | a6e1a91dce0001bd377080ad228529f6184b7130 | [
"MIT"
] | null | null | null | toolkit/radar_preprocess/util/mmwave_bag.py | MAPS-Lab/OdomBydVision | a6e1a91dce0001bd377080ad228529f6184b7130 | [
"MIT"
] | null | null | null | import struct
import csv
import numpy as np
import pandas as pd
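# Helper added during refactoring: equivalent to the repeated
# struct.pack('4B', ...) / struct.unpack('1f', ...) pairs in the original.
def _unpack_float(pt_cloud, offset):
    """Reassemble four consecutive payload bytes into a float32 value."""
    raw = struct.pack('4B', *(int(b) for b in pt_cloud[offset:offset + 4]))
    return struct.unpack('1f', raw)[0]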
def make_frames_from_csv(csv_path):
    """Parse a rosbag-exported CSV of mmWave point clouds into a dict mapping
    timestamp -> list of [x, y, z, intensity, range, doppler] points."""
readings_dict = dict()
with open(csv_path, 'r') as input_file:
reader = csv.reader(input_file)
next(reader)
for row in reader:
pts = list()
# add timestamp
timestamp = row[0] # timestamp = row[4] + row[5].zfill(9)
# parsing
try:
offset_col = row[37]
except:
offset_col = row[29]
            pt_cloud = np.fromstring(offset_col[1:-1], dtype=int, sep=',')
            for i in range(0, int(len(pt_cloud) / 32)):
                base = 32 * i
                # x, y, z, intensity, range and doppler are float32 fields at
                # fixed byte offsets inside each 32-byte point record
                point = [_unpack_float(pt_cloud, base + off)
                         for off in (0, 4, 8, 16, 20, 24)]
                pts.append(point)
return readings_dict
def make_frames_from_csv_doppler(csv_path):
    """Like make_frames_from_csv, but replaces each point's doppler value with
    the matching per-point velocity from the companion '*_scan.csv' file."""
    doppler_csv_path = csv_path.replace('.csv', '_scan.csv')
    print(doppler_csv_path)
vel_data = pd.read_csv(doppler_csv_path)
# print(vel_data.head())
vel_idx = 0
readings_dict = dict()
with open(csv_path, 'r') as input_file:
reader = csv.reader(input_file)
next(reader)
for row in reader:
pts = list()
# add timestamp
timestamp = row[0] # timestamp = row[4] + row[5].zfill(9)
# parsing
try:
offset_col = row[37]
except:
offset_col = row[29]
pt_cloud = np.fromstring(offset_col[1:-1], dtype=int, sep=',')
            row_without_velocity = 0
            for i in range(0, int(len(pt_cloud) / 32)):
                base = 32 * i
                # same 32-byte point record layout as in make_frames_from_csv
                point = [_unpack_float(pt_cloud, base + off)
                         for off in (0, 4, 8, 16, 20, 24)]
if row_without_velocity == 1:
continue
if vel_idx == vel_data.shape[0]:
return readings_dict
if abs(point[0] - vel_data.loc[vel_idx]['x']) < 1e-10 and abs(
point[1] - vel_data.loc[vel_idx]['y']) < 1e-10 and abs(
point[2] - vel_data.loc[vel_idx]['z']) < 1e-10:
point[5] = vel_data.loc[vel_idx]['velocity']
vel_idx = vel_idx + 1
else:
find_flag = 0
while vel_idx < vel_data.shape[0] - 1:
vel_idx = vel_idx + 1
if abs(point[0] - vel_data.loc[vel_idx]['x']) < 1e-10 and abs(
point[1] - vel_data.loc[vel_idx]['y']) < 1e-10 and abs(
point[2] - vel_data.loc[vel_idx]['z']) < 1e-10:
point[5] = vel_data.loc[vel_idx]['velocity']
vel_idx = vel_idx + 1
find_flag = 1
break
if find_flag == 0:
vel_idx = 0
row_without_velocity = 1
continue
pts.append(point)
readings_dict[timestamp] = pts
return readings_dict
| 43.675159 | 120 | 0.41855 |
acf5849f8a8376f9d73386d5ae403cfca449d3ac | 74 | py | Python | test.py | AlexLamson/novella | ad856dac218fa1e964bc92dd25940f22a0fd0e14 | [
"MIT"
] | null | null | null | test.py | AlexLamson/novella | ad856dac218fa1e964bc92dd25940f22a0fd0e14 | [
"MIT"
] | null | null | null | test.py | AlexLamson/novella | ad856dac218fa1e964bc92dd25940f22a0fd0e14 | [
"MIT"
] | null | null | null | import smarter_responder as r
def i(sentence):
return r.inputs(sentence) | 18.5 | 29 | 0.797297 |
acf584d4334555e41ad492ed29794db0e034a238 | 4,040 | py | Python | lib/irc_connect.py | kjv13/Twitch-Migration-Tracker | c2f34941382f89fabb6530339fd50b272d3c6572 | [
"MIT"
] | null | null | null | lib/irc_connect.py | kjv13/Twitch-Migration-Tracker | c2f34941382f89fabb6530339fd50b272d3c6572 | [
"MIT"
] | null | null | null | lib/irc_connect.py | kjv13/Twitch-Migration-Tracker | c2f34941382f89fabb6530339fd50b272d3c6572 | [
"MIT"
] | null | null | null | import pdb
import sys
import os
import socket
import configparser
# Constants
SERVER = 'irc.chat.twitch.tv'
PORT = 6667
NICKNAME = 'mroseman'
PASSWORD = ''
BUFFER_SIZE = 2048
class IRCBadMessage(Exception):
pass
class IRCConnection:
"""
Used to connect to the twitch irc server and get messages, etc from
different channels
"""
current_dir = os.path.dirname(__file__)
config_rel_path = '../config/irc.cfg'
config_abs_path = os.path.join(current_dir, config_rel_path)
section_name = 'Connection Authentication'
def __init__(self):
config = configparser.ConfigParser()
config.read(self.config_abs_path)
try:
PASSWORD = config[self.section_name]['oauth']
except Exception as e:
print('one of the options in the config file has no value\n{0}:' +
'{1}').format(e.errno, e.strerror)
sys.exit()
self.IRC = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.IRC.connect((SERVER, PORT))
self._send_data('PASS %s' % PASSWORD)
self._send_data('NICK %s' % NICKNAME)
self._send_data('CAP REQ :twitch.tv/membership')
def _send_data(self, command):
"""
sends the given command to the IRC server
"""
self.IRC.send(bytes(command + '\r\n', 'UTF-8'))
def _parse_line(self, line):
"""
takes an irc message and parses it into prefix, command and args
@return: (prefix, command, args)
"""
prefix = ''
if not line:
raise IRCBadMessage("Empty line.")
if line[0] == ':':
try:
prefix, line = line[1:].split(' ', 1)
except ValueError as e:
print(line)
raise e
if line.find(' :') != -1:
line, trailing = line.split(' ', 1)
args = line.split()
args.append(trailing)
else:
args = line.split()
command = args.pop(0)
if not command or not args:
raise IRCBadMessage('Improperly formatted line: {0}'.format(line))
return prefix, command, args
def get_channel_users(self, channel):
"""
gets a list of users from the IRC NAMES command
@return: a set of users from the irc (may be just OPs)
"""
# clear the IRC buffer
temp = self.IRC.recv(BUFFER_SIZE)
# join the IRC channel
self._send_data('JOIN #{0}'.format(channel))
users = set()
readbuffer = ''
debug_lines = ''
while True:
readbuffer = readbuffer +\
str(self.IRC.recv(BUFFER_SIZE).decode('UTF-8'))
temp = str.split(readbuffer, '\n')
# If there isn't a \n char at the end of the line then the whole
# line wasn't received so it is popped and included in the next
# iteration
readbuffer = temp.pop()
for line in temp:
debug_lines += line + '\n'
line = str.rstrip(line)
# print(line)
try:
_, command, args = self._parse_line(line)
except IRCBadMessage as e:
pdb.set_trace()
print('bad IRC message received, returning empty user' +
'list')
print(e)
print()
print(debug_lines)
return []
try:
# if this is a response to NAMES
if command == '353':
users |= set((args[0].split(':', 1))[1].split())
if 'End of /NAMES list' in args[0]:
self._send_data('PART #{0}'.format(channel))
return users
except Exception as e:
print('\n\n')
print(debug_lines)
raise e
print('\n\n')
| 31.5625 | 78 | 0.514356 |
acf58527b6bb15c17938851b41a59d5bfbe7b573 | 3,401 | py | Python | pypureclient/flasharray/FA_2_6/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_6/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_6/models/directory_policy_export_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_6 import models
class DirectoryPolicyExportPost(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'policies': 'list[DirectorypolicyexportpostPolicies]'
}
attribute_map = {
'policies': 'policies'
}
required_args = {
}
def __init__(
self,
policies=None, # type: List[models.DirectorypolicyexportpostPolicies]
):
"""
Keyword args:
policies (list[DirectorypolicyexportpostPolicies]): A list of export policies to apply to the directory. The `id` and `name` fields in each `policy` parameter are required, but cannot be set together.
"""
if policies is not None:
self.policies = policies
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryPolicyExportPost`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DirectoryPolicyExportPost, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DirectoryPolicyExportPost):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.366071 | 212 | 0.57042 |
acf5856256bdfe5da28cc154d4f0929be6e9c1a3 | 3,285 | py | Python | bsds/functions.py | matej-ulicny/harmonic-networks | 0fccf674806a0b876e641ef5271aad520ff90739 | [
"BSD-3-Clause"
] | 41 | 2019-05-02T02:58:20.000Z | 2022-03-28T22:37:16.000Z | bsds/functions.py | matej-ulicny/harmonic-networks | 0fccf674806a0b876e641ef5271aad520ff90739 | [
"BSD-3-Clause"
] | 4 | 2020-09-14T12:50:24.000Z | 2021-03-19T04:36:46.000Z | bsds/functions.py | matej-ulicny/harmonic-networks | 0fccf674806a0b876e641ef5271aad520ff90739 | [
"BSD-3-Clause"
] | 8 | 2019-05-04T09:37:06.000Z | 2021-08-15T15:38:45.000Z | import numpy as np
import torch
# loss function
def sigmoid_cross_entropy_loss(prediction, label):
#print (label,label.max(),label.min())
label = label.long()
mask = (label != 0).float()
num_positive = torch.sum(mask).float()
num_negative = mask.numel() - num_positive
#print (num_positive, num_negative)
mask[mask != 0] = num_negative / (num_positive + num_negative)
mask[mask == 0] = num_positive / (num_positive + num_negative)
    cost = torch.nn.functional.binary_cross_entropy_with_logits(
        prediction.float(), label.float(), weight=mask, reduction='none')
return torch.sum(cost)
def cross_entropy_loss(prediction, label):
#print (label,label.max(),label.min())
label = label.long()
mask = (label != 0).float()
num_positive = torch.sum(mask).float()
num_negative = mask.numel() - num_positive
#print (num_positive, num_negative)
mask[mask != 0] = num_negative / (num_positive + num_negative)
mask[mask == 0] = num_positive / (num_positive + num_negative)
    cost = torch.nn.functional.binary_cross_entropy(
        prediction.float(), label.float(), weight=mask, reduction='none')
return torch.sum(cost)
def fixed_weight_cross_entropy_loss(prediction, label):
#print (label,label.max(),label.min())
label = label.long()
mask = (label != 0).float()
num_positive = torch.sum(mask).float()
num_negative = mask.numel() - num_positive
#print (num_positive, num_negative)
mask[mask != 0] = 0.95
mask[mask == 0] = 0.05
    cost = torch.nn.functional.binary_cross_entropy(
        prediction.float(), label.float(), weight=mask, reduction='none')
return torch.sum(cost)
def weighted_nll_loss(prediction, label):
label = torch.squeeze(label.long(), dim=0)
nch = prediction.shape[1]
label[label >= nch] = 0
    cost = torch.nn.functional.nll_loss(prediction, label, reduction='none')
mask = (label != 0).float()
num_positive = torch.sum(mask).float()
num_negative = mask.numel() - num_positive
mask[mask != 0] = num_negative / (num_positive + num_negative)
mask[mask == 0] = num_positive / (num_positive + num_negative)
cost = torch.mul(cost, mask)
return torch.sum(cost)
def weighted_cross_entropy_loss(prediction, label, output_mask=False):
    criterion = torch.nn.CrossEntropyLoss(reduction='none')
label = torch.squeeze(label.long(), dim=0)
nch = prediction.shape[1]
label[label >= nch] = 0
cost = criterion(prediction, label)
mask = (label != 0).float()
num_positive = torch.sum(mask).float()
num_negative = mask.numel() - num_positive
mask[mask == 1] = num_negative / (num_positive + num_negative)
mask[mask == 0] = num_positive / (num_positive + num_negative)
cost = torch.mul(cost, mask)
if output_mask:
return torch.sum(cost), (label != 0)
else:
return torch.sum(cost)
def l2_regression_loss(prediction, label, mask):
label = torch.squeeze(label.float())
prediction = torch.squeeze(prediction.float())
mask = (mask != 0).float()
num_positive = torch.sum(mask).float()
cost = torch.nn.functional.mse_loss(prediction, label, reduce=False)
cost = torch.mul(cost, mask)
cost = cost / (num_positive + 0.00000001)
return torch.sum(cost)
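# Illustrative call with hypothetical shapes (a single-channel edge map
# scored against a sparse binary label mask):
#
#   pred = torch.sigmoid(torch.randn(1, 1, 320, 480))
#   label = (torch.rand(1, 1, 320, 480) > 0.9).float()
#   loss = cross_entropy_loss(pred, label)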
| 39.107143 | 72 | 0.670928 |
acf587a23614ea23a9cd381c7bff399bd4500022 | 13,757 | py | Python | ivy/functional/backends/torch/general.py | DhruvaBhardwaj404/ivy | 0d500ab66bb4fd929e1d74ecc4aa35c24e34a1d3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/torch/general.py | DhruvaBhardwaj404/ivy | 0d500ab66bb4fd929e1d74ecc4aa35c24e34a1d3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/torch/general.py | DhruvaBhardwaj404/ivy | 0d500ab66bb4fd929e1d74ecc4aa35c24e34a1d3 | [
"Apache-2.0"
] | null | null | null | """Collection of PyTorch general functions, wrapped to fit Ivy syntax and signature."""
# global
import ivy
import numpy as np
import torch as torch
from operator import mul
from functools import reduce as _reduce
from typing import List, Optional, Union
from numbers import Number
# local
from ivy.functional.backends.torch.device import dev_from_str, _callable_dev
torch_scatter = None
def is_native_array(x, exclusive=False):
if isinstance(x, torch.Tensor):
if exclusive and x.requires_grad:
return False
return True
return False
def copy_array(x: torch.Tensor) -> torch.Tensor:
return x.clone()
def array_equal(x0: torch.Tensor, x1: torch.Tensor) -> bool:
dtype = torch.promote_types(x0.dtype, x1.dtype)
x0 = x0.type(dtype=dtype)
x1 = x1.type(dtype=dtype)
return torch.equal(x0, x1)
def to_numpy(x: torch.Tensor) -> np.ndarray:
if isinstance(x, np.ndarray) or isinstance(x, (float, int, bool)):
return x
elif torch.is_tensor(x):
if x.dtype is torch.bfloat16:
x = x.to(torch.float16)
return x.detach().cpu().numpy()
raise ValueError("Expected a pytorch tensor.")
def to_scalar(x: torch.Tensor) -> Number:
if isinstance(x, (float, int)):
return x
return x.item()
def to_list(x: torch.Tensor) -> list:
if isinstance(x, np.ndarray):
return x.tolist()
elif torch.is_tensor(x):
return x.detach().cpu().tolist()
raise ValueError("Expected a pytorch tensor.")
def floormod(
x: torch.Tensor, y: torch.Tensor, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
ret = x % y
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def unstack(x, axis: int, keepdims: bool = False) -> List[torch.Tensor]:
if x.shape == ():
return [x]
ret = list(torch.unbind(x, axis))
if keepdims:
return [r.unsqueeze(axis) for r in ret]
return ret
def container_types():
return []
def inplace_update(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native.data = val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
inplace_arrays_supported = lambda: True
inplace_variables_supported = lambda: True
def inplace_decrement(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native.data -= val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def inplace_increment(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native.data += val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def cumsum(x: torch.Tensor, axis: int = 0, out: Optional[torch.Tensor] = None):
if ivy.exists(out):
return ivy.inplace_update(out, torch.cumsum(x, axis))
else:
return torch.cumsum(x, axis)
def cumprod(
x: torch.Tensor,
axis: int = 0,
exclusive: Optional[bool] = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if exclusive:
x = torch.transpose(x, axis, -1)
x = torch.cat((torch.ones_like(x[..., -1:]), x[..., :-1]), -1)
res = torch.cumprod(x, -1)
if ivy.exists(out):
return ivy.inplace_update(out, torch.transpose(res, axis, -1))
else:
return torch.transpose(res, axis, -1)
if ivy.exists(out):
return ivy.inplace_update(out, torch.cumprod(x, axis))
else:
return torch.cumprod(x, axis)
# noinspection PyShadowingNames
def scatter_flat(
indices,
updates,
size: Optional[int] = None,
tensor: Optional[torch.Tensor] = None,
reduction: str = "sum",
device: Optional[str] = None,
):
target = tensor
target_given = ivy.exists(target)
if ivy.exists(size) and ivy.exists(target):
assert len(target.shape) == 1 and target.shape[0] == size
if device is None:
device = _callable_dev(updates)
dtype = updates.dtype
if reduction in ["sum", "replace"]:
initial_val = torch.tensor(0).type(dtype).to(dev_from_str(device))
elif reduction == "min":
initial_val = torch.tensor(1e12).type(dtype).to(dev_from_str(device))
elif reduction == "max":
initial_val = torch.tensor(-1e12).type(dtype).to(dev_from_str(device))
else:
raise Exception(
'reduction is {}, but it must be one of "sum", "min" or "max"'.format(
reduction
)
)
if target_given:
output = tensor
else:
output = torch.ones([size], dtype=dtype).to(dev_from_str(device)) * initial_val
global torch_scatter
if torch_scatter is None:
try:
import torch_scatter as torch_scatter
except ImportError:
raise Exception(
"Unable to import torch_scatter, verify this is correctly installed."
)
if reduction == "replace":
output[indices.type(torch.int64)] = updates
res = output
else:
res = torch_scatter.scatter(
updates, indices.type(torch.int64), out=output, reduce=reduction
)
if not target_given:
return torch.where(
res == initial_val,
torch.zeros([size], dtype=updates.dtype).to(dev_from_str(device)),
res,
)
return res
def _parse_ellipsis(so, ndims):
pre = list()
for s in so:
if s is Ellipsis:
break
pre.append(s)
post = list()
for s in reversed(so):
if s is Ellipsis:
break
post.append(s)
return tuple(
pre
+ [slice(None, None, None) for _ in range(ndims - len(pre) - len(post))]
+ list(reversed(post))
)
# noinspection PyShadowingNames
def scatter_nd(indices, updates, shape=None, tensor=None, reduction="sum", device=None):
# handle numeric updates
updates = torch.tensor(
[updates] if isinstance(updates, (float, int, bool)) else updates,
dtype=ivy.dtype(tensor, as_str=False)
if ivy.exists(tensor)
else ivy.default_dtype(item=updates),
)
# hanle non-tensor indices
if indices == ():
return updates
elif indices is Ellipsis or (isinstance(indices, tuple) and indices == (Ellipsis,)):
if updates.shape == () and ivy.exists(tensor) and tensor.shape == ():
return updates
shape = tensor.shape if ivy.exists(tensor) else updates.shape
indices = torch.concat(
[
torch.unsqueeze(g, -1)
                for g in torch.meshgrid(*[torch.arange(s) for s in shape])
],
-1,
)
elif isinstance(indices, (float, int, bool)):
indices = (indices,)
if isinstance(indices, tuple):
shape = tensor.shape if ivy.exists(tensor) else updates.shape
indices = _parse_ellipsis(indices, len(shape))
indices = torch.concat(
[
torch.unsqueeze(g, -1)
for g in torch.meshgrid(
*[
                        torch.arange(s)
                        if idx == slice(None, None, None)
else torch.tensor(idx) % s
for s, idx in zip(shape, indices)
]
)
],
-1,
)
# broadcast updates to indices
if updates.shape == ():
updates = torch.broadcast_to(updates, indices.shape[:-1])
# implementation
target = tensor
target_given = ivy.exists(target)
if ivy.exists(shape) and ivy.exists(target):
assert ivy.shape_to_tuple(target.shape) == ivy.shape_to_tuple(shape)
if device is None:
device = _callable_dev(updates)
shape = list(shape) if ivy.exists(shape) else list(tensor.shape)
dtype = updates.dtype
indices_shape = indices.shape
num_index_dims = indices_shape[-1]
result_dim_sizes_list = [
_reduce(mul, shape[i + 1 :], 1) for i in range(len(shape) - 1)
] + [1]
result_dim_sizes = torch.tensor(result_dim_sizes_list).to(dev_from_str(device))
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_result_size = _reduce(mul, shape, 1)
if reduction in ["sum", "replace"]:
initial_val = torch.tensor(0).type(dtype).to(dev_from_str(device))
elif reduction == "min":
initial_val = torch.tensor(1e12).type(dtype).to(dev_from_str(device))
elif reduction == "max":
initial_val = torch.tensor(-1e12).type(dtype).to(dev_from_str(device))
else:
raise Exception(
'reduction is {}, but it must be one of "sum", "min" or "max"'.format(
reduction
)
)
if target_given:
flat_output = torch.reshape(tensor, (flat_result_size,))
else:
flat_output = (
torch.ones(flat_result_size, dtype=dtype).to(dev_from_str(device))
* initial_val
)
flat_updates = torch.reshape(updates, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = torch.reshape(
torch.sum(indices * indices_scales, -1, keepdim=True), (-1, 1)
).repeat(*[1, implicit_indices_factor])
implicit_indices = torch.unsqueeze(
torch.arange(implicit_indices_factor).to(dev_from_str(device)), 0
).repeat(*[indices_for_flat_tiled.shape[0], 1])
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = torch.reshape(indices_for_flat, (-1,)).type(torch.long)
global torch_scatter
if torch_scatter is None:
try:
import torch_scatter as torch_scatter
except ImportError:
raise Exception(
"Unable to import torch_scatter, verify this is correctly installed."
)
if reduction == "replace":
flat_output[flat_indices_for_flat] = flat_updates
flat_scatter = flat_output
else:
flat_scatter = torch_scatter.scatter(
flat_updates,
flat_indices_for_flat,
out=flat_output.clone(),
reduce=reduction,
)
if not target_given:
# noinspection PyTypeChecker
flat_scatter = torch.where(
flat_scatter == initial_val,
torch.zeros(flat_result_size, dtype=updates.dtype).to(dev_from_str(device)),
flat_scatter,
)
res = torch.reshape(flat_scatter, list(shape))
return res
# noinspection PyShadowingNames
def gather(
params: torch.Tensor,
indices: torch.Tensor,
axis: Optional[int] = -1,
device: Optional[str] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if device is None:
device = _callable_dev(params)
ret = torch.gather(params, axis, indices.type(torch.int64)).to(dev_from_str(device))
if ivy.exists(out):
return ivy.inplace_update(out, ret)
else:
return ret
# noinspection PyShadowingNames
def gather_nd(params, indices, device: Optional[str] = None):
if device is None:
device = _callable_dev(params)
indices_shape = indices.shape
params_shape = params.shape
num_index_dims = indices_shape[-1]
result_dim_sizes_list = [
_reduce(mul, params_shape[i + 1 :], 1) for i in range(len(params_shape) - 1)
] + [1]
result_dim_sizes = torch.tensor(result_dim_sizes_list).to(dev_from_str(device))
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_params = torch.reshape(params, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = torch.reshape(
torch.sum(indices * indices_scales, -1, keepdim=True), (-1, 1)
).repeat(*[1, implicit_indices_factor])
implicit_indices = torch.unsqueeze(
torch.arange(implicit_indices_factor).to(dev_from_str(device)), 0
).repeat(*[indices_for_flat_tiled.shape[0], 1])
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = torch.reshape(indices_for_flat, (-1,)).type(torch.long)
flat_gather = torch.gather(flat_params, 0, flat_indices_for_flat)
res = torch.reshape(
flat_gather, list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
)
return res
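# Illustrative behaviour of gather_nd with hypothetical values:
#
#   params = torch.tensor([[1., 2.], [3., 4.]])
#   indices = torch.tensor([[1, 0], [0, 1]])
#   gather_nd(params, indices)  # -> tensor([3., 2.])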
def multiprocessing(context=None):
import torch.multiprocessing
if context is None:
return torch.multiprocessing
return torch.multiprocessing.get_context(context)
def indices_where(x):
where_x = torch.where(x)
res = torch.cat([torch.unsqueeze(item, -1) for item in where_x], -1)
return res
# noinspection PyUnresolvedReferences,PyShadowingNames
def one_hot(indices, depth: int, device: Optional[str] = None):
if device is None:
device = _callable_dev(indices)
return torch.nn.functional.one_hot(indices.type(torch.int64), depth).to(
dev_from_str(device)
)
def shape(
x: torch.Tensor, as_tensor: bool = False
) -> Union[torch.Tensor, List[int]]:
if as_tensor:
return torch.tensor(x.shape)
else:
return x.shape
def get_num_dims(x, as_tensor=False) -> Union[torch.Tensor, int]:
return torch.tensor(len(x.shape)) if as_tensor else len(x.shape)
def compile(
fn, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None
):
if dynamic:
return torch.jit.script(fn)
return torch.jit.trace(fn, example_inputs)
def current_framework_str():
return "torch"
| 31.480549 | 88 | 0.630443 |
acf5886cdd190354c11681399069f931f27ee0c9 | 2,664 | py | Python | tests/urls.py | sommelon/django-restql | 64ff3f39e426dca7012826c1c75fa7517f0e5051 | [
"MIT"
] | null | null | null | tests/urls.py | sommelon/django-restql | 64ff3f39e426dca7012826c1c75fa7517f0e5051 | [
"MIT"
] | null | null | null | tests/urls.py | sommelon/django-restql | 64ff3f39e426dca7012826c1c75fa7517f0e5051 | [
"MIT"
] | null | null | null | """test_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from tests.testapp import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('books', views.BookViewSet, 'book')
router.register('courses', views.CourseViewSet, 'course')
router.register('courses-with-disable-dynamic-fields', views.CourseWithDisableDaynamicFieldsKwargViewSet, 'course_with_disable_dynamic_fields_kwarg')
router.register('courses-with-returnpk-kwarg', views.CourseWithReturnPkkwargViewSet, 'course_with_returnpk_kwarg')
router.register('courses-with-field-kwarg', views.CourseWithFieldsKwargViewSet, 'course_with_field_kwarg')
router.register('courses-with-exclude-kwarg', views.CourseWithExcludeKwargViewSet, 'course_with_exclude_kwarg')
router.register('courses-with-aliased-books', views.CourseWithAliasedBooksViewSet, 'course_with_aliased_books')
router.register('course-with-dynamic-serializer-method-field', views.CourseWithDynamicSerializerMethodFieldViewSet, 'course_with_dynamic_serializer_method_field')
router.register('students', views.StudentViewSet, 'student')
router.register('students-eager-loading', views.StudentEagerLoadingViewSet, 'student_eager_loading')
router.register('students-eager-loading-prefetch', views.StudentEagerLoadingPrefetchObjectViewSet, 'student_eager_loading_prefetch')
router.register('students-auto-apply-eager-loading', views.StudentAutoApplyEagerLoadingViewSet, 'student_auto_apply_eager_loading')
router.register('writable-courses', views.WritableCourseViewSet, 'wcourse')
router.register('replaceable-students', views.ReplaceableStudentViewSet, 'rstudent')
router.register('replaceable-students-with-alias', views.ReplaceableStudentWithAliasViewSet, 'rstudent_with_alias')
router.register('writable-students', views.WritableStudentViewSet, 'wstudent')
router.register('writable-students-with-alias', views.WritableStudentWithAliasViewSet, 'wstudent_with_alias')
router.register('nested-phones', views.NestedPhoneViewset, 'nphone')
urlpatterns = [
url('', include(router.urls))
]
| 55.5 | 162 | 0.808183 |
acf588e3c178e15528a2f4778f400cebb354c6d1 | 5,188 | py | Python | .history/classes/Menu_20171107165537.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107165537.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107165537.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | # DADSA - Assignment 1
# Reece Benson
from os import system as call
from collections import OrderedDict
class Menu():
# Define the variables we will be using
_app = None
_menu = None
_current = [ ]
_current_menu = "main"
def __init__(self, app):
# Set our Application
self._app = app
def load(self):
# Define our Menu
self._menu = { }
# Create our Menu
self._menu['main'] = { "new_season": "New Season", "load_season": "Load Season" }
self._menu['new_season'] = { "ns_players": "Players", "ns_tournaments": "Tournaments", "ns_prizemoney": "Prize Money", "ns_difficulty": "Difficulty", "back": "Back" }
self._menu['back'] = lambda: self.go_back()
self._menu['ns_players'] = { "ns_viewplayers": "View Players", "ns_viewplayer": "View Player", "back": "Back" }
self._menu['ns_tournaments'] = { "ns_viewtournaments": "Example Tournament 1", "back": "Back" }
self._menu['ns_prizemoney'] = { "ns_setprizemoney": "Set Prize Money", "ns_viewprizemoney": "View Prize Money", "back": "Back" }
self._menu['ns_difficulty'] = { "ns_setdifficulty": "Set Difficulty", "ns_viewdifficulty": "View Difficulty", "back": "Back" }
self._menu['load_season'] = { }
# Append our Seasons to the "Load Season" Menu
for seasonId in self._app.handler.get_seasons():
season = self._app.handler.get_season(seasonId)
self._menu['load_season'].update({ "ls_"+str(seasonId): season.name() })
# Create our menu option for loading a season
self._menu['ls_'+str(seasonId)] = { "back": "Back" }
self._menu["load_season"].update({ "back": "Back" })
# Display our Menu
self.display("main")
def go_back(self):
print("Current Menu: {}".format(self._current_menu))
print("Tree: {}".format(_current.join("/")))
print("Previous Menu: {}".format("?"))
def display(self, index = None, error = None):
# Clear our terminal window
#call("cls")
# Define our variables
cur_count = 0
menu_item = self.get_menu(index or "main")
# Error Handling
if(error != None):
print("\n", "Error!", error, "\n")
# Menu Title, set tree
print("Please select an option: ({})".format(index))
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Is the Menu Item a Function?
m_type = None
if(callable(self._menu[m])): m_type = ""
else: m_type = "->"
# Print our Menu Item
print("{0}. {1} {2}".format(menu_counter, menu_name, m_type))
# Get User Input
self.get_input()
def validate_menu(self, index):
try:
menu_name = [ (v) for k,v in enumerate(self._menu) if(k == index) ][0]
return menu_name
except IndexError:
return None
def get_menu(self, menu_name):
# Check our Menu exists
if(not menu_name in self._menu):
return None
else:
return self._menu[menu_name]
def menu_exists(self, index):
# Find our indexed menu
menu_item = self.get_menu(self._current_menu)
menu_found = None
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Has our menu been found?
if(menu_counter == index):
print("-- menu found")
# Check if it's a function or a submenu
if(callable(self._menu[m])):
# Call our function
print("-- function call")
menu_found = self._menu[m]
else:
menu_found = m
return menu_found
def get_input(self):
# Wrap this in a try/except to validate any errors with input
try:
# Get users input
resp = int(input('>>> '))
# Validate some set input calls
if(resp == "exit"):
raise KeyboardInterrupt
elif(resp == ""):
return self.display(None, "Please select a valid option!")
# Validate input from current menu
menu_selected = self.menu_exists(resp)
if(menu_selected != None and callable(menu_selected) != True):
print(menu_selected)
self._current_menu = menu_selected
self.display(menu_selected)
elif(callable(menu_selected)):
menu_selected()
else:
print("no menu", resp)
except KeyboardInterrupt:
self._app.exit()
except ValueError:
self.display(None, "Please select a valid option!")
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action") | 33.470968 | 174 | 0.545297 |
acf58972c9be19a39bcbbb7cbb4f1ae80e2a9f94 | 45 | py | Python | MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q42.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q42.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q42.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | import struct
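# struct.calcsize("P") is the size in bytes of a C pointer on the current
# build, so multiplying by 8 prints the interpreter's bitness (e.g. 64).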
print(struct.calcsize("P") * 8) | 22.5 | 31 | 0.733333 |
acf58acc835b015a46181dab6bc81ee07f3e61b4 | 3,625 | py | Python | dwavebinarycsp/factories/constraint/sat.py | dwavesystems/dwavebinarycsp | 0bbc4384749a409583b204c3ae8604cc8ea66825 | [
"Apache-2.0"
] | 17 | 2018-10-05T08:10:15.000Z | 2021-09-04T02:40:57.000Z | dwavebinarycsp/factories/constraint/sat.py | dwavesystems/dwavebinarycsp | 0bbc4384749a409583b204c3ae8604cc8ea66825 | [
"Apache-2.0"
] | 51 | 2018-05-14T19:47:19.000Z | 2021-12-22T00:48:07.000Z | dwavebinarycsp/factories/constraint/sat.py | dwavesystems/dwavebinarycsp | 0bbc4384749a409583b204c3ae8604cc8ea66825 | [
"Apache-2.0"
] | 25 | 2018-05-10T16:38:04.000Z | 2022-03-11T10:21:41.000Z | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools

import dimod

from dwavebinarycsp.core.constraint import Constraint

__all__ = ['sat2in4']


@dimod.decorators.vartype_argument('vartype')
def sat2in4(pos, neg=tuple(), vartype=dimod.BINARY, name='2-in-4'):
    """Two-in-four (2-in-4) satisfiability.

    Args:
        pos (iterable):
            Variable labels, as an iterable, for non-negated variables of the constraint.
            Exactly four variables are specified by `pos` and `neg` together.
        neg (tuple):
            Variable labels, as an iterable, for negated variables of the constraint.
            Exactly four variables are specified by `pos` and `neg` together.
        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
            input values:

            * Vartype.SPIN, 'SPIN', {-1, 1}
            * Vartype.BINARY, 'BINARY', {0, 1}
        name (str, optional, default='2-in-4'): Name for the constraint.

    Returns:
        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
        assigned values that satisfy a two-in-four satisfiability problem.

    Examples:
        >>> import dwavebinarycsp
        >>> import dwavebinarycsp.factories.constraint.sat as sat
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(sat.sat2in4(['w', 'x', 'y', 'z'], vartype='BINARY', name='sat1'))
        >>> csp.check({'w': 1, 'x': 1, 'y': 0, 'z': 0})
        True

    """
    pos = tuple(pos)
    neg = tuple(neg)

    variables = pos + neg

    if len(variables) != 4:
        raise ValueError("sat2in4 requires exactly four variables in total across `pos` and `neg`")

    if neg and (len(neg) < 4):
        # because 2-in-4 sat is symmetric, all negated is the same as none negated
        const = sat2in4(pos=variables, vartype=vartype)  # make one that has no negations
        for v in neg:
            const.flip_variable(v)
        const.name = name  # overwrite the name directly
        return const

    # we can just construct them directly for speed
    if vartype is dimod.BINARY:
        configurations = frozenset([(0, 0, 1, 1),
                                    (0, 1, 0, 1),
                                    (1, 0, 0, 1),
                                    (0, 1, 1, 0),
                                    (1, 0, 1, 0),
                                    (1, 1, 0, 0)])
    else:
        # SPIN, vartype is checked by the decorator
        configurations = frozenset([(-1, -1, +1, +1),
                                    (-1, +1, -1, +1),
                                    (+1, -1, -1, +1),
                                    (-1, +1, +1, -1),
                                    (+1, -1, +1, -1),
                                    (+1, +1, -1, -1)])

    def func(a, b, c, d):
        if a == b:
            return (b != c) and (c == d)
        elif a == c:
            # a != b
            return b == d
        else:
            # a != b, a != c => b == c
            return a == d

    return Constraint(func, configurations, variables, vartype=vartype, name=name)
| 36.25 | 96 | 0.541241 |
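A minimal sketch (separate from the record above) of the negation symmetry the function exploits; it assumes dwavebinarycsp is installed and that Constraint.check behaves as in the docstring example:

import dwavebinarycsp.factories.constraint.sat as sat

const = sat.sat2in4(pos=['a', 'b'], neg=['c', 'd'], vartype='BINARY')
# 'c' and 'd' are negated, so a=1, b=1, c=1, d=1 is effectively (1, 1, 0, 0):
# exactly two of the four are "on", and the constraint is satisfied.
print(const.check({'a': 1, 'b': 1, 'c': 1, 'd': 1}))  # expected: True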
acf58ae22a65b1a5c1e3d3cfd3bb97426e5c88f4 | 4,136 | py | Python | websrv.py | t0b3/fraktal | a62cf79bdbed87812f1ac1518239bfad369eb153 | [
"MIT"
] | null | null | null | websrv.py | t0b3/fraktal | a62cf79bdbed87812f1ac1518239bfad369eb153 | [
"MIT"
] | null | null | null | websrv.py | t0b3/fraktal | a62cf79bdbed87812f1ac1518239bfad369eb153 | [
"MIT"
] | null | null | null | import os
from http.server import SimpleHTTPRequestHandler
from urllib import parse
from http.server import ThreadingHTTPServer as HTTPServer

from fraktal import Drawing


class MyHandler(SimpleHTTPRequestHandler):

    def __init__(self, request, client_address, server, cache: bool = False):
        self.cache = cache
        super().__init__(request, client_address, server)

    def list_directory(self, path):
        self.send_error(403, "Request forbidden")

    def do_GET(self):
        """Respond to a GET request."""

        def send_response(response, mimetype):
            # serve image directly
            self.send_response(200)
            self.send_header("Content-type", mimetype)
            self.end_headers()
            self.wfile.write(response)

        def save_to_file(content, filename):
            basedir = os.path.dirname(filename)
            if not (os.path.isdir(basedir)):
                # create basedir if not exists
                os.makedirs(basedir)
            fh = open(filename, "wb")
            fh.write(content)
            fh.close()

        # serve static files
        static_routes = (
            self.path in ('/', '/favicon.ico', '/manifest.json'),
            self.path.startswith('/?'),
            self.path.startswith('/assets'))
        if any(static_routes):
            self.path = '/app' + self.path
            super().do_GET()
        # serve WMTS tile requests
        elif (self.path.startswith('/wmts/')):
            real_path = super().translate_path('/cache' + self.path)
            if (os.path.isfile(real_path)):
                # file exists: serve from cache
                super().do_GET()
            else:
                # file does not exist: calculate
                # parse input params
                p = self.path.rstrip('.png').rsplit('wmts/')[-1].split('/')
                par = {"x_row": int(p[-2]),
                       "y_row": int(p[-1]),
                       "zoomlevel": int(p[-3]),
                       "style": p[1],
                       "fractal": p[0]}
                if (len(p)==7):
                    if (p[2]=='undefined'):
                        par["c"]=0
                    else:
                        par["c"]=complex(float(p[2]),
                                         float(p[3]))
                png = Drawing.generate_image_wmts_tile(par)
                send_response(png, mimetype="image/png")
                # save image to WMTS cache (optionally)
                if (self.cache):
                    save_to_file(png, real_path)
        # serve WMS get image requests
        elif (self.path.startswith("/wms")):
            p = dict(parse.parse_qsl(parse.urlsplit(self.path).query))

            def filter_neg_dict(d: dict, keys: list):
                return {k: v for k, v in d.items() if k not in keys}

            # TODO: implement service error handling
            p = filter_neg_dict(p, ['SERVICE','VERSION','REQUEST'])
            p = filter_neg_dict(p, ['FORMAT','TRANSPARENT','CRS'])
            par = {"fractal": p["LAYERS"],
                   "style": p["STYLES"],
                   "width": int(p["WIDTH"]),
                   "height": int(p["HEIGHT"])}
            par["xmin"] = float(p["BBOX"].split(',')[0])
            par["ymin"] = float(p["BBOX"].split(',')[1])
            par["xmax"] = float(p["BBOX"].split(',')[2])
            par["ymax"] = float(p["BBOX"].split(',')[3])
            # both components of c must be supplied; testing the keys
            # separately, since ("CX" and "CY") in p only checks "CY"
            if ("CX" in p.keys() and "CY" in p.keys()):
                par["c"] = complex(float(p["CX"]),
                                   float(p["CY"]))
            png = Drawing.generate_image_wms(par)
            send_response(png, mimetype="image/png")
        # respond with failure to unexpected requests
        else:
            self.send_error(500)


# start HTTP server
def start_webservice(server_address = ('127.0.0.1', 8080), server_class=HTTPServer, handler_class=MyHandler):
    httpd = server_class(server_address, handler_class)
    print("listening at http://"+server_address[0]+":"+str(server_address[1]))
    httpd.serve_forever()


if __name__ == "__main__":
    start_webservice() | 35.965217 | 109 | 0.516683 |
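Example request paths the handler above is written to parse (the fractal and style names are placeholders; host and port are the defaults from start_webservice):

# WMTS tile, 5-segment form: /wmts/<fractal>/<style>/<zoom>/<x>/<y>.png
#   http://127.0.0.1:8080/wmts/mandelbrot/default/6/23/41.png
#   -> par = {"fractal": "mandelbrot", "style": "default",
#             "zoomlevel": 6, "x_row": 23, "y_row": 41}
# WMTS tile, 7-segment form adds the complex parameter c as two extra path
#   segments (p[2], p[3]), e.g. /wmts/julia/default/-0.4/0.6/6/23/41.png
# WMS image: /wms?LAYERS=<fractal>&STYLES=<style>&WIDTH=256&HEIGHT=256&BBOX=xmin,ymin,xmax,ymax[&CX=..&CY=..]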
acf58b12e742fe259e9e7a507419d8556664eef2 | 1,083 | py | Python | tests/integration/issues/github_929/test_shelfindexer.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | null | null | null | tests/integration/issues/github_929/test_shelfindexer.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | 4 | 2020-09-01T17:47:27.000Z | 2021-04-16T23:11:57.000Z | tests/integration/issues/github_929/test_shelfindexer.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | null | null | null | import os
import shutil

import pytest

from jina.flow import Flow
from jina.logging.profile import used_memory
from jina.proto import jina_pb2
from tests import random_docs, validate_callback

cur_dir = os.path.dirname(os.path.abspath(__file__))


@pytest.mark.parametrize('uses', ['binarypb.yml'])
def test_shelf_in_flow(uses, mocker):
    m1 = used_memory()
    # shelve does not support embed > 1000??
    # _dbm.error: cannot add item to database
    # HASH: Out of overflow pages. Increase page size
    docs = random_docs(10000, embed_dim=1000)
    f = Flow().add(uses=os.path.join(cur_dir, uses))

    with f:
        f.index(docs)

    m2 = used_memory()
    d = jina_pb2.DocumentProto()

    def validate(req):
        m4 = used_memory()
        print(f'before: {m1}, after index: {m2}, after loading: {m3} after searching {m4}')

    mock = mocker.Mock()
    with f:
        m3 = used_memory()
        f.search([d], on_done=mock)

    shutil.rmtree('test-workspace', ignore_errors=False, onerror=None)
    mock.assert_called_once()
    validate_callback(mock, validate)
| 25.785714 | 91 | 0.68144 |
acf58c15763dae5e38850c2fe5ff2344fe0dd8a5 | 460 | py | Python | coursera/algorithmic-toolbox/week-2/algorithmic-warm-up/3_greatest_common_divisor/solution.py | kamilsk/algo | 08b446760543844d62ecb98c98cd641203a60932 | [
"MIT"
] | 4 | 2019-07-26T21:29:09.000Z | 2022-01-09T01:21:46.000Z | coursera/algorithmic-toolbox/week-2/algorithmic-warm-up/3_greatest_common_divisor/solution.py | kamilsk/algo | 08b446760543844d62ecb98c98cd641203a60932 | [
"MIT"
] | null | null | null | coursera/algorithmic-toolbox/week-2/algorithmic-warm-up/3_greatest_common_divisor/solution.py | kamilsk/algo | 08b446760543844d62ecb98c98cd641203a60932 | [
"MIT"
] | null | null | null | # python3
def naive_gcd(a: int, b: int) -> int:
    current_gcd = 1
    for d in range(2, min(a, b) + 1):
        if a % d == 0 and b % d == 0:
            if d > current_gcd:
                current_gcd = d
    return current_gcd


def fast_gcd(a: int, b: int) -> int:
    if b == 0:
        return a
    if a < b:
        return fast_gcd(a, b % a)
    return fast_gcd(b, a % b)


if __name__ == '__main__':
    print(fast_gcd(*map(int, input().split())))
| 19.166667 | 47 | 0.504348 |
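A worked trace of the Euclidean recursion above; each call replaces the larger argument by a remainder, so fast_gcd needs O(log(min(a, b))) steps where naive_gcd tries every candidate divisor up to min(a, b):

# fast_gcd(36, 24) -> fast_gcd(24, 36 % 24) = fast_gcd(24, 12)
#                  -> fast_gcd(12, 24 % 12) = fast_gcd(12, 0) -> 12
assert fast_gcd(36, 24) == naive_gcd(36, 24) == 12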
acf58c7b9dd5c46fa3f9871d92136ea06a0c2a98 | 216 | py | Python | post-processors/stackoverflow/TimeProcessorHandler.py | kingking888/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 6 | 2019-07-05T02:47:54.000Z | 2021-05-03T08:33:28.000Z | post-processors/stackoverflow/TimeProcessorHandler.py | SpanockLau/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 8 | 2020-08-06T03:34:38.000Z | 2022-02-26T15:22:28.000Z | post-processors/stackoverflow/TimeProcessorHandler.py | SpanockLau/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 4 | 2019-07-05T08:03:51.000Z | 2019-10-05T06:48:24.000Z | class TimeProcessHandler():
    def __init__(self):
        pass

    def process(self, time):
        time = str(time)
        if len(time) == 0: return
        new_time = time.split(' ')[0]
        return new_time | 21.6 | 37 | 0.550926 |
acf58ce7d8f64512251414b43dc4661645446de4 | 598 | py | Python | paginator/__init__.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | paginator/__init__.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | paginator/__init__.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | __version_info__ = {
    'major': 0,
    'minor': 2,
    'micro': 0,
    'releaselevel': 'alpha',
    'serial': 3
}


def get_version(short=False):
    assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
    vers = ["%(major)i.%(minor)i" % __version_info__, ]
    if __version_info__['micro']:
        vers.append(".%(micro)i" % __version_info__)
    if __version_info__['releaselevel'] != 'final' and not short:
        vers.append('%s%i' % (__version_info__['releaselevel'][0], __version_info__['serial']))
    return ''.join(vers)


__version__ = get_version()
| 31.473684 | 96 | 0.608696 |
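With the values above, get_version() skips the zero 'micro' component and appends the first letter of the release level plus the serial:

# get_version()           -> "0.2a3"
# get_version(short=True) -> "0.2"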
acf58e7183b978a79f53c029a4e0fbc00ae94bfd | 1,942 | py | Python | backpack/extensions/secondorder/diag_hessian/diag_h_base.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 395 | 2019-10-04T09:37:52.000Z | 2022-03-29T18:00:56.000Z | backpack/extensions/secondorder/diag_hessian/diag_h_base.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 78 | 2019-10-11T18:56:43.000Z | 2022-03-23T01:49:54.000Z | backpack/extensions/secondorder/diag_hessian/diag_h_base.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 50 | 2019-10-03T16:31:10.000Z | 2022-03-15T19:36:14.000Z | from numpy import prod
from torch import clamp, diag_embed, einsum

from backpack.extensions.mat_to_mat_jac_base import MatToJacMat


class DiagHBaseModule(MatToJacMat):
    PLUS = 1.0
    MINUS = -1.0

    def __init__(self, derivatives, params=None):
        super().__init__(derivatives=derivatives, params=params)

    def backpropagate(self, ext, module, g_inp, g_out, backproped):
        bp_matrices = backproped["matrices"]
        bp_signs = backproped["signs"]

        bp_matrices = super().backpropagate(ext, module, g_inp, g_out, bp_matrices)

        for matrix, sign in self.__local_curvatures(module, g_inp, g_out):
            bp_matrices.append(matrix)
            bp_signs.append(sign)

        return {"matrices": bp_matrices, "signs": bp_signs}

    def __local_curvatures(self, module, g_inp, g_out):
        if self.derivatives.hessian_is_zero(module):
            return []
        if not self.derivatives.hessian_is_diagonal(module):
            raise NotImplementedError

        def positive_part(sign, H):
            return clamp(sign * H, min=0)

        def diag_embed_multi_dim(H):
            """Convert [N, C_in, H_in, ...] to [N, C_in * H_in * ...,],
            embed into [N, C_in * H_in * ..., C_in * H_in = V], convert back
            to [V, N, C_in, H_in, ..., V]."""
            feature_shapes = H.shape[1:]
            V, N = prod(feature_shapes), H.shape[0]
            H_diag = diag_embed(H.view(N, V))
            # [V, N, C_in, H_in, ...]
            shape = (V, N, *feature_shapes)
            return einsum("nic->cni", H_diag).view(shape)

        def decompose_into_positive_and_negative_sqrt(H):
            return [
                [diag_embed_multi_dim(positive_part(sign, H).sqrt_()), sign]
                for sign in [self.PLUS, self.MINUS]
            ]

        H = self.derivatives.hessian_diagonal(module, g_inp, g_out)
        return decompose_into_positive_and_negative_sqrt(H) | 35.309091 | 83 | 0.609681 |
acf58eddf6efae1db28850818c21ef302c993906 | 381 | py | Python | app/migrations/0011_player_modifier.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 9 | 2018-09-08T06:59:02.000Z | 2022-03-23T08:12:02.000Z | app/migrations/0011_player_modifier.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 37 | 2020-01-22T02:36:32.000Z | 2020-10-06T15:05:37.000Z | app/migrations/0011_player_modifier.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 6 | 2019-03-07T02:55:27.000Z | 2019-11-10T23:26:44.000Z | # Generated by Django 2.0.7 on 2018-07-10 10:01
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0010_auto_20180710_0533'),
    ]

    operations = [
        migrations.AddField(
            model_name='player',
            name='modifier',
            field=models.IntegerField(default=0),
        ),
    ]
| 20.052632 | 49 | 0.593176 |
acf58fbbe0f89687897a8e2bf7358dcd52ad904b | 2,324 | py | Python | tests/cp2/test_cp2_ll_zeroex.py | capt-hb/cheritest | d3b3637a81a0005ee7272eca0f33a9f9911fdb32 | [
"Apache-2.0"
] | null | null | null | tests/cp2/test_cp2_ll_zeroex.py | capt-hb/cheritest | d3b3637a81a0005ee7272eca0f33a9f9911fdb32 | [
"Apache-2.0"
] | 2 | 2020-06-02T13:44:55.000Z | 2020-06-02T14:06:29.000Z | tests/cp2/test_cp2_ll_zeroex.py | capt-hb/cheritest | d3b3637a81a0005ee7272eca0f33a9f9911fdb32 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2012, 2015 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# This software was developed by the University of Cambridge Computer
# Laboratory as part of the Rigorous Engineering of Mainstream Systems (REMS)
# project, funded by EPSRC grant EP/K008528/1.
#
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#

from beritest_tools import BaseBERITestCase
from beritest_tools import attr

#
# Test that the unsigned load linked operations zero-extend the value that is
# loaded.
#

class test_cp2_ll_zeroex(BaseBERITestCase):

    @attr('capabilities')
    @attr('cached')
    def test_cp2_ll_zeroex_1(self):
        '''Test that cllbu zero-extends the result'''
        self.assertRegisterEqual(self.MIPS.a0, 0xfe,
            "cllbu of negative value returned incorrect result")

    @attr('capabilities')
    @attr('cached')
    def test_cp2_ll_zeroex_2(self):
        '''Test that cllhu zero-extends the result'''
        self.assertRegisterEqual(self.MIPS.a1, 0xfedc,
            "cllhu of negative value returned incorrect result")

    @attr('capabilities')
    @attr('cached')
    def test_cp2_ll_zeroex_3(self):
        '''Test that cllwu zero-extends the result'''
        self.assertRegisterEqual(self.MIPS.a2, 0xfedcba98,
            "cllwu of negative value returned incorrect result")
| 36.3125 | 77 | 0.736661 |
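For reference, the expected values asserted above are exactly what zero-extension produces; a sign-extending load of the same bytes would fill the upper bits with ones instead:

# 0xfe   (8-bit)  zero-extended to 64 bits -> 0x00000000000000fe
#                 sign-extended            -> 0xfffffffffffffffe
# 0xfedc (16-bit) and 0xfedcba98 (32-bit) follow the same pattern.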
acf5900d2f86185bd223ec7a78ee87d367fab606 | 10,784 | py | Python | mygrad/linalg/ops.py | mkhan45/MyGrad | 7114d56eebba4078875f580c9f4638ed275e2d4a | [
"MIT"
] | null | null | null | mygrad/linalg/ops.py | mkhan45/MyGrad | 7114d56eebba4078875f580c9f4638ed275e2d4a | [
"MIT"
] | null | null | null | mygrad/linalg/ops.py | mkhan45/MyGrad | 7114d56eebba4078875f580c9f4638ed275e2d4a | [
"MIT"
] | null | null | null | from collections import Counter
from copy import copy
from functools import reduce
from itertools import chain

import numpy as np
from numpy.lib.stride_tricks import as_strided

from mygrad._utils import reduce_broadcast
from mygrad.operation_base import BroadcastableOp

__all__ = ["MatMul", "EinSum"]


class MatMul(BroadcastableOp):
    scalar_only = True

    def __call__(self, a, b):
        """ f(a) -> matmul(a, b)

        Parameters
        ----------
        a : mygrad.Tensor
        b : mygrad.Tensor

        Returns
        -------
        numpy.ndarray"""
        self.variables = (a, b)
        return np.matmul(a.data, b.data)

    def backward_var(self, grad, index, **kwargs):
        a, b = (i.data for i in self.variables)

        # handle 1D w/ 1D (dot product of vectors)
        if a.ndim == 1 and b.ndim == 1:
            if index == 0:
                return grad * b
            elif index == 1:
                return grad * a
            else:
                raise IndexError

        if index == 0:  # compute grad through a
            if b.ndim > 1:  # ([...], j) w/ ([...], j, k)
                if a.ndim == 1:
                    grad = np.expand_dims(grad, -2)
                dfdx = np.matmul(grad, b.swapaxes(-1, -2))
            else:  # ([...], i, j) w/ (j,)
                dfdx = np.expand_dims(grad, -1) * b
            return dfdx

        if index == 1:  # compute grad through b
            if a.ndim > 1:  # ([...], i, j) w/ ([...], j, [k])
                if b.ndim == 1:
                    grad = np.expand_dims(grad, -1)
                dfdx = np.matmul(a.swapaxes(-1, -2), grad)
                if b.ndim == 1:
                    dfdx = dfdx.squeeze(-1)
            else:  # (j,) w/ ([...], j, k)
                dfdx = a[:, np.newaxis] * np.expand_dims(grad, -2)
            return dfdx
        else:
            raise IndexError


### EinSum ###

def _unique_from_end(in_str):
    """ Return a string with all redundant characters removed,
    removing left-most redundant entries

    i.e. "ijikik" -> "jik"

    Parameters
    ----------
    in_str: str

    Returns
    -------
    str

    Examples
    --------
    >>> _unique_from_end("ijikik")
    "jik"
    """
    return reduce(lambda acc, x: acc + x if x not in acc else acc, in_str[::-1], "")[
        ::-1
    ]


def _merge_max_mappings(*mappings):
    """ Merge dictionaries based on largest values in key->value.

    Parameters
    ----------
    *mappings : Dict[Any, Any]

    Returns
    -------
    Dict[Any, Any]

    Examples
    --------
    >>> _merge_max_mappings({"a":1, "b":4}, {"a":2})
    {"a":2, "b":4}
    """

    def _merge_max(d1, d2):
        d1.update((k, v) for k, v in d2.items() if d1.get(k, 0) < v)
        return d1

    return reduce(_merge_max, mappings, {})


def _get_indices(item, seq):
    """ Return the indices where `item` occurs in `seq`

    Returns
    -------
    Generator[int]"""
    return (n for n, x in enumerate(seq) if x == item)


class EinSum(BroadcastableOp):
    scalar_only = True

    def __call__(self, *variables, in_lbls, out_lbls, optimize=False):
        """
        einsum('{in_lbls}->{out_lbls}', *variables, optimize=optimize)

        Parameters
        ----------
        variables : mygrad.Tensor
        in_lbls : str
        out_lbls : str
        optimize : bool

        Returns
        -------
        numpy.ndarray
        """
        self.in_lbls = in_lbls.split(",")
        self.out_lbls = out_lbls
        self.variables = variables
        self.optimize = optimize

        # cache counts the number of redundant tensor-label pairs
        # fed to einsum. Only one gradient will be computed for a
        # unique tensor-label pair
        self.cache = Counter(zip(variables, self.in_lbls))

        return np.einsum(
            "->".join((in_lbls, out_lbls)),
            *(var.data for var in self.variables),
            optimize=optimize
        )

    def backward_var(self, grad, index, **kwargs):
        """
        example
        -------
        fwd:           "ijk, k -> ji", x, y
        bkwd (var: 0): "ji, k -> ijk", grad, y
        bkwd (var: 1): "ji, ijk -> k", grad, x
        """

        # ijk, k
        in_lbls = copy(self.in_lbls)
        original_var_lbl = in_lbls.pop(index)
        var = self.variables[index]

        factor = self.cache[(var, original_var_lbl)]
        if factor == 0:
            # the gradient for the current tensor-label pair
            # has already been computed, scaled, and back-propped,
            # skip gradient calculation.
            return None

        numpy_arrays = tuple(i.data for i in self.variables)
        self.cache[(var, original_var_lbl)] = 0

        var_lbl = _unique_from_end(original_var_lbl)
        repeat_lbls = len(var_lbl) != len(original_var_lbl)

        if repeat_lbls:
            # example fwd-prop: einsum("iji -> ij", x)
            # "iji" becomes "ji", later we will write along
            # the diagonal of an array to reinstate this axis that
            # we just removed
            mapping_gen = (
                {k: v for k, v in zip(lbl, arr.shape)}
                for lbl, arr in zip(self.in_lbls, numpy_arrays)
            )
            lbl_to_size = _merge_max_mappings(*mapping_gen)
            var_shape = tuple(lbl_to_size[lbl] for lbl in var_lbl)
        else:
            var_shape = self.variables[index].shape

        # ji
        grad_lbl = self.out_lbls

        # Catch indices over which un-contracted sum was performed
        # for the given variable: e.g for var-0 in "ijk, jk -> k"
        # i is summed over without contraction with another tensor
        #
        # Backpropping through this is illegal, as it requires the creation
        # of an axis; e.g. k, jk -> ijk
        # Broadcast the gradient along all such dimensions; e.g. k -> ik
        # then proceed as usual; e.g. ik, jk -> ijk
        unique_in_lbls = set(chain.from_iterable(in_lbls)) | set(grad_lbl)
        if len(set(var_lbl) - unique_in_lbls) > 0:
            exp_dims = [slice(None) for i in range(grad.ndim)]
            grad_shape = list(grad.shape)
            for n, lbl in enumerate(var_lbl):
                if lbl not in unique_in_lbls:
                    grad_lbl = grad_lbl[:n] + lbl + grad_lbl[n:]
                    exp_dims.insert(n, np.newaxis)
                    grad_shape.insert(n, var_shape[n])

            grad = np.broadcast_to(
                grad if not grad.ndim else grad[tuple(exp_dims)], grad_shape
            )

        # "ji, k -> ijk"
        back_prop_lbls = ",".join([grad_lbl] + in_lbls) + "->" + var_lbl

        # (grad, y)
        operands = (grad,) + numpy_arrays[:index] + numpy_arrays[index + 1 :]

        if not repeat_lbls:
            # dfdx: einsum("ji, k -> ijk", grad, y)
            outshape = self.variables[index].shape
            dfdx = reduce_broadcast(
                np.einsum(back_prop_lbls, *operands, optimize=self.optimize), outshape
            )
            if var_shape != dfdx.shape:
                # if y was broadcast over x, the gradient needs to
                # be broadcast to x's shape: dfdx-shape (i,j,1) -> (i,j,k)
                dfdx = np.broadcast_to(dfdx, var_shape)
            if factor > 1:
                # This tensor-label pair appears several times as
                # input to einsum. Scale the gradient accordingly
                # such that the full contribution of the tensor-label
                # pair is accounted for.
                dfdx *= factor
            return dfdx

        # Accommodate trace by writing to strided view on array of zeros
        # For example:
        #
        #   fwd:  einsum('ijkji, k -> jk', x, y)
        #   dfdx: einsum('jk, k -> kji', grad, y, out=view_of_x)
        #
        # writing to `view_of_x`, which is a view along the appropriate
        # diagonals of x, is equivalent to:
        #
        #   dfdx: einsum('jk, k -> ijkji', grad, y)
        #
        # which is formally correct but not supported by einsum.
        dfdx = np.zeros(tuple(lbl_to_size[i] for i in original_var_lbl))
        out_view_shape = tuple(lbl_to_size[i] for i in var_lbl)

        # compute strides required to traverse the appropriate diagonals of
        # the output tensor.
        strides = tuple(
            sum(dfdx.strides[ind] for ind in _get_indices(lbl, original_var_lbl))
            for lbl in var_lbl
        )
        out_view = as_strided(dfdx, shape=out_view_shape, strides=strides)
        np.einsum(back_prop_lbls, *operands, out=out_view, optimize=self.optimize)
        if factor > 1:
            # This tensor-label pair appears several times as
            # input to einsum. Scale the gradient accordingly
            # such that the full contribution of the tensor-label
            # pair is accounted for.
            dfdx *= factor
        return dfdx

    def backward(self, grad, *, graph, **kwargs):
        """ Back-propagates the gradient through all of the operation's inputs.
        Constant tensors do not propagate a gradient.

        This implementation of ``backward`` is specialized such that
        ``self.backward_var`` can return ``None`` to bypass a
        gradient-accumulation step.

        Parameters
        ----------
        grad : numpy.ndarray
            The back-propagated total derivative with respect to the present
            operation (`f`): d(out)/df

        graph : Set[Operation]"""
        for index, var in enumerate(self.variables):
            if not var.constant:
                if not var._ops:
                    raise Exception(
                        "Invalid Backprop: part of the computational graph containing "
                        "this tensor was cleared prior to backprop"
                    )

                if var.grad is None:
                    o = self.backward_var(grad, index, **kwargs)
                    if o is not None:
                        tmp_grad = reduce_broadcast(o, var.shape)
                        var.grad = (
                            np.copy(tmp_grad)
                            if np.shares_memory(tmp_grad, grad)
                            else tmp_grad
                        )
                else:
                    o = self.backward_var(grad, index, **kwargs)
                    if o is not None:
                        var.grad += reduce_broadcast(o, var.shape)

        for var in {
            i for i in self.variables if not i.constant and i.creator is not None
        }:
            var._accum_ops.add(self)
            var._backward(graph=graph)
| 33.386997 | 87 | 0.523461 |
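A numpy-only sketch of the label permutation described in EinSum.backward_var's docstring: for the forward pass einsum("ijk,k->ji", x, y), the gradient with respect to x is obtained by feeding the incoming gradient back through einsum with permuted labels:

import numpy as np

x = np.random.rand(2, 3, 4)
y = np.random.rand(4)
grad = np.ones((3, 2))                  # same shape as einsum("ijk,k->ji", x, y)
dfdx = np.einsum("ji,k->ijk", grad, y)  # the "bkwd (var: 0)" recipe from the docstring
print(dfdx.shape)                       # (2, 3, 4), matching x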
acf5911c4e06fb94c0c617a0d846c42fce484a60 | 16,682 | py | Python | ids/liked.py | hirmapau/spoti_classification | 749f2972398888a4d0ece41782044e06689e9400 | [
"MIT"
] | null | null | null | ids/liked.py | hirmapau/spoti_classification | 749f2972398888a4d0ece41782044e06689e9400 | [
"MIT"
] | null | null | null | ids/liked.py | hirmapau/spoti_classification | 749f2972398888a4d0ece41782044e06689e9400 | [
"MIT"
] | null | null | null | liked_ids = {
"items" : [ {
"track" : {
"album" : {
"name" : "JACKBOYS"
},
"id" : "4AO1XhrgJczQ9bNVxdfKQe",
"name" : "WHAT TO DO? (feat. Don Toliver)"
}
}, {
"track" : {
"album" : {
"name" : "Donda"
},
"id" : "6DZz58CbF0AKw5PCKqNKcM",
"name" : "Come to Life"
}
}, {
"track" : {
"album" : {
"name" : "Donda"
},
"id" : "2gbMPBrBVj3CuNTLp2dHYs",
"name" : "Off The Grid"
}
}, {
"track" : {
"album" : {
"name" : "family ties (with Kendrick Lamar)"
},
"id" : "7Bpx2vsWfQFBACRz4h3IqH",
"name" : "family ties (with Kendrick Lamar)"
}
}, {
"track" : {
"album" : {
"name" : "DAMN."
},
"id" : "2LTlO3NuNVN70lp2ZbVswF",
"name" : "FEEL."
}
}, {
"track" : {
"album" : {
"name" : "ye"
},
"id" : "6Bg7MznA9X0dIhlAsLyBYj",
"name" : "Ghost Town"
}
}, {
"track" : {
"album" : {
"name" : "Luv Is Rage 2 (Deluxe)"
},
"id" : "0uxSUdBrJy9Un0EYoBowng",
"name" : "20 Min"
}
}, {
"track" : {
"album" : {
"name" : "The Marshall Mathers LP"
},
"id" : "7tEoJKbYdIHBfn7tTIyjHW",
"name" : "Drug Ballad"
}
}, {
"track" : {
"album" : {
"name" : "Random Access Memories"
},
"id" : "0oks4FnzhNp5QPTZtoet7c",
"name" : "Giorgio by Moroder"
}
}, {
"track" : {
"album" : {
"name" : "Goodbye & Good Riddance"
},
"id" : "0X8DcetL926BYiPJYstJTc",
"name" : "Lucid Dreams"
}
}, {
"track" : {
"album" : {
"name" : "KIDS SEE GHOSTS"
},
"id" : "4RVbK6cV0VqWdpCDcx3hiT",
"name" : "Reborn"
}
}, {
"track" : {
"album" : {
"name" : "Illmatic"
},
"id" : "27u3Rh4IWYPdwVST20ALrt",
"name" : "One Love (feat. Q-Tip)"
}
}, {
"track" : {
"album" : {
"name" : "Illmatic"
},
"id" : "2PRsh2LNPxoxC9OnErnelg",
"name" : "Halftime"
}
}, {
"track" : {
"album" : {
"name" : "Ready to Die (The Remaster)"
},
"id" : "5vZRwV87oC3yoxcFpbJEdX",
"name" : "The What"
}
}, {
"track" : {
"album" : {
"name" : "In Tongues"
},
"id" : "4apZVURUvTrT9S51LBuXON",
"name" : "Will He"
}
}, {
"track" : {
"album" : {
"name" : "In Tongues"
},
"id" : "5iIixnRBYl3NJDBfzEOKWz",
"name" : "Pills"
}
}, {
"track" : {
"album" : {
"name" : "In Tongues"
},
"id" : "435yU2MvEGfDdmbH0noWZ0",
"name" : "worldstar money (interlude)"
}
}, {
"track" : {
"album" : {
"name" : "Random Access Memories"
},
"id" : "7oaEjLP2dTJLJsITbAxTOz",
"name" : "Touch (feat. Paul Williams)"
}
}, {
"track" : {
"album" : {
"name" : "Luv Is Rage 2"
},
"id" : "2eAZfqOm4EnOF9VvN50Tyc",
"name" : "The Way Life Goes (feat. Oh Wonder)"
}
}, {
"track" : {
"album" : {
"name" : "Luv Is Rage 2"
},
"id" : "5Y3Tj0wJhKAaPbwWxXxZGS",
"name" : "Feelings Mutual"
}
}, {
"track" : {
"album" : {
"name" : "Let Me Out (feat. Mavis Staples & Pusha T)"
},
"id" : "3CCS6y9BPowg2jYiaY94UZ",
"name" : "Let Me Out (feat. Mavis Staples & Pusha T)"
}
}, {
"track" : {
"album" : {
"name" : "Prayer in C"
},
"id" : "5fnA9mkIfScSqHIpeDyvck",
"name" : "Prayer in C - Robin Schulz Radio Edit"
}
}, {
"track" : {
"album" : {
"name" : "Discovery"
},
"id" : "7v9Q0dAb9t7h8gJOkcJHay",
"name" : "Face to Face"
}
}, {
"track" : {
"album" : {
"name" : "Discovery"
},
"id" : "5W3cjX2J3tjhG8zb6u0qHn",
"name" : "Harder, Better, Faster, Stronger"
}
}, {
"track" : {
"album" : {
"name" : "Discovery"
},
"id" : "186hvCTyrni4KT9nwIQ7zS",
"name" : "Superheroes"
}
}, {
"track" : {
"album" : {
"name" : "Alive 2007"
},
"id" : "5XzGyYZemDuCG3OdbhVFvh",
"name" : "Face to Face / Short Circuit"
}
}, {
"track" : {
"album" : {
"name" : "Music Sounds Better With You"
},
"id" : "303ccTay2FiDTZ9fZ2AdBt",
"name" : "Music Sounds Better With You"
}
}, {
"track" : {
"album" : {
"name" : "Donda"
},
"id" : "2Rd4eJ4KwXQQn2sMSToyUM",
"name" : "Moon"
}
}, {
"track" : {
"album" : {
"name" : "Yonaguni"
},
"id" : "2JPLbjOn0wPCngEot2STUS",
"name" : "Yonaguni"
}
}, {
"track" : {
"album" : {
"name" : "Volando (Remix)"
},
"id" : "0G2zPzWqVjR68iNPmx2TBe",
"name" : "Volando - Remix"
}
}, {
"track" : {
"album" : {
"name" : "23 Preguntas"
},
"id" : "5ANkjNH7elrxzggidjnH9v",
"name" : "23 Preguntas"
}
}, {
"track" : {
"album" : {
"name" : "En Mi Cuarto"
},
"id" : "6Ee34qCOE6FBzEPRAGwSrn",
"name" : "En Mi Cuarto"
}
}, {
"track" : {
"album" : {
"name" : "Volví"
},
"id" : "2vmfvSoZBFAt9hhRoEByLi",
"name" : "Volví"
}
}, {
"track" : {
"album" : {
"name" : "Afrodisíaco"
},
"id" : "4loQgPwJHocL3dFwLfjb1J",
"name" : "Reloj"
}
}, {
"track" : {
"album" : {
"name" : "Euphoria (Original Score from the HBO Series)"
},
"id" : "6EtKlIQmGPB9SX8UjDJG5s",
"name" : "Formula"
}
}, {
"track" : {
"album" : {
"name" : "Dimensión Caribe"
},
"id" : "5oI5l1FUvJqKCbZTYTKWII",
"name" : "Café Con Chocolate"
}
}, {
"track" : {
"album" : {
"name" : "Aguanta Corazón (En Vivo)"
},
"id" : "4poCTllWc00ZCJUgmCGpf5",
"name" : "La Tempestad - En Vivo"
}
}, {
"track" : {
"album" : {
"name" : "UN DIA (ONE DAY) (Feat. Tainy)"
},
"id" : "0EhpEsp4L0oRGM0vmeaN5e",
"name" : "UN DIA (ONE DAY) (Feat. Tainy)"
}
}, {
"track" : {
"album" : {
"name" : "CALL ME IF YOU GET LOST"
},
"id" : "3EG9FJ0ToLfgnc1IG2Z1wz",
"name" : "SWEET / I THOUGHT YOU WANTED TO DANCE (feat. Brent Faiyaz & Fana Hues)"
}
}, {
"track" : {
"album" : {
"name" : "17"
},
"id" : "7AQim7LbvFVZJE3O8TYgf2",
"name" : "Fuck Love (feat. Trippie Redd)"
}
}, {
"track" : {
"album" : {
"name" : "Man On The Moon: The End Of Day"
},
"id" : "1cdC9TCqyLwAlsw3fVJaJS",
"name" : "Soundtrack 2 My Life"
}
}, {
"track" : {
"album" : {
"name" : "Man On The Moon: The End Of Day"
},
"id" : "5FEXPoPnzueFJQCPRIrC3c",
"name" : "Day 'N' Nite (nightmare)"
}
}, {
"track" : {
"album" : {
"name" : "Die Lit"
},
"id" : "3L0IKstjUgDFVQAbQIRZRv",
"name" : "R.I.P."
}
}, {
"track" : {
"album" : {
"name" : "B4.DA.$$"
},
"id" : "2w9Qq5cflc8Z6BraiDCyes",
"name" : "Christ Conscious"
}
}, {
"track" : {
"album" : {
"name" : "KIDS SEE GHOSTS"
},
"id" : "3aUFrxO1B8EW63QchEl3wX",
"name" : "Feel The Love"
}
}, {
"track" : {
"album" : {
"name" : "MIA (feat. Drake)"
},
"id" : "116H0KvKr2Zl4RPuVBruDO",
"name" : "MIA (feat. Drake)"
}
}, {
"track" : {
"album" : {
"name" : "I See You"
},
"id" : "5CPqOpKSk0QcJ3dGdaWcRB",
"name" : "On Hold"
}
}, {
"track" : {
"album" : {
"name" : "Summer 08"
},
"id" : "0swxSYk0cxjhRDQIgEVhFi",
"name" : "Hang Me out to Dry (with Robyn)"
}
}, {
"track" : {
"album" : {
"name" : "Skrillex and Diplo present Jack Ü"
},
"id" : "3UgSQu6WwrXfKKDq019IHE",
"name" : "To Ü (feat. AlunaGeorge)"
}
}, {
"track" : {
"album" : {
"name" : "Summer 08"
},
"id" : "2sbZWEiRVqJpSitzVK9Owi",
"name" : "Night Owl"
}
}, {
"track" : {
"album" : {
"name" : "31 Minutos"
},
"id" : "5lsSBFMMOueNCwnoU4Ox9F",
"name" : "Señora Devuélvame La Pelota O Si No Se Que Haré"
}
}, {
"track" : {
"album" : {
"name" : "Hablando Con Música: Lo Mejor De Los 80´S"
},
"id" : "7tT88YwbSGi17DuytjREGo",
"name" : "Square Rooms"
}
}, {
"track" : {
"album" : {
"name" : "Revenge"
},
"id" : "1HLCN534JKZNojcDgbYTVb",
"name" : "I Don't Wanna Do This Anymore"
}
}, {
"track" : {
"album" : {
"name" : "Skrillex and Diplo present Jack Ü"
},
"id" : "66hayvUbTotekKU3H4ta1f",
"name" : "Where Are Ü Now (with Justin Bieber)"
}
}, {
"track" : {
"album" : {
"name" : "Scary Monsters and Nice Sprites EP"
},
"id" : "6I9sncEmtGc9rpKyb8U1f8",
"name" : "Kill EVERYBODY"
}
}, {
"track" : {
"album" : {
"name" : "Lady Lady"
},
"id" : "51rPRW8NjxZoWPPjnRGzHw",
"name" : "Tadow"
}
}, {
"track" : {
"album" : {
"name" : "Dancing With A Stranger (with Normani)"
},
"id" : "6Qs4SXO9dwPj5GKvVOv8Ki",
"name" : "Dancing With A Stranger (with Normani)"
}
}, {
"track" : {
"album" : {
"name" : "Purpose (Deluxe)"
},
"id" : "50kpGaPAhYJ3sGmk6vplg0",
"name" : "Love Yourself"
}
}, {
"track" : {
"album" : {
"name" : "The Reason"
},
"id" : "1lHtE5JDCas1EwXhQIMOIj",
"name" : "The Reason"
}
}, {
"track" : {
"album" : {
"name" : "Body"
},
"id" : "21RzyxY3EFaxVy6K4RqaU9",
"name" : "Body"
}
}, {
"track" : {
"album" : {
"name" : "Minecraft - Volume Alpha"
},
"id" : "1gNcPHAiVIQZmqJFJdt3ti",
"name" : "Dry Hands"
}
}, {
"track" : {
"album" : {
"name" : "SOUR"
},
"id" : "4ZtFanR9U6ndgddUvNcjcG",
"name" : "good 4 u"
}
}, {
"track" : {
"album" : {
"name" : "Unlock It (feat. Playboi Carti)"
},
"id" : "4EoZiih7SmUSDyIw8y011F",
"name" : "Unlock It (feat. Playboi Carti)"
}
}, {
"track" : {
"album" : {
"name" : "My Beautiful Dark Twisted Fantasy"
},
"id" : "3DK6m7It6Pw857FcQftMds",
"name" : "Runaway"
}
}, {
"track" : {
"album" : {
"name" : "Watch (feat. Lil Uzi Vert & Kanye West)"
},
"id" : "3DoBTwfr8yi2LN08SBpFkN",
"name" : "Watch (feat. Lil Uzi Vert & Kanye West)"
}
}, {
"track" : {
"album" : {
"name" : "The Life Of Pablo"
},
"id" : "1eQBEelI2NCy7AUTerX0KS",
"name" : "Ultralight Beam"
}
}, {
"track" : {
"album" : {
"name" : "The Life Of Pablo"
},
"id" : "2CHmgtK8OCL28WtIK96u4N",
"name" : "30 Hours"
}
}, {
"track" : {
"album" : {
"name" : "Donda"
},
"id" : "4IlOyxGATYUp7YVy5zrSW8",
"name" : "Keep My Spirit Alive"
}
}, {
"track" : {
"album" : {
"name" : "DÁKITI"
},
"id" : "47EiUVwUp4C9fGccaPuUCS",
"name" : "DÁKITI"
}
}, {
"track" : {
"album" : {
"name" : "EL ÚLTIMO TOUR DEL MUNDO"
},
"id" : "5RubKOuDoPn5Kj5TLVxSxY",
"name" : "TE MUDASTE"
}
}, {
"track" : {
"album" : {
"name" : "CÓMO SE SIENTE (Remix)"
},
"id" : "2tFwfmceQa1Y6nRPhYbEtC",
"name" : "CÓMO SE SIENTE - Remix"
}
}, {
"track" : {
"album" : {
"name" : "Vete"
},
"id" : "5DxXgozhkPLgrbKFY91w0c",
"name" : "Vete"
}
}, {
"track" : {
"album" : {
"name" : "Callaita"
},
"id" : "2TH65lNHgvLxCKXM3apjxI",
"name" : "Callaita"
}
}, {
"track" : {
"album" : {
"name" : "Famouz"
},
"id" : "4R8BJggjosTswLxtkw8V7P",
"name" : "No Me Conoce - Remix"
}
}, {
"track" : {
"album" : {
"name" : "YHLQMDLG"
},
"id" : "6NfrH0ANGmgBXyxgV2PeXt",
"name" : "La Difícil"
}
}, {
"track" : {
"album" : {
"name" : "YHLQMDLG"
},
"id" : "69vlMrzHwATKzupwNcUPyK",
"name" : "La Santa"
}
}, {
"track" : {
"album" : {
"name" : "YHLQMDLG"
},
"id" : "2DEZmgHKAvm41k4J3R2E9Y",
"name" : "Safaera"
}
}, {
"track" : {
"album" : {
"name" : "Me Rehúso"
},
"id" : "6De0lHrwBfPfrhorm9q1Xl",
"name" : "Me Rehúso"
}
}, {
"track" : {
"album" : {
"name" : "Dire, Dire Docks (Original)"
},
"id" : "6xGgtxLqEZRDYU6DhJ5x0Y",
"name" : "Dire, Dire Docks - Original"
}
}, {
"track" : {
"album" : {
"name" : "The Life Of Pi'erre 5"
},
"id" : "0VLkwjIaeJM5KMjcVVybBp",
"name" : "Couch"
}
}, {
"track" : {
"album" : {
"name" : "Youth (08-13)"
},
"id" : "5IVIp4RuaB0vLPbNYCCz4P",
"name" : "Fe Ciega para Sordos"
}
}, {
"track" : {
"album" : {
"name" : "good kid, m.A.A.d city (Deluxe)"
},
"id" : "1Z2FvDFZSDQ23s4BDqpWqZ",
"name" : "Sing About Me, I'm Dying Of Thirst"
}
}, {
"track" : {
"album" : {
"name" : "To Pimp A Butterfly"
},
"id" : "1bxEpNR75Hq3T2oF9AZjt8",
"name" : "u"
}
}, {
"track" : {
"album" : {
"name" : "To Pimp A Butterfly"
},
"id" : "1WT11QmhZutciEv1NsHt1R",
"name" : "Mortal Man"
}
}, {
"track" : {
"album" : {
"name" : "To Pimp A Butterfly"
},
"id" : "3iVcZ5G6tvkXZkZKlMpIUs",
"name" : "Alright"
}
}, {
"track" : {
"album" : {
"name" : "DAMN."
},
"id" : "7KXjTSCq5nL1LoYtL7XAwS",
"name" : "HUMBLE."
}
}, {
"track" : {
"album" : {
"name" : "Mask Off (feat. Kendrick Lamar) [Remix]"
},
"id" : "6DB2KOEwHnjkgEnBt5SdeJ",
"name" : "Mask Off (feat. Kendrick Lamar) - Remix"
}
}, {
"track" : {
"album" : {
"name" : "Supermarket (Soundtrack)"
},
"id" : "7Ii2ALuxW7NkLfQPhV4Rn2",
"name" : "Bohemian Trapsody"
}
}, {
"track" : {
"album" : {
"name" : "Valotte"
},
"id" : "2SPU97neddixGZ3wCMLAwx",
"name" : "Too Late For Goodbyes"
}
}, {
"track" : {
"album" : {
"name" : "Please Hammer Don't Hurt 'Em"
},
"id" : "1B75hgRqe7A4fwee3g3Wmu",
"name" : "U Can't Touch This"
}
}, {
"track" : {
"album" : {
"name" : "Yeezus"
},
"id" : "3sNVsP50132BTNlImLx70i",
"name" : "Bound 2"
}
}, {
"track" : {
"album" : {
"name" : "808s & Heartbreak"
},
"id" : "4EWCNWgDS8707fNSZ1oaA5",
"name" : "Heartless"
}
}, {
"track" : {
"album" : {
"name" : "Graduation"
},
"id" : "2aHlRZIGUFThu3eQePm6yI",
"name" : "Champion"
}
}, {
"track" : {
"album" : {
"name" : "Pick It Up (feat. A$AP Rocky)"
},
"id" : "3ncgNpxLoBQ65ABk4djDyd",
"name" : "PICK IT UP (feat. A$AP Rocky)"
}
}, {
"track" : {
"album" : {
"name" : "G-Sides"
},
"id" : "6P1SNP4w09zcvdt4oytb5d",
"name" : "Rock the House"
}
}, {
"track" : {
"album" : {
"name" : "Give It 2 Em Dogg"
},
"id" : "62d33ZVLAxgmlZnofs7Ang",
"name" : "Nuthin But A G'Thang"
}
}, {
"track" : {
"album" : {
"name" : "Cheese (International Deluxe Edition)"
},
"id" : "6uFreJoeTZVC7MgC7B6rF1",
"name" : "Alors on danse"
}
}, {
"track" : {
"album" : {
"name" : "YHLQMDLG"
},
"id" : "4r9jkMEnArtWGH2rL2FZl0",
"name" : "A Tu Merced"
}
}, {
"track" : {
"album" : {
"name" : "X 100PRE"
},
"id" : "69ZaPBHhRMRDjRpW1ivnOU",
"name" : "Como Antes"
}
}, {
"track" : {
"album" : {
"name" : "Moctezuma"
},
"id" : "0cPzGMb1WVMCYn4nUcQrGl",
"name" : "Rincón Yucateco"
}
} ]
} | 20.774595 | 87 | 0.417935 |
acf591485b7958083379e119bff41d46ec2bd9af | 12,185 | py | Python | train.py | aghyad/image-classifier-dl | e60b155b2227b7bbff040013708bb0b4be9473e8 | [
"MIT"
] | null | null | null | train.py | aghyad/image-classifier-dl | e60b155b2227b7bbff040013708bb0b4be9473e8 | [
"MIT"
] | null | null | null | train.py | aghyad/image-classifier-dl | e60b155b2227b7bbff040013708bb0b4be9473e8 | [
"MIT"
] | null | null | null | import time
import json

import torch
from torch import nn, optim
from torchvision import datasets, models, transforms
from workspace_utils import active_session
import argparse


def load_data(data_dir):
    """
    Loads data for training and validation

    :param data_dir: str; directory of images
    :return: data loader objects for the training, validation and testing sets
    """
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    # define your transforms for the training, validation, and testing sets
    data_transforms_training = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    data_transforms_validation = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    data_transforms_test = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Load the datasets with ImageFolder
    image_datasets_training = datasets.ImageFolder(train_dir, transform=data_transforms_training)
    image_datasets_validation = datasets.ImageFolder(valid_dir, transform=data_transforms_validation)
    image_datasets_test = datasets.ImageFolder(test_dir, transform=data_transforms_test)

    # Using the image datasets and the transforms, define the dataloaders
    dataloaders_training = torch.utils.data.DataLoader(image_datasets_training, shuffle=True, batch_size=128)
    dataloaders_validation = torch.utils.data.DataLoader(image_datasets_validation, shuffle=True, batch_size=128)
    dataloaders_test = torch.utils.data.DataLoader(image_datasets_test, shuffle=True, batch_size=128)

    return {"training_dataloader": dataloaders_training,
            "validation_dataloader": dataloaders_validation,
            "testing_dataloader": dataloaders_test,
            "class_to_idx": image_datasets_training.class_to_idx}


def load_categories_to_names(cat_to_name_filepath):
    """
    Loads a dictionary of category names

    :param cat_to_name_filepath: str; json file
    :return: dict; with indexes being categories and value being
        human-readable category names
    """
    with open(cat_to_name_filepath, 'r') as f:
        cat_to_name = json.load(f)
    return cat_to_name


def build_and_train_model(dataloaders_training, dataloaders_validation,
                          class_to_idx, learning_rate=0.001, epochs=5,
                          hidden_units=512, arch='vgg11', device='cpu'):
    """
    train the network on the training and validation data

    :return: trained and validated model object
    """
    model = getattr(models, arch)(pretrained=True)

    for param in model.parameters():
        param.requires_grad = False

    classifier = nn.Sequential(
        nn.Linear(25088, hidden_units),
        nn.ReLU(),
        nn.Dropout(p=0.2),
        nn.Linear(hidden_units, 102),
        nn.LogSoftmax(dim=1)
    )
    model.classifier = classifier

    with active_session():
        criterion = nn.NLLLoss()
        optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
        model.to(device)

        steps = 0
        train_losses, test_losses = [], []

        # TRAIN & VALIDATE for multiple epochs
        for e in range(epochs):
            running_loss = 0
            # start a one epoch cycle
            for ii, (images, labels) in enumerate(dataloaders_training):
                # TRAINING happens here:
                # setup
                images, labels = images.to(device), labels.to(device)
                start = time.time()
                optimizer.zero_grad()

                # feed forward
                outputs = model.forward(images)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # record loss
                running_loss += loss.item()
            else:
                # VALIDATION happens here:
                test_loss = 0
                accuracy = 0
                with torch.no_grad():
                    model.eval()
                    for images, labels in dataloaders_validation:
                        images, labels = images.to(device), labels.to(device)
                        outputs = model.forward(images)
                        loss = criterion(outputs, labels)
                        test_loss += loss.item()

                        probs = torch.exp(outputs)
                        top_probs, top_class = probs.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        if device == 'cuda':
                            accuracy += torch.mean(
                                equals.type(torch.cuda.FloatTensor))
                        else:
                            accuracy += torch.mean(
                                equals.type(torch.FloatTensor))

                model.train()
                train_losses.append(running_loss / len(dataloaders_training))
                test_losses.append(test_loss / len(dataloaders_validation))

                print(
                    f'epoch {e + 1}/{epochs}',
                    f'Training Loss: {running_loss/len(dataloaders_training)}',
                    f'Test Loss: {test_loss / len(dataloaders_validation)}',
                    f'Accuracy: {accuracy / len(dataloaders_validation)}')

    model.class_to_idx = class_to_idx
    return {"model": model, "criterion": criterion, "optimizer": optimizer}


def test_trained_model(model, criterion, dataloaders_test, device):
    """
    Test the trained model on the testing dataset

    :param model: trained model object
    :param dataloaders_test: testing dataloader object
    :return: None; prints the accuracy and loss numbers
    """
    with active_session():
        model.to(device)
        test_losses = []
        with torch.no_grad():
            model.eval()
            equals_sum = 0
            items_count = 0
            for images, labels in dataloaders_test:
                # Testing happens here:
                test_loss = 0
                accuracy = 0
                images, labels = images.to(device), labels.to(device)
                outputs = model.forward(images)
                loss = criterion(outputs, labels)
                test_loss += loss.item()

                probs = torch.exp(outputs)
                top_probs, top_class = probs.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                items_count += len(equals)
                if device == 'cuda':
                    equals_sum += torch.sum(equals.type(torch.cuda.FloatTensor))
                else:
                    equals_sum += torch.sum(equals.type(torch.FloatTensor))

                test_losses.append(test_loss / len(dataloaders_test))

            print(f'Test Loss: {test_loss / len(dataloaders_test)}',
                  f'Accuracy: {equals_sum / items_count}')
    return None


def save_checkpoint(model, optimizer, save_directory, lr=0.001, epochs=5,
                    hidden_units=512, device='cpu'):
    """
    Save the checkpoint

    :param model: trained model object
    :param optimizer: optimizer object
    :return: None; saves a file for the checkpoint
    """
    checkpoint = {
        'classifier': {
            'input_size': 25088,
            'output_size': 102,
            'hidden_layers': [hidden_units],
            'dropout_p': 0.2
        },
        'state_dict': model.state_dict(),
        'model_class_to_idx': model.class_to_idx,
        'training': {
            'optimizer': optimizer.state_dict,
            'epochs': epochs,
            'lr': lr
        }
    }
    torch.save(checkpoint, save_directory + '/checkpoint_' + device + '.pth')
    return None


def parse_command_line_arguments():
    """
    Defines and parses command-line arguments (both positional and optional)

    :return: parsed object to be used to extract arguments values
    """
    parser = argparse.ArgumentParser()

    # Positional args
    parser.add_argument('data_directory', action="store")

    # Optional args
    parser.add_argument('--save_dir', action='store',
                        dest='save_dir',
                        help='Load categories names from given file',
                        default="checkpoint.pth")
    parser.add_argument('--gpu', action='store_true',
                        dest='device',
                        help='Device of prediction processing',
                        default=False)
    parser.add_argument('--arch', action='store',
                        dest='arch',
                        help='Name of pre-trained network used for training',
                        default="vgg11")
    parser.add_argument('--learning_rate', action='store',
                        dest='learning_rate',
                        help='value of training learning rate',
                        default=0.001)
    parser.add_argument('--hidden_units', action='store',
                        dest='hidden_units',
                        help='Number of units in the fully-connected hidden '
                             'layer of the neural network',
                        default=512)
    parser.add_argument('--epochs', action='store',
                        dest='epochs',
                        help='Number of training epochs',
                        default=5)

    # Parse all args
    results = parser.parse_args()
    return results


if __name__ == "__main__":
    """
    Command-line usage example:

        $ python train.py data_directory

    Options: (all or nothing; any order)
        --save_dir save_directory
        --arch "vgg13"
        --learning_rate 0.01
        --hidden_units 512
        --epochs 20
        --gpu

    Prints out training loss, validation loss, and validation
    accuracy as the network trains
    AND when done training, it saves the trained model checkpoint
    """
    cmd_arguments = parse_command_line_arguments()
    data_directory = cmd_arguments.data_directory
    save_dir = cmd_arguments.save_dir
    device = "cuda" if cmd_arguments.device else "cpu"
    arch = cmd_arguments.arch
    learning_rate = float(cmd_arguments.learning_rate)
    hidden_units = int(cmd_arguments.hidden_units)
    epochs = int(cmd_arguments.epochs)

    # load data
    print('* Loading data in progress ...')
    dataloaders = load_data(data_directory)
    training_dataloader = dataloaders["training_dataloader"]
    validation_dataloader = dataloaders["validation_dataloader"]
    testing_dataloader = dataloaders["testing_dataloader"]
    class_to_idx = dataloaders["class_to_idx"]
    print('* Data loaded successfully!\n')

    # start training and validation:
    print('* Building and training model in progress ...')
    print('* Following are training loss, validation loss, and model accuracy:\n')
    model_details = build_and_train_model(
        training_dataloader, validation_dataloader, class_to_idx,
        learning_rate, epochs, hidden_units, arch, device)
    model = model_details['model']
    criterion = model_details['criterion']
    optimizer = model_details['optimizer']
    print('\n* Finished training model successfully!\n')
    print(f'--> This is our trained model:\n\n{model}\n\n')

    # test model
    print('* Let\'s test our model against testing data ...\n')
    test_trained_model(model, criterion, testing_dataloader, device)
    print('\n* Done testing successfully!\n')

    # save checkpoint
    print(f'* Saving model checkpoint as {save_dir}/checkpoint_{device}.pth')
    save_checkpoint(model, optimizer, save_dir, learning_rate, epochs,
                    hidden_units, device)
    print('* Saved checkpoint successfully!\n')
| 33.201635 | 113 | 0.601888 |
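An illustrative invocation matching the usage block above (the flowers directory is a placeholder; any folder containing train/, valid/ and test/ subfolders works):

python train.py flowers --save_dir checkpoints --arch vgg13 --learning_rate 0.01 --hidden_units 512 --epochs 20 --gpu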
acf59195f10a9b3d9730b695651d06bbc39cea9e | 6,864 | py | Python | sisppeo/utils/main.py | inrae/SISPPEO | f516bb778b505739fdf320affe651b715ed75324 | [
"Apache-2.0"
] | 5 | 2021-11-05T09:23:13.000Z | 2022-02-18T10:39:13.000Z | sisppeo/utils/main.py | inrae/SISPPEO | f516bb778b505739fdf320affe651b715ed75324 | [
"Apache-2.0"
] | null | null | null | sisppeo/utils/main.py | inrae/SISPPEO | f516bb778b505739fdf320affe651b715ed75324 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Arthur Coqué, Pôle OFB-INRAE ECLA, UR RECOVER
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various useful functions used in main.py."""
from pathlib import Path

from sisppeo.utils.exceptions import InputError


def str_to_path(path_str, exists=True):
    """Converts a path string to a pathlib.Path object.

    Args:
        path_str: a path string.
        exists: a boolean whether to check if this path exists or not.
    """
    if isinstance(path_str, str):
        path = Path(path_str)
    else:
        path = path_str
    if exists and not path.exists():
        raise InputError(f'"{str(path)}" does not exist')
    return path


def parse_params(key, params):
    """Parse and verify params given to the function generate (main.py)."""
    # module
    if 'product_type' in params and 'batch' in key:
        product_type = params.pop('product_type')
        params['product_types'] = [product_type for _
                                   in range(len(params['input_products']))]
    if 'dirname' in params:
        params['dirname'] = str_to_path(params['dirname'])
    if 'filename' in params:
        params['filenames'] = [str_to_path(params.pop('filename'), False)]
    elif 'filenames' in params:
        params['filenames'] = [str_to_path(p, False)
                               for p in params['filenames']]
    if 'algo' in params:
        params['lst_algo'] = [params.pop('algo')]
    if 'algo_band' in params:
        params['lst_band'] = [params.pop('algo_band')]
    if 'algo_calib' in params:
        params['lst_calib'] = [params.pop('algo_calib')]
    if 'mask' in params:
        params['lst_mask'] = [params.pop('mask')]
    if 'l3mask' in params:
        lst_l3mask = params.pop('l3mask')
        if not isinstance(lst_l3mask, list):
            lst_l3mask = [lst_l3mask]
        params['lst_l3mask'] = lst_l3mask
    if 'l3mask_path' in params:
        lst_l3mask_path = params.pop('l3mask_path')
        if not isinstance(lst_l3mask_path, list):
            lst_l3mask_path = [lst_l3mask_path]
        params['lst_l3mask_path'] = lst_l3mask_path
    if 'l3mask_type' in params:
        lst_l3mask_type = params.pop('l3mask_type')
        if not isinstance(lst_l3mask_type, list):
            lst_l3mask_type = [lst_l3mask_type]
        params['lst_l3mask_type'] = lst_l3mask_type
    if 'tsmask' in params:
        lst_tsmask = params.pop('tsmask')
        if not isinstance(lst_tsmask, list):
            lst_tsmask = [lst_tsmask]
        params['lst_tsmask'] = lst_tsmask
    if 'tsmask_path' in params:
        lst_tsmask_path = params.pop('tsmask_path')
        if not isinstance(lst_tsmask_path, list):
            lst_tsmask_path = [lst_tsmask_path]
        params['lst_tsmask_path'] = lst_tsmask_path
    if 'tsmask_type' in params:
        lst_tsmask_type = params.pop('tsmask_type')
        if not isinstance(lst_tsmask_type, list):
            lst_tsmask_type = [lst_tsmask_type]
        params['lst_tsmask_type'] = lst_tsmask_type
    # module + CLI
    if 'input_product' in params:
        params['input_product'] = str_to_path(params.pop('input_product'))
    elif 'input_products' in params:
        params['input_products'] = [str_to_path(product) for product
                                    in params.pop('input_products')]
    else:
        msg = 'You must provide at least one input product.'
        raise InputError(msg)
    if 'lst_l3mask_path' in params:    # l3algo / match up
        params['lst_l3mask_path'] = [str_to_path(l3mask_path) for l3mask_path
                                     in params['lst_l3mask_path']]
    if 'lst_l3masks_paths' in params:    # time series / batch
        params['lst_l3masks_paths'] = [
            [str_to_path(l3mask_path) for l3mask_path in lst_l3mask_path]
            for lst_l3mask_path in params['lst_l3masks_paths']
        ]
    if 'lst_tsmask_path' in params:
        params['lst_tsmask_path'] = [str_to_path(tsmask_path) for tsmask_path
                                     in params['lst_tsmask_path']]
    if 'theia_bands' in params and ('time series' in key
                                    or 'batch' in key):
        theia_bands = params.pop('theia_bands')
        params['lst_tb'] = [theia_bands for _
                            in range(len(params['input_products']))]
    if 'theia_masks' in params and ('time series' in key or 'batch' in key):
        theia_masks = params.pop('theia_masks')
        params['lst_tm'] = [theia_masks for _
                            in range(len(params['input_products']))]
    if 'glint_corrected' in params and ('time series' in key
                                        or 'batch' in key):
        glint_corrected = params.pop('glint_corrected')
        params['lst_gc'] = [glint_corrected for _
                            in range(len(params['input_products']))]
    if 'flags' in params and ('time series' in key or 'batch' in key):
        flags = params.pop('flags')
        params['lst_flags'] = [flags for _
                               in range(len(params['input_products']))]
    if 'geom' in params and 'batch' in key:
        geom = params.pop('geom')
        params['lst_geom'] = [geom for _
                              in range(len(params['input_products']))]
    if 'code_site' in params and 'batch' in key:
        code_site = params.pop('code_site')
        params['lst_code_site'] = [code_site for _
                                   in range(len(params['input_products']))]
    if 'out_resolution' in params and ('time series' in key or 'batch' in key):
        out_resolution = params.pop('out_resolution')
        params['lst_res'] = [out_resolution
                             for _ in range(len(params['input_products']))]
    if 'processing_resolution' in params and ('time series' in key
                                              or 'batch' in key):
        processing_resolution = params.pop('processing_resolution')
        params['lst_proc_res'] = [processing_resolution for _
                                  in range(len(params['input_products']))]
    return params


def series_to_batch(args, n):
    product_type = args.pop('product_type')
    args['product_types'] = [product_type for _ in range(n)]
    geom = args.pop('geom')
    if geom is not None:
        args['lst_geom'] = [geom for _ in range(n)]
    return args
| 43.443038 | 79 | 0.611451 |
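An illustrative call of parse_params above (the key and values are hypothetical, and the input file is assumed to exist, since str_to_path checks for it by default):

params = parse_params('l3 algo', {'input_product': 'S2A_tile.nc', 'algo': 'ndwi'})
# -> {'lst_algo': ['ndwi'], 'input_product': PosixPath('S2A_tile.nc')}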
acf592334043e2555ba481f5b471f53341c0f548 | 40 | py | Python | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183929.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183929.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183929.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | print('------- Running {0} -----------') | 40 | 40 | 0.325 |
acf5926b3ceb0d628e6a6d125d73ee97ecd3464c | 425 | py | Python | QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py | beaquant/QUANTAXIS | ab91f82a344d2efeecd74007db45c22457df20e2 | [
"MIT"
] | null | null | null | QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py | beaquant/QUANTAXIS | ab91f82a344d2efeecd74007db45c22457df20e2 | [
"MIT"
] | null | null | null | QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py | beaquant/QUANTAXIS | ab91f82a344d2efeecd74007db45c22457df20e2 | [
"MIT"
] | null | null | null | import unittest
import QUANTAXIS as QA
class QABacktestSimple_Test(unittest.TestCase):
def setUp(self):
        # Prepare the test data
code = '300439'
start = '2017-01-01'
end = '2018-01-01'
stock_data_300439_2017 = QA.QA_fetch_stock_day_adv(code, start, end).to_qfq()
print(stock_data_300439_2017)
print(len(stock_data_300439_2017))
def test_simpleQABacktest(self):
pass
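    def test_fetch_returns_data(self):
        # Hedged sketch added for illustration (not part of the original
        # suite): it reuses only the QA_fetch_stock_day_adv API already
        # exercised in setUp and assumes a populated QUANTAXIS database.
        data = QA.QA_fetch_stock_day_adv('300439', '2017-01-01', '2018-01-01').to_qfq()
        self.assertTrue(len(data) > 0)
if __name__ == '__main__':
    unittest.main()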
| 21.25 | 85 | 0.654118 |
acf5930ebebfaf06d9fb2ee08d5e8911db172040 | 708 | py | Python | www/cgi-bin/test.py | watay147/LightHttpServer | 450e489e49928af70ac45f7fea4bdc0e7c39b60f | [
"Apache-2.0"
] | null | null | null | www/cgi-bin/test.py | watay147/LightHttpServer | 450e489e49928af70ac45f7fea4bdc0e7c39b60f | [
"Apache-2.0"
] | null | null | null | www/cgi-bin/test.py | watay147/LightHttpServer | 450e489e49928af70ac45f7fea4bdc0e7c39b60f | [
"Apache-2.0"
] | null | null | null | #!python
import os,cgi
import Cookie
form = cgi.FieldStorage()
print "Set-Cookie: xx=5825"
print "Set-Cookie: ss=55"
print "Set-Cookie: xx=5888"
print "Content-type: text/html\n";
print "<html><body><h1>cgi got!</h1>"
print "<p> QUERY_STRING:"+os.environ.get( "QUERY_STRING")+"</p>"
print "<p> Post:hh="+form.getvalue("hh")+"</p><br>"
if 'HTTP_COOKIE' in os.environ:
cookie_string=os.environ.get('HTTP_COOKIE')
c=Cookie.SimpleCookie()
c.load(cookie_string)
try:
data=c['xx'].value
print "cookie data: "+data+"<br>"
print "cookie data: "+c['ss'].value+"<br>"
except KeyError:
print "cookie not set<br>"
else:
print "no cookie"
print "</body><html>" | 26.222222 | 64 | 0.631356 |
acf5939c08559ad5a4a36d611fa0635c924ab20f | 1,360 | py | Python | tests/test_getPackageDir.py | ktlim/utils | 3afcb0f245807a502f1e73dac4782a1b239bb3e0 | [
"BSD-3-Clause"
] | null | null | null | tests/test_getPackageDir.py | ktlim/utils | 3afcb0f245807a502f1e73dac4782a1b239bb3e0 | [
"BSD-3-Clause"
] | 1 | 2021-09-15T15:13:34.000Z | 2021-09-15T15:13:34.000Z | tests/test_getPackageDir.py | ktlim/utils | 3afcb0f245807a502f1e73dac4782a1b239bb3e0 | [
"BSD-3-Clause"
] | null | null | null | # This file is part of utils.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
import sys
import os
import unittest
import lsst.utils.tests
from lsst.utils import getPackageDir
@unittest.skipIf("UTILS_DIR" not in os.environ, "EUPS has not set up this package.")
class GetPackageDirTestCase(unittest.TestCase):
def testBasics(self):
utilsPath = getPackageDir("utils")
self.assertTrue(os.path.isfile(os.path.join(utilsPath, "tests", "test_getPackageDir.py")))
# Confirm that we have a correct Python exception and pex exception
with self.assertRaises(LookupError):
getPackageDir("nameOfNonexistendPackage2234q?#!")
def testUnicodeBasics(self):
utilsPath = getPackageDir(u"utils")
self.assertTrue(os.path.isfile(os.path.join(utilsPath, "tests", "test_getPackageDir.py")))
class TestMemory(lsst.utils.tests.MemoryTestCase):
pass
def setup_module(module):
lsst.utils.tests.init()
if __name__ == "__main__":
setup_module(sys.modules[__name__])
unittest.main()
| 29.565217 | 98 | 0.730147 |
acf597b42a86aa669bc1d5ea591e4b2cbd82688b | 683 | py | Python | tests/configlet/util/helpers.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | tests/configlet/util/helpers.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | tests/configlet/util/helpers.py | lolyu/sonic-mgmt | ed888fd1ce26e7f44fd7f70af00c43ace4882668 | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | #! /usr/bin/env python
from datetime import datetime
import inspect
import logging
logger = logging.getLogger(__name__)
do_print = False
def log_init(name):
global logger
logger = logging.getLogger(name)
def log_msg(lgr_fn, m):
tstr = datetime.now().strftime("%H:%M:%S")
msg = "{}:{}:{} {}".format(inspect.stack()[2][1], inspect.stack()[2][2], tstr, m)
lgr_fn(msg)
if do_print:
print(msg)
def log_error(m):
log_msg(logger.error, m)
def log_info(m):
log_msg(logger.info, m)
def log_warn(m):
log_msg(logger.warning, m)
def log_debug(m):
log_msg(logger.debug, m)
def set_print():
global do_print
do_print = True
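# Usage sketch (illustrative only; left commented out so the module keeps
# having no import-time side effects):
# log_init(__name__)    # rebind the module logger to the caller's name
# set_print()           # echo every message to stdout as well as the logger
# log_info("starting")  # emits "<caller file>:<caller line>:<HH:MM:SS> starting"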
| 15.177778 | 85 | 0.644217 |
acf598ac0eca5d7a9e68793a3dfa34d8428de44e | 2,348 | py | Python | Lab_Week_02_-_Graph-Based_Search/Solutions/grid_search/occupancy_grid.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | Lab_Week_02_-_Graph-Based_Search/Solutions/grid_search/occupancy_grid.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | Lab_Week_02_-_Graph-Based_Search/Solutions/grid_search/occupancy_grid.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | from .helpers import clamp
# This class stores the occupancy grid. This is a "chessboard-like"
# representation of the environment. The environment is represented by
# a set of square cells. Each cell encodes whether that bit of the
# environment is free, or whether it is blocked. A "0" says that a
# cell is free and so the robot can travel over it. A "1" means that
# it is blocked and the robot cannot travel over it.
class OccupancyGrid(object):
# Construct a new occupancy grid with a given width and
    # height. The resolution says the length of the side of each cell
# in metres. By default, all the cells are set to "0" which means
# that there are no obstacles.
def __init__(self, width, height, resolution):
self._width = width
self._height = height
self._resolution = resolution
self._data = [[0 for x in range(width)] for y in range(height)]
# The width of the occupancy map in cells
def width(self):
return self._width
# The height of the occupancy map in cells
def height(self):
return self._height
# The resolution of each cell (the length of its side in metres)
def resolution(self):
return self._resolution
# Get the status of a cell.
def cell(self, x, y):
return self._data[y][x]
# Set the status of a cell.
def setCell(self, x, y, c):
self._data[y][x] = c
# Take a position in world coordinates (i.e., m) and turn it into
# cell coordinates. Clamp the value so that it always falls within
# the grid. The conversion uses integer rounding.
def getCellCoordinatesFromWorldCoordinates(self, worldCoords):
cellCoords = (clamp(int(worldCoords[0] / self._resolution), 0, self._width - 1), \
clamp(int(worldCoords[1] / self._resolution), 0, self._height - 1))
return cellCoords
# Convert a position in cell coordinates to world coordinates. The
# conversion uses the centre of a cell, hence the mysterious 0.5
# addition. No clamping is currently done.
def getWorldCoordinatesFromCellCoordinates(self, cellCoords):
worldCoords = ((cellCoords[0] + 0.5) * self._resolution, \
(cellCoords[1] + 0.5) * self._resolution)
return worldCoords
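# Usage sketch (illustrative only): a 10x10 grid at 0.5m resolution covers a
# 5m x 5m world, and conversions go through the centre of each cell.
# grid = OccupancyGrid(10, 10, 0.5)
# grid.setCell(2, 3, 1)                                      # mark the cell as blocked
# grid.getWorldCoordinatesFromCellCoordinates((2, 3))        # -> (1.25, 1.75)
# grid.getCellCoordinatesFromWorldCoordinates((1.25, 1.75))  # -> (2, 3)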
| 38.491803 | 90 | 0.658007 |
acf5991c26247b37a3fa498145dac4ba27108870 | 1,140 | py | Python | contract/__init__.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 60 | 2018-01-12T07:33:15.000Z | 2021-12-28T23:06:28.000Z | contract/__init__.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 13 | 2018-01-23T00:14:35.000Z | 2020-04-23T00:03:31.000Z | contract/__init__.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | [
"MIT"
] | 13 | 2018-01-05T07:27:29.000Z | 2021-01-06T16:45:05.000Z | # --*-- coding : utf-8 --*--
"""Author: Trinity Core Team
MIT License
Copyright (c) 2018 Trinity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
__all__ = []
| 42.222222 | 78 | 0.785088 |
acf5996a2f122a5b5048da630dd0c402f58b5522 | 10,460 | py | Python | tests/macro_liquidMG_UO.py | niamorelreillet/openiec_with_OC | 9e027c7052ca98398bf09758bc05b3daf1aba151 | [
"MIT"
] | null | null | null | tests/macro_liquidMG_UO.py | niamorelreillet/openiec_with_OC | 9e027c7052ca98398bf09758bc05b3daf1aba151 | [
"MIT"
] | null | null | null | tests/macro_liquidMG_UO.py | niamorelreillet/openiec_with_OC | 9e027c7052ca98398bf09758bc05b3daf1aba151 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
from openiec.property.coherentenergy_OC import CoherentGibbsEnergy_OC
from openiec.calculate.calcsigma_OC import SigmaCoherent_OC
from pyOC import opencalphad as oc
from pyOC import GridMinimizerStatus as gmStat
def constituentToEndmembersConverter(constituentMolarFractions, constituentsDescription):
endmemberMolarFractions = {
'O2U1' : constituentMolarFractions['sublattice 0']['U+4']*constituentMolarFractions['sublattice 1']['O-2'],
'U1' : constituentMolarFractions['sublattice 0']['U+4']*constituentMolarFractions['sublattice 1']['VA'],
'O1' : constituentMolarFractions['sublattice 1']['O']
}
endmemberMolarMasses = {
'U1' : constituentsDescription['U+4']['mass'],
'O1' : constituentsDescription['O']['mass'],
'O2U1' : constituentsDescription['U+4']['mass']+2.0*constituentsDescription['O']['mass']
}
endMemberMassFractions = {k : endmemberMolarFractions[k]*endmemberMolarMasses[k] for k in endmemberMolarFractions}
factor=1.0/sum(endMemberMassFractions.values())
for k in endMemberMassFractions:
endMemberMassFractions[k] = endMemberMassFractions[k]*factor
return endMemberMassFractions
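# Illustrative sketch (hypothetical numbers): with x(U+4)=1.0 on sublattice 0
# and x(O-2)=0.6, x(VA)=0.4, x(O)=0.0 on sublattice 1, the converter above
# yields endmember molar fractions O2U1=0.6, U1=0.4, O1=0.0, which are then
# rescaled into mass fractions using the endmember molar masses.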
def run():
print('### test U-O coherent interface in the liquid miscibility gap ###\n')
# tdb filepath
#tdbFile=os.environ['TDBDATA_PRIVATE']+'/feouzr.tdb'
#tdbFile=os.environ['TDBDATA_PRIVATE']+'/NUCLEA-17_1_mod.TDB'
tdbFile='tests/TAF_uzrofe_V10.TDB'
# components
comps = ['O', 'U']
# mass density laws (from Barrachin2004)
constituentDensityLaws = {
'U1' : lambda T: 17270.0-1.358*(T-1408),
'ZR1' : lambda T: 6844.51-0.609898*T+2.05008E-4*T**2-4.47829E-8*T**3+3.26469E-12*T**4,
'O2U1' : lambda T: 8860.0-9.285E-1*(T-3120),
'O2ZR1': lambda T: 5150-0.445*(T-2983),
        'O1' : lambda T: 1.141 # set to a meaningless value, which is acceptable since there is no 'free' oxygen in the considered mixtures
}
constituentDensityLaws['U'] = constituentDensityLaws['U1']
constituentDensityLaws['ZR'] = constituentDensityLaws['ZR1']
constituentDensityLaws['O'] = constituentDensityLaws['O1']
# phase names
phasenames = ['LIQUID', 'LIQUID']
# pressure
P = 1E5
# Given initial alloy composition. x0 is the mole fraction of U.
x0 = [0.65]
# Composition step for searching initial interfacial equilibrium composition.
dx = 0.05
# temperature range
Tmin = 2800.0
Tmax = 4400.0
Trange = np.linspace(Tmin, Tmax, num=60, endpoint=True)
results = pd.DataFrame(columns=['temperature', 'n_phase1', 'n_phase2', 'xU_phase1', 'xU_phase2', 'xU_interface', 'sigma'])
for T in Trange:
# Molar volumes of pure components evaluated at x0 and kept constant afterwards
CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
model = CoherentGibbsEnergy_OC(T, P, phasenames[0], False)
functions=model.constantPartialMolarVolumeFunctions(x0, constituentDensityLaws, 1E-5, constituentToEndmembersConverter)
#functions=model.constantPartialMolarVolumeFunctions(x0, constituentDensityLaws, 1E-5)
# calculate global equilibrium and retrieve associated chemical potentials
model = CoherentGibbsEnergy_OC(T, 1E5, phasenames)
mueq = model.chemicalpotential(x0)
phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
if (len(phasesAtEquilibriumMolarAmounts)==1):
# it is possible that the miscibility gap has not been detected correctly (can happen when T increases)
#print(phasesAtEquilibriumMolarAmounts)
# ad hoc strategy: 1) calculate an equilibrium at lower temperature (hopefully finding the two phases)
# 2) redo the calculation at the target temperature afterwards without the grid minimizer
model = CoherentGibbsEnergy_OC(T-300.0, 1E5, phasenames)
mueq = model.chemicalpotential(x0)
phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
#print(phasesAtEquilibriumMolarAmounts)
oc.setTemperature(T)
oc.calculateEquilibrium(gmStat.Off)
mueq = model.getChemicalPotentials()
phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
phasesAtEquilibriumElementCompositions = phasesAtEquilibrium.getPhaseElementComposition()
print(phasesAtEquilibriumMolarAmounts)
if (set(phasesAtEquilibriumMolarAmounts)==set(['LIQUID#1', 'LIQUID_AUTO#2'])):
# Composition range for searching initial interfacial equilibrium composition
# calculated from the actual phase compositions
componentsWithLimits = comps[1:]
limit = [ [1.0, 0.0] for each in componentsWithLimits ]
for phase in phasesAtEquilibriumElementCompositions:
for element in phasesAtEquilibriumElementCompositions[phase]:
elementMolarFraction = phasesAtEquilibriumElementCompositions[phase][element]
if element in componentsWithLimits:
limit[componentsWithLimits.index(element)][0] = min(limit[componentsWithLimits.index(element)][0], elementMolarFraction)
limit[componentsWithLimits.index(element)][1] = max(limit[componentsWithLimits.index(element)][1], elementMolarFraction)
limit = [ [each[0]+dx, each[1]-dx] for each in limit ]
print('limits: ', limit)
# calculate interfacial energy
sigma = SigmaCoherent_OC(
T=T,
x0=x0,
db=tdbFile,
comps=comps,
phasenames=phasenames,
purevms=functions,
limit=limit,
dx=dx,
enforceGridMinimizerForLocalEq=False,
mueq=mueq
)
print('at T=', T, ' sigma=', sigma.Interfacial_Energy.values, '\n')
if (np.abs(sigma.Interfacial_Energy.values)>1E-6):
# store results in pandas dataframe
results = results.append({'temperature' : T,
'n_phase1' : phasesAtEquilibriumMolarAmounts['LIQUID#1'],
'n_phase2' : phasesAtEquilibriumMolarAmounts['LIQUID_AUTO#2'],
'xU_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['U'],
'xU_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['U'],
'xU_interface' : sigma.Interfacial_Composition.values[1],
'sigma' : sigma.Interfacial_Energy.values,
},
ignore_index = True)
else:
                raise ValueError('spurious interfacial energy value (|sigma| <= 1E-6) discarded')
else:
print('at T=', T, ' out of the miscibility gap')
print('phases at equilibrium:', phasesAtEquilibriumMolarAmounts)
# write csv result file
results.to_csv('macro_liquidMG_UO_run.csv')
def fit():
results = pd.read_csv('macro_liquidMG_UO_run.csv')
# Function to calculate the power-law with constants sigma0, Tc, mu, sigmaC
def power_law_plus_const(T, sigma0, Tc, mu, sigmaC):
return sigma0*np.power(1.0-T/Tc, mu)+sigmaC
def power_law_no_const(T, sigma0, Tc, mu):
return sigma0*np.power(1.0-T/Tc, mu)
# Fit the power-law data
power_law = power_law_no_const
print(results['temperature'])
print(results['sigma'])
pars, cov = curve_fit(f=power_law, xdata=results['temperature'], ydata=results['sigma'], p0=[0.7, results['temperature'][len(results['temperature']) - 1], 1.9], bounds=(-np.inf, np.inf))
    # Get the standard deviations of the parameters (square roots of the diagonal of the covariance)
stdevs = np.sqrt(np.diag(cov))
# Calculate the residuals
print(power_law(results['temperature'], *pars))
res = results['sigma'] - power_law(results['temperature'], *pars)
print(pars, stdevs)
plt.rcParams['figure.figsize'] = (12,7)
fig,axes=plt.subplots(2,2,constrained_layout=True)
# Plots associated with interfacial energy
ax = axes[0,0]
ax.grid(True)
ax.plot(results['temperature'], results['sigma'], marker = 'o', ls='', color='tab:cyan', label='calculated values: $\sigma_{calculated}$')
legLabel = 'fit: $\sigma_{fit}='+'{0:4.3f} (1-T/{1:4.1f})^'.format(pars[0], pars[1])+'{'+'{0:4.3f}'.format(pars[2])+'}$'
ax.plot(results['temperature'], power_law(results['temperature'], *pars), linestyle='--', linewidth=2, color='black', label=legLabel)
ax.set_xlabel('temperature T (K)',fontsize=12)
ax.set_ylabel('interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
ax.legend(loc='upper right')
ax = axes[0,1]
ax.grid(True)
ax.plot(results['temperature'], res, marker = 'o', ls='', color='tab:cyan')
ax.set_xlabel('temperature T (K)',fontsize=12)
ax.set_ylabel('fit residuals $\sigma_{fit} - \sigma_{calculated}$ (N.m$^{-1}$)',fontsize=12)
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
# Plots associated with composition
ax = axes[1,0]
ax.plot(results['xU_phase1'], results['temperature'], marker = '', ls='-', color='tab:red', label='bulk liquid 1')
ax.plot(results['xU_phase2'], results['temperature'], marker = '', ls='-', color='tab:green', label='bulk liquid 2')
ax.plot(results['xU_interface'], results['temperature'], marker = '', ls='-', color='tab:cyan', label='interface')
ax.set_ylabel('temperature T (K)',fontsize=12)
ax.set_xlabel('U molar fraction',fontsize=12)
ax.legend(loc='upper right')
ax = axes[1,1]
ax.plot(results['xU_interface'], results['sigma'], marker = 'o', ls='--', color='tab:cyan')
ax.set_ylabel('interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
ax.set_xlabel('interface U molar fraction',fontsize=12)
plt.savefig('macro_liquidMG_UO_fit.pdf')
plt.show()
if __name__ == '__main__':
run()
fit()
| 53.367347 | 190 | 0.650574 |
acf59a483c6f81fe4fa16b4b31a6de0eb090bd39 | 3,316 | py | Python | python/ParseFileList.py | fermi-lat/calibGenACD | 19067cb61fe8297aee91fc8a2cbdfb7e0eb2d386 | [
"BSD-3-Clause"
] | null | null | null | python/ParseFileList.py | fermi-lat/calibGenACD | 19067cb61fe8297aee91fc8a2cbdfb7e0eb2d386 | [
"BSD-3-Clause"
] | null | null | null | python/ParseFileList.py | fermi-lat/calibGenACD | 19067cb61fe8297aee91fc8a2cbdfb7e0eb2d386 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007
# by
# The Board of Trustees of the
# Leland Stanford Junior University.
# All rights reserved.
#
__facility__ = "calibGenACD"
__abstract__ = "Extracts the DAC to PHA set point relationship of ACD veto"
__author__ = "E. Charles"
__date__ = "$Date$"
__version__ = "$Revision$, $Author$"
__release__ = "$Name$"
#import LATTE.copyright_SLAC
import os, sys
import time
from optparse import OptionParser
from py_mootCore import MootQuery, vectorOfConstitInfo, ConstitInfo
DATACATBIN = "/afs/slac/g/glast/ground/bin/datacat"
def getDateStamp():
"""
"""
return time.strftime("%y%m%d")
def callDatacat(group,dateStamp):
"""
"""
dataCatList = "%s_%s.list"%(group,dateStamp)
dataCatLine = "%s find --sort nMetStart --group %s /Data/Flight/Level1/LPA/ > %s"%(DATACATBIN,group,dataCatList)
print "Calling datacat for group %s on %s"%(group,dateStamp)
os.system(dataCatLine)
return dataCatList
def configInfo(metTime,mq):
"""
"""
acqInfo = mq.getAcqSummaryInfo( int(metTime[1:]) )
if acqInfo is None:
return ("None",0)
key = int(acqInfo.getConfigKey())
configInfo = mq.getConfigInfo(key)
if configInfo is None:
return ("None",key)
return (configInfo.getName(),key)
def fmxKeys(mKey):
"""
"""
mq = MootQuery(None)
constits = vectorOfConstitInfo()
ci = mq.getActiveFilters(mKey,constits,0)
for ci in constits:
print (ci.getKey(),ci.getFswId(),ci.getSchemaId(),ci.getSchemaVersionId(),ci.getInstanceId() )
def utcDayAndWeek(metTime):
"""
"""
unixSecs = float(metTime[1:])
missionEpoch = time.mktime( time.strptime("Sun Dec 31 16:00:00 2000") )
missionStart = time.mktime( time.strptime("Sun Jun 8 15:00:00 2008") )
utcTime = time.gmtime(unixSecs+missionEpoch)
launchSecs = unixSecs+missionEpoch-missionStart
    week = int ( launchSecs / 604800 )   # 604800 = seconds per week
day = "%02d%02d%02d"%(utcTime[0]-2000,utcTime[1],utcTime[2])
return (day,week)
def parseNames(inFileName):
"""
"""
outFileName = inFileName.replace("list","table")
outFile = open(outFileName,'w')
mq = MootQuery(None)
inFile = open(inFileName)
inline = inFile.readline()
    while inline != '':
w = inline.find('/r0')
runNum = inline[w+2:w+12]
(uDay,mWeek) = utcDayAndWeek(runNum)
(configName,configKey) = configInfo(runNum,mq)
outFile.write("%s %s %03d %-4d %s %s\n"%(runNum,uDay,mWeek,configKey,configName,inline.strip()))
inline = inFile.readline()
inFile.close()
outFile.close()
return None
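# Illustrative sketch (hypothetical run number): each input line is expected
# to contain a path component such as '.../r0123456789...'; parseNames takes
# the ten characters after '/r' and writes one table row per run:
#   <runNum> <yymmdd> <mission week> <configKey> <configName> <original line>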
if __name__=='__main__':
# argument parsing
usage = 'ParseFileList.py type'
parser = OptionParser(usage)
if len(sys.argv) == 1 or sys.argv[1] == '-h':
parser.print_help()
sys.exit()
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) < 1:
parser.print_help()
sys.exit()
dateStamp = getDateStamp()
for group in args:
dataCatList = callDatacat(group,dateStamp)
#Latch the time
parseNames(dataCatList)
| 28.101695 | 116 | 0.609469 |
acf59af32cdd1431b7d3691e648f31cfae11f7dc | 401 | py | Python | article/admin.py | sanalkrlk/django-blog | 4237a8a8427c89c80f7bc3e69ae53ec2dc21006d | [
"MIT"
] | null | null | null | article/admin.py | sanalkrlk/django-blog | 4237a8a8427c89c80f7bc3e69ae53ec2dc21006d | [
"MIT"
] | 1 | 2020-06-05T22:48:05.000Z | 2020-06-05T22:48:05.000Z | article/admin.py | akaraoluk/django-blog | 8e8bbcf00ea4c6b4ae1b57b1e968d442e90f0980 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Article,Comment
# Register your models here.
admin.site.register(Comment)
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ["title","author","created_date"]
list_display_links = ["title","created_date"]
search_fields = ["title"]
list_filter = ["title"]
    class Meta:
        # Note: ModelAdmin does not read an inner Meta; this admin is bound
        # to Article by the @admin.register(Article) decorator above.
        Model = Article
| 17.434783 | 52 | 0.700748 |
acf59c1f17bf7e3029df7a30253abc8cef06b6c1 | 31,730 | py | Python | optimizer/full_house/full_house_input_viz.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | 2 | 2021-11-29T14:46:43.000Z | 2021-12-29T02:22:34.000Z | optimizer/full_house/full_house_input_viz.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | null | null | null | optimizer/full_house/full_house_input_viz.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | 1 | 2021-10-03T23:20:52.000Z | 2021-10-03T23:20:52.000Z | """
This ingests the format from the DM3K-Viz tool
"""
import logging
from optimizer.full_house.full_house_input import FullHouseInput
log = logging.getLogger(__name__)
class FullHouseInputViz(FullHouseInput):
"""
The viz input format is a single json file that looks like...
    .. note::
{
"resourceClasses": [
{
"className": "Ship",
"typeName": "container",
"budgets": [],
"containsClasses": ["Turret","Missile"],
"canBeAllocatedToClasses": []
}, ...],
"activityClasses": [
{
"className": "City",
"typeName": "area",
"rewards": [],
"costs": ["Direction"],
"containsClasses": ["VIP"],
"allocatedWhen": {}
}, ...],
"resourceInstances": [
{
"className": "Ship",
"instanceTable": [
{
"instanceName": "Ship_Resource_instance_0",
"budget": ""
},...],
},...],
"activityInstances": [
{
"className": "City",
"instanceTable": [
{
"instanceName": "Columbia MD",
"cost": 1,
"reward": ""
},...],
},...],
"allocationInstances": [
{
"resourceClassName": "Turret",
"activityClassName": "City",
"instanceTable": [
{
"resourceInstanceName": "ALL",
"activityInstanceName": "ALL"
}
]
},...],
"containsInstances": [
{
"parentClassName": "Ship",
"childClassName": "Turret",
"parentType": "resource",
"instanceTable": [
{
"parentInstanceName": "Ship_Resource_instance_0",
"childInstanceName": "Turret_Resource_instance_0"
},...],
},...],
"allocationConstraints": [
{
"allocationStart": {
"resourceClass": "Turret",
"activityClass": "City"
},
"allocationEnd": {
"resourceClass": "Missile",
"activityClass": "VIP"
},
"allocationConstraintType": "Contained IF-THEN"
},...],
}
"""
def __init__(self):
super().__init__()
def _get_all_instances(self, dm3k_viz_data, class_name, class_type="resource"):
if class_type == "resource":
class_type_name = "resourceInstances"
else:
class_type_name = "activityInstances"
instance_list = []
for i in dm3k_viz_data[class_type_name]:
if i["className"] == class_name:
for j in i["instanceTable"]:
instance_list.append(j["instanceName"])
return instance_list
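    # Usage sketch, with names taken from the class docstring example above:
    # self._get_all_instances(dm3k_viz_data, "Ship", "resource")
    #   -> ["Ship_Resource_instance_0", ...]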
def _get_instance_prop(self, dm3k_viz_data, class_name, instance_name, prop_name="cost", class_type="resource"):
if class_type == "resource":
class_type_name = "resourceInstances"
else:
class_type_name = "activityInstances"
prop_value = None
for i in dm3k_viz_data[class_type_name]:
if i["className"] == class_name:
for j in i["instanceTable"]:
if j["instanceName"] == instance_name:
prop_value = j[prop_name]
return prop_value
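    # Usage sketch, again with names from the docstring example:
    # self._get_instance_prop(dm3k_viz_data, "City", "Columbia MD",
    #                         prop_name="cost", class_type="activity") -> 1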
def ingest_validate(self, input_dict):
"""
        Validate the constraints and activity scores to determine if the following errors are found
ERROR_CODE DESCRIPTION
1 the necessary constraints files do not exist
2 the formats of the constraints files are incorrect
3 the data within the constraints files are not consistent with each other
4 the data within the constraints files and the activity names are not consistent
5 unknown error
        Then load the files in the constraints path into this input (capturing them in the self._data attribute)
:param dict input_dict: a dict containing the name of the input and the data from files associated with this input
:return bool fatal: True=a fatal error has been found, the optimizer should not continue
:return list validation_errors: a list of errors where each error is a dict with the following attributes...
"err_code" : <a int where int is key in ERROR_CODE above>,
"err_txt" : <human readable text that describes the error>,
"offender" : <string name (of DU, resource, or resource group) that is causing error>,
"fix": <string name of process performed to fix the error or None>,
"is_fatal_error": <boolean; True = error is fatal, False = error is fixable>
"""
if "datasetName" in input_dict:
log.debug("Opening Dataset: " + input_dict["datasetName"])
else:
log.warning("'datasetName' is not in input_dict")
# determine if correct files exist
if "files" not in input_dict:
return (
True,
[
{
"err_code": 2,
"err_txt": "'files' attribute is not in input_dict...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
file_data = input_dict["files"]
if len(file_data) != 1:
return (
True,
[
{
"err_code": 1,
"err_txt": "system requires 'files' attribute to contain data from 1 file...you have submitted {} files...the necessary files do not exist!".format(
len(file_data)
),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
# assuming only 1 file is required
if "fileContents" not in file_data[0]:
return (
True,
[
{
"err_code": 2,
"err_txt": "'fileContents' attribute is not in input_dict['files'][0]...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
dm3k_viz_data = file_data[0]["fileContents"]
# --- INGEST ---
log.debug("Ingesting Data")
self._data = {
"parent_resources": [],
"child_resources": [],
"parent_activities": [],
"child_activities": [],
"avail_parent_amt": {},
"avail_child_amt": {},
"req_parent_amt": {},
"req_child_amt": {},
"child_score": {},
"force_list": [],
"forbid_list": [],
"parent_possible_allocations": {},
"child_possible_allocations": {},
"resource_families": {},
"activity_children": {},
"parent_budget_name": "",
"child_budget_name": "",
}
# a parent activity is an activity that contains another activity
# so go through all activityClasses and find the one that contains another activity
# when you find a contains, list all the instance names of that class
# FUTURE:
# - if no containing activity exists, and there is only 1 activity class, make a default containing activity
if "activityClasses" not in dm3k_viz_data:
return (
True,
[
{
"err_code": 2,
"err_txt": "'activityClasses' attribute is not in input_dict['files'][0]['fileContents']...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
activity_classes = dm3k_viz_data["activityClasses"]
parent_activity = None
try:
for ac in activity_classes:
if len(ac["containsClasses"]) >= 1:
parent_activity = ac["className"]
log.debug("Found Parent_activity = " + parent_activity)
if parent_activity is None:
self._add_to_validation_errors(
"Cannot find parent activity, no activity contains another activity",
is_fatal_error=True,
err_code=3,
offender="**YOU**",
fix=None,
)
return True, self._validation_errors # this is a fatal error, just stop now
else:
self._data["parent_activities"] = self._get_all_instances(dm3k_viz_data, parent_activity, "activity")
log.debug("parent_activities")
log.debug(self._data["parent_activities"])
except KeyError as e:
return (
True,
[
{
"err_code": 2,
"err_txt": "'{}' key name was not in input_dict['files'][0]['fileContents']['activityClasses']...the format of the input is not correct!".format(
e
),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
except Exception as e:
return (
True,
[
{
"err_code": 5,
"err_txt": "Unknown Error '{0}:{1!r}' found in ingest-validate".format(type(e).__name__, e.args),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
# the parent resource is a resource that can be allocated to the parent activity
# so go through all the resourceClasses and find the one that can be allocated to the parent activity class
# when you find the parent resource, list all the instance names of that class
if "resourceClasses" not in dm3k_viz_data:
return (
True,
[
{
"err_code": 2,
"err_txt": "'resourceClasses' attribute is not in input_dict['files'][0]['fileContents']...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
resource_classes = dm3k_viz_data["resourceClasses"]
parent_resource = None
try:
for rc in resource_classes:
if parent_activity in rc["canBeAllocatedToClasses"]:
parent_resource = rc["className"]
log.debug("Found Parent Resource = " + parent_resource)
if parent_resource is None:
self._add_to_validation_errors(
"Cannot find parent resource, no resource can be allocated to the activity: " + parent_activity,
is_fatal_error=True,
err_code=3,
offender="**YOU**",
fix=None,
)
return True, self._validation_errors # this is a fatal error, just stop now
else:
self._data["parent_resources"] = self._get_all_instances(dm3k_viz_data, parent_resource)
log.debug("parent_resources")
log.debug(self._data["parent_resources"])
except KeyError as e:
return (
True,
[
{
"err_code": 2,
"err_txt": "'{}' key name was not in input_dict['files'][0]['fileContents']['resourceClasses']...the format of the input is not correct!".format(
e
),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
except Exception as e:
return (
True,
[
{
"err_code": 5,
"err_txt": "Unknown Error '{0}:{1!r}' found in ingest-validate".format(type(e).__name__, e.args),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
# avail_parent amt is a dict of the total budget for each parent resource:
# keys are parent resource names,
# values are float amounts
parent_res_instance = None
if "resourceInstances" not in dm3k_viz_data:
return (
True,
[
{
"err_code": 2,
"err_txt": "'resourceInstances' attribute is not in input_dict['files'][0]['fileContents']...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
try:
for ri in dm3k_viz_data["resourceInstances"]:
if ri["className"] == parent_resource:
parent_res_instance = ri
for pri in parent_res_instance["instanceTable"]:
# budget is a dictionary...full house can only take values...also full house can only take 1 budget (i.e. the [0])
budget = list(pri["budget"].values())[0]
# need budget name for output
self._data["parent_budget_name"] = list(pri["budget"].keys())[0]
self._data["avail_parent_amt"][pri["instanceName"]] = budget
log.debug("avail_parent_amt")
log.debug(self._data["avail_parent_amt"])
except KeyError as e:
return (
True,
[
{
"err_code": 2,
"err_txt": "'{}' key name was not in input_dict['files'][0]['fileContents']['resourceInstances']...the format of the input is not correct!".format(
e
),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
except Exception as e:
return (
True,
[
{
"err_code": 5,
"err_txt": "Unknown Error '{0}:{1!r}' found in ingest-validate".format(type(e).__name__, e.args),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
# parent possible allocations is a a dict containing the list of possible parent activity allocations for each
# parent resource.
# keys are parent resource names and
# values are lists of parent activity names
# Use the allocation instances between the parent resource and activity to fill this
parent_allocation_instance = None
if "allocationInstances" not in dm3k_viz_data:
return (
True,
[
{
"err_code": 2,
"err_txt": "'allocationInstances' attribute is not in input_dict['files'][0]['fileContents']...the format of the input is not correct!",
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
try:
for ai in dm3k_viz_data["allocationInstances"]:
if (ai["resourceClassName"] == parent_resource) and (ai["activityClassName"] == parent_activity):
parent_allocation_instance = ai
for pai in parent_allocation_instance["instanceTable"]:
if pai["resourceInstanceName"] == "ALL":
rin = self._data["parent_resources"]
else:
rin = [pai["resourceInstanceName"]]
if pai["activityInstanceName"] == "ALL":
ain = self._data["parent_activities"]
else:
ain = [pai["activityInstanceName"]]
for r in rin:
for a in ain:
if r in self._data["parent_possible_allocations"]:
self._data["parent_possible_allocations"][r].append(a)
else:
self._data["parent_possible_allocations"][r] = [a]
log.debug("parent possible allocations")
log.debug(self._data["parent_possible_allocations"])
except KeyError as e:
return (
True,
[
{
"err_code": 2,
"err_txt": "'{}' key name was not in input_dict['files'][0]['fileContents']['allocationInstances']...the format of the input is not correct!".format(
e
),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
except Exception as e:
return (
True,
[
{
"err_code": 5,
"err_txt": "Unknown Error '{0}:{1!r}' found in ingest-validate".format(type(e).__name__, e.args),
"offender": "**YOU**",
"fix": "Cant fix this!",
"is_fatal_error": True,
}
],
)
# req_parent_amt is a dict of required cost for each parent activity instance
# keys are tuples (prn,pan) where prn is the parent resource name and pan is the parent activity name,
# values are float amounts
# to make the tuple you need to consult the parent possible allocations above
# to get the value you need to consult the activity instances
for pr in self._data["parent_possible_allocations"]:
for pa in self._data["parent_possible_allocations"][pr]:
tu = (pr, pa)
val = self._get_instance_prop(dm3k_viz_data, parent_activity, pa, prop_name="cost", class_type="activity")
# val can be a dictionary...full house can only take values
val = list(val.values())[0]
self._data["req_parent_amt"][tu] = val
log.debug("required parent amount")
log.debug(self._data["req_parent_amt"])
# a child activity is an activity that is contained by the parent activity
# (uses the class contained from parent activity above)
# list all the instance names of that class
# FUTURE:
        # - if only 1 activity exists, it is the child activity
child_activity = None
for ac in activity_classes:
if ac["className"] == parent_activity:
if len(ac["containsClasses"]) > 1:
log.warning("Parent Activity ({}) has more than one activity it contains...we are taking the first one")
child_activity = ac["containsClasses"][0] # assuming the first one
log.debug("Found child_activity = " + child_activity)
self._data["child_activities"] = self._get_all_instances(dm3k_viz_data, child_activity, "activity")
log.debug("child_activities")
log.debug(self._data["child_activities"])
# the child resource is a resource that can be allocated to the child activity
# so go through all the resourceClasses and find the one that can be allocated to the child activity class
# when you find the child resource, list all the instance names of that class
child_resource = None
for rc in resource_classes:
if child_activity in rc["canBeAllocatedToClasses"]:
child_resource = rc["className"]
log.debug("Found Child Resource = " + child_resource)
if child_resource is None:
self._add_to_validation_errors(
"Cannot find child resource, no resource can be allocated to the activity: " + child_activity,
is_fatal_error=True,
err_code=3,
offender="**YOU**",
fix=None,
)
return True, self._validation_errors # this is a fatal error, just stop now
else:
self._data["child_resources"] = self._get_all_instances(dm3k_viz_data, child_resource)
log.debug("child_resources")
log.debug(self._data["child_resources"])
# avail_child_amt is a dict of the total budget for each child resource:
# keys are child resource names,
# values are float amounts
child_res_instance = None
for ri in dm3k_viz_data["resourceInstances"]:
if ri["className"] == child_resource:
child_res_instance = ri
for cri in child_res_instance["instanceTable"]:
# budget is a dictionary...full house can only take values...also full house can only take 1 budget (i.e. the [0])
budget = list(cri["budget"].values())[0]
# need budget name for output
self._data["child_budget_name"] = list(cri["budget"].keys())[0]
self._data["avail_child_amt"][cri["instanceName"]] = budget
log.debug("avail_child_amt")
log.debug(self._data["avail_child_amt"])
# child possible allocations is a a dict containing the list of possible child activity allocations for each
# child resource.
# keys are child resource names and
# values are lists of child activity names
# Use the allocation instances between the child resource and activity to fill this
child_allocation_instance = None
for ai in dm3k_viz_data["allocationInstances"]:
if (ai["resourceClassName"] == child_resource) and (ai["activityClassName"] == child_activity):
child_allocation_instance = ai
for cai in child_allocation_instance["instanceTable"]:
if cai["resourceInstanceName"] == "ALL":
rin = self._data["child_resources"]
else:
rin = [cai["resourceInstanceName"]]
if cai["activityInstanceName"] == "ALL":
ain = self._data["child_activities"]
else:
ain = [cai["activityInstanceName"]]
for r in rin:
for a in ain:
if r in self._data["child_possible_allocations"]:
self._data["child_possible_allocations"][r].append(a)
else:
self._data["child_possible_allocations"][r] = [a]
log.debug("child possible allocations")
log.debug(self._data["child_possible_allocations"])
# req_child_amt is a dict of required cost for each parent activity instance
# keys are tuples (prn,pan) where prn is the parent resource name and pan is the parent activity name,
# values are float amounts
# to make the tuple you need to consult the child possible allocations above
# to get the value you need to consult the activity instances
for cr in self._data["child_possible_allocations"]:
for ca in self._data["child_possible_allocations"][cr]:
tu = (cr, ca)
val = self._get_instance_prop(dm3k_viz_data, child_activity, ca, prop_name="cost", class_type="activity")
# val can be a dictionary...full house can only take values
val = list(val.values())[0]
self._data["req_child_amt"][tu] = val
log.debug("required child amount")
log.debug(self._data["req_child_amt"])
# a child score is a dict of the float value of each child activity:
# keys are child activity names,
# values are float amounts
# just go through the activity instances and grab reward
for ca in self._data["child_activities"]:
val = self._get_instance_prop(dm3k_viz_data, child_activity, ca, prop_name="reward", class_type="activity")
self._data["child_score"][ca] = val
log.debug("child scores")
log.debug(self._data["child_score"])
# force and forbid list - leave blank
# resource families is a dict containing the parent resources and child resources for each resource container.
# a resource container is a resource that contains both the parent and child resource
# FUTURE - make a default resource container if there is not one and put all parents and children in it
# keys are resource container names and values are dicts with 2 keys 'parent_resources'
# (referencing the list of parent resources under this resource container) and 'child_resources'
# (referencing the list of child resources under this resource container)
# To construct this, find the resource container
# examine the contains instance between the resource container and the parent_resource
# examine the contains instance between the resource container and the child_resource
resource_container = None
for rc in dm3k_viz_data["resourceClasses"]:
contains_classes = rc["containsClasses"]
if child_resource in contains_classes and parent_resource in contains_classes:
resource_container = rc["className"]
if resource_container is None:
self._add_to_validation_errors(
"Cannot find resource container, no resource contains both: " + parent_resource + " and " + child_resource,
is_fatal_error=True,
err_code=3,
offender="**YOU**",
fix=None,
)
return True, self._validation_errors # this is a fatal error, just stop now
for ci in dm3k_viz_data["containsInstances"]:
if ci["parentClassName"] == resource_container and ci["childClassName"] == parent_resource:
for i in ci["instanceTable"]:
if i["parentInstanceName"] not in self._data["resource_families"]:
self._data["resource_families"][i["parentInstanceName"]] = {"parent_resources": [], "child_resources": []}
self._data["resource_families"][i["parentInstanceName"]]["parent_resources"].append(i["childInstanceName"])
# had to do separate loops because instance container must be set first
for ci in dm3k_viz_data["containsInstances"]:
if ci["parentClassName"] == resource_container and ci["childClassName"] == child_resource:
for i in ci["instanceTable"]:
self._data["resource_families"][i["parentInstanceName"]]["child_resources"].append(i["childInstanceName"])
log.debug("Resource Families")
log.debug(self._data["resource_families"])
# activity children is a dict containing the child activities of each parent activity.
# keys are parent activity names,
# values are list of child activity names for that parent
# To construct this, examine the contains instance between the parent activity and the child activity name
for ci in dm3k_viz_data["containsInstances"]:
if ci["parentClassName"] == parent_activity and ci["childClassName"] == child_activity:
for i in ci["instanceTable"]:
pin = i["parentInstanceName"]
cin = i["childInstanceName"]
if pin in self._data["activity_children"]:
self._data["activity_children"][pin].append(cin)
else:
self._data["activity_children"][pin] = [cin]
log.debug("Activity Children")
log.debug(self._data["activity_children"])
# FINISH BY VALIDATING
log.debug("Calling validate method")
self._validate()
log.debug("Calling fix method")
self._fix()
return self._fatal_error, self._validation_errors
def add_scores(self, activity_scores):
"""
Add activity scores to the _data input dictionary
NOTE - this method is not necessary for operation with UI but is kept here for when optimizers are
used outside of the UI
:param dict activity_scores: Dictionary of DU scores. Keys are child_activities
"""
if not activity_scores: # this should catch None and empty dict {}
            # assume this is how the viz will enter scores
# just take existing scores
# first check that it exists
if "child_score" not in self._data:
raise ValueError("Activity Scores are None and no scores currently exist...cannot continue")
# then move to use existing child_scores
else: # you supplied activity scores
if set(activity_scores.keys()) == set(self._data["child_activities"]):
self._data["child_score"] = activity_scores
else:
raise ValueError(
"Activity names do not match names in dataset:\n{}\n{}\n".format(
set(activity_scores.keys()), set(self._data["child_activities"])
)
)
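# Usage sketch (hypothetical payload following the class docstring format):
# viz_input = FullHouseInputViz()
# fatal, errors = viz_input.ingest_validate(
#     {"datasetName": "demo", "files": [{"fileContents": dm3k_viz_data}]})
# if not fatal:
#     ...  # the populated input can now be handed to the full-house optimizer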
| 42.082228 | 173 | 0.510589 |
acf59c593c9283293c4c1f619a5b575401a7b13a | 44,473 | py | Python | run_classifier.py | Hillary060/bert | 39cb69689f552e927404b197a07b608960af2a51 | [
"Apache-2.0"
] | null | null | null | run_classifier.py | Hillary060/bert | 39cb69689f552e927404b197a07b608960af2a51 | [
"Apache-2.0"
] | null | null | null | run_classifier.py | Hillary060/bert | 39cb69689f552e927404b197a07b608960af2a51 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# Required parameters
if 1:
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
# Other parameters
if 1:
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer("num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.")
# Each data row is converted into an InputExample object with
# four parts: example id, text_a, text_b, label (matching the dataset's columns)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
# When training on a TPU, the eval/predict data must be a multiple of batch_size; this class pads out such cases
class PaddingInputExample(object):
"""
Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
# InputFeatures is simply a container class for features; its attributes are input_ids, input_mask, segment_ids and label_id
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
               input_ids,   # input: token ids (token embedding); the first token is CLS and SEP separates the segments
               input_mask,  # input: attention mask distinguishing real tokens from padding
               segment_ids, # input: segment embedding encoding the text_a / text_b sentence relationship
               label_id,    # output: the label, corresponding to Y
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
# Data processing:
# provides the train, dev, test and label data,
# plus a method for reading tsv files
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
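# The processors below specialize DataProcessor for one dataset each; they all
# reuse _read_tsv, which returns one list of column values per input row.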
# XNLI (Cross-lingual Natural Language Inference)
# Cross-lingual understanding dataset: given a premise and a hypothesis, decide whether the hypothesis is entailed by, contradicts, or is neutral to the premise.
# A natural language inference task that
# evaluates the model's cross-lingual sentence representations, i.e. its language understanding
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
# MNLI (Multi-Genre Natural Language Inference)
# NLI task: given a premise and a hypothesis, decide whether the relation is entailment, contradiction or neutral (does the hypothesis hold?)
# Sentence-pair input, three-way classification
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
# MRPC (Microsoft Research Paraphrase Corpus)
# Decide whether two given sentences have the same meaning
# Sentence-pair input, binary classification
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
# CoLA (The Corpus of Linguistic Acceptability)
# Decide whether a given sentence is grammatically acceptable.
# Single-sentence input, binary classification.
# Differences from the sentence-pair processors: 1) the return value of
# get_labels(); 2) whether InputExample gets a text_b (here text_b=None).
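# Illustrative (hypothetical) rows:
#   "The boy laughed."  -> 1 (acceptable)
#   "Laughed boy the."  -> 0 (unacceptable)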
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
# Custom task class: only get_labels needs to change.
class MyTaskProcessor(DataProcessor):
"""Processor for custom task classification """
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'val.tsv')), 'val')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
    return ['电商', '新闻', '社交']  # multi-class labels (e-commerce, news, social)
def _create_examples(self, lines, set_type):
"""create examples for the training and val sets"""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = '%s-%s' % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
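# A minimal sketch of the tab-separated layout MyTaskProcessor assumes
# (hypothetical sample rows; column 0 = label, column 1 = text):
#
#   电商\t这款手机壳质量很好,物流也快
#   新闻\t央行今日发布最新货币政策报告
#   社交\t周末一起去爬山的朋友私信我
#
# For the test set, column 1 still holds the text and a header row is
# expected (see the `set_type == "test"` branch above).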
# Convert InputExamples into InputFeatures, then into TFRecord format so TensorFlow can consume them efficiently.
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
  writer = tf.python_io.TFRecordWriter(output_file)  # 2. write TFRecords for TensorFlow to consume
  # Iterate over every example (an InputExample instance)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    # 1. Call convert_single_example to turn the InputExample into an InputFeatures
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id]) # 除了label_id都是list类型
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
    # The outer object is a tf.train.Features instance; inside it is the dict of features
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
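# A rough sketch of what a single serialized record holds (hypothetical
# values, max_seq_length=8):
#   input_ids:       [101, 2769, 4263, 872, 102, 0, 0, 0]   # [CLS] ... [SEP] + padding
#   input_mask:      [1,   1,    1,    1,   1,   0, 0, 0]
#   segment_ids:     [0,   0,    0,    0,   0,   0, 0, 0]
#   label_ids:       [2]
#   is_real_example: [1]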
# Read and parse the TFRecord data, turning each serialized record back into a feature dict:
# 1. Build a TFRecordDataset from the file, then shuffle and repeat it when training.
# 2. Apply map_and_batch over the records, calling _decode_record to parse each one.
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
"""
tf.FixedLenFeature 返回的是一个【定长】的tensor
第一个参数为特征的长度(元素的个数),如 max_seq_length
如果只有一个整数,直接传[]。如label_id是一个整数,则只传[]
第二个参数为特征的类型
因为tf.Example只支持tf.int64,所以传值tf.int64
"""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
  # Parse each record of the TFRecord file (via parse_single_example) and cast the result to int32.
  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # Each record in the TFRecord file is a serialized tf.train.Example.
def _decode_record(record, name_to_features):
    # Parse one TFRecord record into a dict of tensors
example = tf.parse_single_example(record, name_to_features)
    # Cast int64 down to int32
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
    # 1. Read the record file into a TFRecordDataset, then shuffle/repeat when training
    """
    Create a TFRecordDataset as the input pipeline of the TensorFlow model.
    tf.data.TFRecordDataset(
        filenames, compression_type=None, buffer_size=None, num_parallel_reads=None
    )
    filenames
        path(s) to the TFRecord file(s), e.g. train.tf_record
    """
    d = tf.data.TFRecordDataset(input_file)  # input_file: e.g. train.tf_record
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
    if is_training:  # during training, read in parallel and shuffle many times (eval/predict do not need this)
d = d.repeat()
d = d.shuffle(buffer_size=100)
    # 2. Apply map_and_batch, calling _decode_record to parse every record in the TFRecord file
    """
    tf.contrib.data.map_and_batch(
        map_func,
        batch_size,
        num_parallel_batches=None,
        drop_remainder=False,
        num_parallel_calls=None
    )
    Fused implementation of map and batch:
    map_func is applied to batch_size consecutive elements of the dataset,
    which are then combined into a single batch.
    Functionally it is equivalent to map followed by batch.
    map_func
        function mapping a nested structure of tensors to another nested structure of tensors
    batch_size
        number of consecutive elements of this dataset to combine in a single batch
        (a tf.int64 scalar tf.Tensor)
    drop_remainder (optional)
        whether the last batch should be dropped if it has fewer than batch_size elements;
        the default is to keep the smaller batch.
    """
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder
)
)
return d
return input_fn
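# A minimal usage sketch (assumptions: "train.tf_record" exists and
# seq_length matches the value used when the records were written):
#
#   train_input_fn = file_based_input_fn_builder(
#       input_file="train.tf_record",
#       seq_length=128,
#       is_training=True,
#       drop_remainder=True)
#   dataset = train_input_fn({"batch_size": 32})
#   # Each element is a dict of int32 tensors:
#   # {"input_ids": [32, 128], "input_mask": [32, 128],
#   #  "segment_ids": [32, 128], "label_ids": [32], "is_real_example": [32]}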
# Truncate the token sequences so the pair fits within max_length.
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
    # Pop the last token from whichever of tokens_a / tokens_b is currently longer;
    # this tends to leave the two token sequences roughly the same length.
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
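# A worked example of the heuristic: with max_length=8, len(tokens_a)=10 and
# len(tokens_b)=4, tokens are popped from tokens_a only (it stays the longer
# sequence) until 4 + 4 <= 8, leaving both sequences at length 4.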
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
  # 1. Build the BERT model
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
  # 2. Call BertModel.get_pooled_output to get the final-layer [CLS] output
  #    (for classification); by default it is a 768-dim vector.
  """
  Classification tasks: use the final-layer [CLS] output (768-dim by default).
  Sequence labeling tasks: use model.get_sequence_output() instead.
  """
  # Take the final-layer [CLS] output and treat it as the sentence embedding
output_layer = model.get_pooled_output() # entire segment
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
    # 3. Compute logits: a fully connected layer applies a linear transform to the
    #    768-dim [CLS] vector, producing logits of size num_labels
    logits = tf.matmul(output_layer, output_weights, transpose_b=True)  # batch_size examples, num_labels scores each
logits = tf.nn.bias_add(logits, output_bias)
    # 4. Softmax to get probabilities
    probabilities = tf.nn.softmax(logits, axis=-1)  # normalizes the dense-layer outputs into per-class probabilities
    log_probs = tf.nn.log_softmax(logits, axis=-1)  # log-probabilities for the cross-entropy below
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    # Compute the loss against the true class labels
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) # Float Tensor; the loss for this step
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
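# A worked numeric example of the loss above (hypothetical values,
# num_labels=3, true label = 0):
#   logits      = [2.0, 0.5, 0.1]
#   softmax     ~ [0.729, 0.163, 0.109]       # exp(x_i) / sum_j exp(x_j)
#   log_softmax ~ [-0.317, -1.817, -2.217]
#   one_hot     = [1, 0, 0]
#   per_example_loss = -sum(one_hot * log_softmax) ~ 0.317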
# [1] Build the Transformer model
# [2] Initialize the model parameters from the checkpoint
# [3] Build the SPEC: a different EstimatorSpec for each of the train/eval/predict modes
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
"""
features:输入(特征) input_ids input_mask segment_ids label_ids
labels:输出
mode:三种,分别对应训练、测试、预测:tf.estimator.ModeKeys.TRAIN、EVAL、PREDICT
"""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
# todo
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
""" [1] 构建transformer模型 【最主要】----------------------------------------------------- """
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings) # total loss: Float Tensor; the loss for this step
"""
tf.trainable_variables()
- 查找需要训练的变量:通过变量的 trainable 是否为True查找
- 定义变量时,有trainable参数
True(默认):需要训练;
False:不需要训练
- 如 lr = tf.Variable(2e-5, trainable=False),表示 lr 不需要训练
- 需要训练的参数有:权重矩阵、偏置向量等
"""
tvars = tf.trainable_variables()
    initialized_variable_names = {}  # names of the parameters initialized from the checkpoint
scaffold_fn = None
"""
scaffold_fn
- 在CPU上运行,用来生成 Scaffold 的函数。
- 该函数不应在 model_fn 中捕获任何张量。
"""
""" [2] 从checkpoint中获取参数,初始化模型 """
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
"""
Spec是specification的简写,中文"规格"
tf.estimator.EstimatorSpec对象,是模型的【评价指标相关信息(如loss,metric)】,用来训练模型、分析模型结果。
train/eval/predict的spec不同,如
train过程包含train_op优化器
eval过程包含eval_metrics,测试评价指标
predict包含预测的结果概率
"""
""" [3] 构造SPEC """
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
"""构造训练spec"""
# 训练操作,estimator会根据它训练、优化模型
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
"""构造测试spec"""
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""计算准确率和损失值"""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
"""构造预测spec"""
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
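# Note on is_real_example: padding examples (appended so eval/predict batches
# divide evenly on TPU) carry weight 0.0 in metric_fn, so eval_accuracy and
# eval_loss are computed over real examples only. E.g. with 10 real examples
# and eval_batch_size=8, 6 padding examples are appended; their predictions
# exist but contribute nothing to the metrics.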
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# Convert the extracted fields (an InputExample) into the BERT input representation (an InputFeatures).
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
  if isinstance(example, PaddingInputExample):  # dummy features returned for a padding example
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
  for (i, label) in enumerate(label_list):  # map each label to an id
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
  # Truncate the sequences to the maximum allowed length
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack #son #ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
  # Note that the segment id of the first [SEP] is 0.
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
  # For classification tasks, the [CLS] vector can be treated as the sentence vector.
# Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
  for token in tokens_a:  # tokens_a has been split into word pieces (characters for Chinese)
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]") # 第一个[SEP]的segment token 是 0
segment_ids.append(0)
if tokens_b:
    for token in tokens_b:  # tokens_b likewise, with segment id 1
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
  input_ids = tokenizer.convert_tokens_to_ids(tokens)  # map every token to its vocabulary id
  # A mask of 1 marks a "real" token; 0 marks padding.
  # Later, attention uses a trick so the model cannot attend to the padded positions.
input_mask = [1] * len(input_ids)
  # Pad so the sequence length is exactly max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
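# A worked sketch of the conversion (hypothetical tokens, max_seq_length=10):
#   text_a -> ["he", "likes", "cats"], text_b -> ["she", "does", "too"]
#   tokens      = [CLS] he likes cats [SEP] she does too [SEP]
#   segment_ids = [ 0    0    0     0    0    1   1    1    1 ]
#   input_mask  = [1] * 9 + [0]        # one position of padding
#   input_ids   = vocabulary ids of the tokens above, padded with 0 to length 10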
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
# {task name: processor class}
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
      # register the custom task processor
"mytask": MyTaskProcessor
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
  # Validate the flags
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
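    # Worked example (hypothetical flags): 1,000 training examples,
    # train_batch_size=32, num_train_epochs=3.0 -> num_train_steps = 93;
    # warmup_proportion=0.1 -> num_warmup_steps = 9.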
""" 构建Estimator """
# model_fn_builder
# [1] 构建transformer模型
# [2] 从checkpoint中获取参数,初始化模型
# [3] 构造SPEC:根据train/eval/predict mode构建不同的SPEC
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
""" Input_example转化为Feature_example,再转换为TFRecord格式"""
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
""" 模型输入
读取并解析tfrecord数据,把TFRecord的一条Record变成tf.Example对象
"""
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
    # turn each TFRecord record into a feature dict
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run() | 38.672174 | 118 | 0.628943 |
acf59c71c1b2a97c8c9a58d5de7f4f615f6a3295 | 50,376 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/HScrollBar.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/HScrollBar.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/HScrollBar.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class HScrollBar(
ScrollBar,
IComponent,
IDisposable,
IOleControl,
IOleObject,
IOleInPlaceObject,
IOleInPlaceActiveObject,
IOleWindow,
IViewObject,
IViewObject2,
IPersist,
IPersistStreamInit,
IPersistPropertyBag,
IPersistStorage,
IQuickActivate,
ISupportOleDropSource,
IDropTarget,
ISynchronizeInvoke,
IWin32Window,
IArrangedElement,
IBindableComponent,
):
"""
Represents a standard Windows horizontal scroll bar.
HScrollBar()
"""
def AccessibilityNotifyClients(self, *args):
"""
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,objectID: int,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control .
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
objectID: The identifier of the System.Windows.Forms.AccessibleObject.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
"""
pass
def CreateAccessibilityInstance(self, *args):
"""
CreateAccessibilityInstance(self: Control) -> AccessibleObject
Creates a new accessibility object for the control.
Returns: A new System.Windows.Forms.AccessibleObject for the control.
"""
pass
def CreateControlsInstance(self, *args):
"""
CreateControlsInstance(self: Control) -> ControlCollection
Creates a new instance of the control collection for the control.
Returns: A new instance of System.Windows.Forms.Control.ControlCollection assigned to the control.
"""
pass
def CreateHandle(self, *args):
"""
CreateHandle(self: Control)
Creates a handle for the control.
"""
pass
def DefWndProc(self, *args):
"""
DefWndProc(self: Control,m: Message) -> Message
Sends the specified message to the default window procedure.
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def DestroyHandle(self, *args):
"""
DestroyHandle(self: Control)
Destroys the handle associated with the control.
"""
pass
def Dispose(self):
"""
Dispose(self: Control,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.Control and its child controls
and optionally releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def GetAccessibilityObjectById(self, *args):
"""
GetAccessibilityObjectById(self: Control,objectId: int) -> AccessibleObject
Retrieves the specified System.Windows.Forms.AccessibleObject.
objectId: An Int32 that identifies the System.Windows.Forms.AccessibleObject to retrieve.
Returns: An System.Windows.Forms.AccessibleObject.
"""
pass
def GetAutoSizeMode(self, *args):
"""
GetAutoSizeMode(self: Control) -> AutoSizeMode
Retrieves a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
Returns: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def GetScaledBounds(self, *args):
"""
GetScaledBounds(self: ScrollBar,bounds: Rectangle,factor: SizeF,specified: BoundsSpecified) -> Rectangle
Returns the bounds to use when the System.Windows.Forms.ScrollBar is scaled by a specified
amount.
bounds: A System.Drawing.Rectangle that specifies the initial bounds.
factor: A System.Drawing.SizeF that indicates the amount the current bounds should be increased by.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values that indicate the how
to define the control's size and position returned by
System.Windows.Forms.ScrollBar.GetScaledBounds(System.Drawing.Rectangle,System.Drawing.SizeF,Syst
em.Windows.Forms.BoundsSpecified).
Returns: A System.Drawing.Rectangle specifying the scaled bounds.
"""
pass
def GetService(self, *args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def GetStyle(self, *args):
"""
GetStyle(self: Control,flag: ControlStyles) -> bool
Retrieves the value of the specified control style bit for the control.
flag: The System.Windows.Forms.ControlStyles bit to return the value from.
Returns: true if the specified control style bit is set to true; otherwise,false.
"""
pass
def GetTopLevel(self, *args):
"""
GetTopLevel(self: Control) -> bool
Determines if the control is a top-level control.
Returns: true if the control is a top-level control; otherwise,false.
"""
pass
def InitLayout(self, *args):
"""
InitLayout(self: Control)
Called after the control has been added to another container.
"""
pass
def InvokeGotFocus(self, *args):
"""
InvokeGotFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeLostFocus(self, *args):
"""
InvokeLostFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeOnClick(self, *args):
"""
InvokeOnClick(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Click event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokePaint(self, *args):
"""
InvokePaint(self: Control,c: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def InvokePaintBackground(self, *args):
"""
InvokePaintBackground(self: Control,c: Control,e: PaintEventArgs)
Raises the PaintBackground event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def IsInputChar(self, *args):
"""
IsInputChar(self: Control,charCode: Char) -> bool
Determines if a character is an input character that the control recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the control and not preprocessed; otherwise,
false.
"""
pass
def IsInputKey(self, *args):
"""
IsInputKey(self: Control,keyData: Keys) -> bool
Determines whether the specified key is a regular input key or a special key that requires
preprocessing.
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def MemberwiseClone(self, *args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def NotifyInvalidate(self, *args):
"""
NotifyInvalidate(self: Control,invalidatedArea: Rectangle)
Raises the System.Windows.Forms.Control.Invalidated event with a specified region of the control
to invalidate.
invalidatedArea: A System.Drawing.Rectangle representing the area to invalidate.
"""
pass
def OnAutoSizeChanged(self, *args):
"""
OnAutoSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.AutoSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackColorChanged(self, *args):
"""
OnBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageChanged(self, *args):
"""
OnBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageLayoutChanged(self, *args):
"""
OnBackgroundImageLayoutChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBindingContextChanged(self, *args):
"""
OnBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCausesValidationChanged(self, *args):
"""
OnCausesValidationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CausesValidationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnChangeUICues(self, *args):
"""
OnChangeUICues(self: Control,e: UICuesEventArgs)
Raises the System.Windows.Forms.Control.ChangeUICues event.
e: A System.Windows.Forms.UICuesEventArgs that contains the event data.
"""
pass
def OnClick(self, *args):
"""
OnClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnClientSizeChanged(self, *args):
"""
OnClientSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ClientSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuChanged(self, *args):
"""
OnContextMenuChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuStripChanged(self, *args):
"""
OnContextMenuStripChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuStripChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnControlAdded(self, *args):
"""
OnControlAdded(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlAdded event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnControlRemoved(self, *args):
"""
OnControlRemoved(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlRemoved event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnCreateControl(self, *args):
"""
OnCreateControl(self: Control)
Raises the System.Windows.Forms.Control.CreateControl method.
"""
pass
def OnCursorChanged(self, *args):
"""
OnCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDockChanged(self, *args):
"""
OnDockChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DockChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self, *args):
"""
OnDoubleClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDpiChangedAfterParent(self, *args):
""" OnDpiChangedAfterParent(self: Control,e: EventArgs) """
pass
def OnDpiChangedBeforeParent(self, *args):
""" OnDpiChangedBeforeParent(self: Control,e: EventArgs) """
pass
def OnDragDrop(self, *args):
"""
OnDragDrop(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragDrop event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self, *args):
"""
OnDragEnter(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragEnter event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self, *args):
"""
OnDragLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self, *args):
"""
OnDragOver(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragOver event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self, *args):
"""
OnEnabledChanged(self: ScrollBar,e: EventArgs)
e: The event data.
"""
pass
def OnEnter(self, *args):
"""
OnEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self, *args):
"""
OnFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self, *args):
"""
OnForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self, *args):
"""
OnGiveFeedback(self: Control,gfbevent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.Control.GiveFeedback event.
gfbevent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self, *args):
"""
OnGotFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleCreated(self, *args):
"""
OnHandleCreated(self: ScrollBar,e: EventArgs)
e: The event data.
"""
pass
def OnHandleDestroyed(self, *args):
"""
OnHandleDestroyed(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.HandleDestroyed event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHelpRequested(self, *args):
"""
OnHelpRequested(self: Control,hevent: HelpEventArgs)
Raises the System.Windows.Forms.Control.HelpRequested event.
hevent: A System.Windows.Forms.HelpEventArgs that contains the event data.
"""
pass
def OnImeModeChanged(self, *args):
"""
OnImeModeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ImeModeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnInvalidated(self, *args):
"""
OnInvalidated(self: Control,e: InvalidateEventArgs)
Raises the System.Windows.Forms.Control.Invalidated event.
e: An System.Windows.Forms.InvalidateEventArgs that contains the event data.
"""
pass
def OnKeyDown(self, *args):
"""
OnKeyDown(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnKeyPress(self, *args):
"""
OnKeyPress(self: Control,e: KeyPressEventArgs)
Raises the System.Windows.Forms.Control.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""
pass
def OnKeyUp(self, *args):
"""
OnKeyUp(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self, *args):
"""
OnLayout(self: Control,levent: LayoutEventArgs)
Raises the System.Windows.Forms.Control.Layout event.
levent: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self, *args):
"""
OnLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Leave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self, *args):
"""
OnLocationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self, *args):
"""
OnLostFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMarginChanged(self, *args):
"""
OnMarginChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MarginChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseCaptureChanged(self, *args):
"""
OnMouseCaptureChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseCaptureChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseClick(self, *args):
"""
OnMouseClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDoubleClick(self, *args):
"""
OnMouseDoubleClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDoubleClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDown(self, *args):
"""
OnMouseDown(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseEnter(self, *args):
"""
OnMouseEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self, *args):
"""
OnMouseHover(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self, *args):
"""
OnMouseLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseMove(self, *args):
"""
OnMouseMove(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseMove event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseUp(self, *args):
"""
OnMouseUp(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseWheel(self, *args):
"""
OnMouseWheel(self: ScrollBar,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseWheel event
e: A System.Windows.Forms.MouseEventArgs
"""
pass
def OnMove(self, *args):
"""
OnMove(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Move event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnNotifyMessage(self, *args):
"""
OnNotifyMessage(self: Control,m: Message)
Notifies the control of Windows messages.
m: A System.Windows.Forms.Message that represents the Windows message.
"""
pass
def OnPaddingChanged(self, *args):
"""
OnPaddingChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.PaddingChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self, *args):
"""
OnPaint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnPaintBackground(self, *args):
"""
OnPaintBackground(self: Control,pevent: PaintEventArgs)
Paints the background of the control.
pevent: A System.Windows.Forms.PaintEventArgs that contains information about the control to paint.
"""
pass
def OnParentBackColorChanged(self, *args):
"""
OnParentBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event when the
System.Windows.Forms.Control.BackColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBackgroundImageChanged(self, *args):
"""
OnParentBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event when the
System.Windows.Forms.Control.BackgroundImage property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBindingContextChanged(self, *args):
"""
OnParentBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event when the
System.Windows.Forms.Control.BindingContext property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self, *args):
"""
OnParentChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ParentChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentCursorChanged(self, *args):
"""
OnParentCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentEnabledChanged(self, *args):
"""
OnParentEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event when the
System.Windows.Forms.Control.Enabled property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentFontChanged(self, *args):
"""
OnParentFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.Control.Font property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self, *args):
"""
OnParentForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event when the
System.Windows.Forms.Control.ForeColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self, *args):
"""
OnParentRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event when the
System.Windows.Forms.Control.RightToLeft property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentVisibleChanged(self, *args):
"""
OnParentVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event when the
System.Windows.Forms.Control.Visible property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnPreviewKeyDown(self, *args):
"""
OnPreviewKeyDown(self: Control,e: PreviewKeyDownEventArgs)
Raises the System.Windows.Forms.Control.PreviewKeyDown event.
e: A System.Windows.Forms.PreviewKeyDownEventArgs that contains the event data.
"""
pass
def OnPrint(self, *args):
"""
OnPrint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self, *args):
"""
OnQueryContinueDrag(self: Control,qcdevent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.Control.QueryContinueDrag event.
qcdevent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnRegionChanged(self, *args):
"""
OnRegionChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RegionChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnResize(self, *args):
"""
OnResize(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Resize event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self, *args):
"""
OnRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnScroll(self, *args):
"""
OnScroll(self: ScrollBar,se: ScrollEventArgs)
Raises the System.Windows.Forms.ScrollBar.Scroll event.
se: A System.Windows.Forms.ScrollEventArgs that contains the event data.
"""
pass
def OnSizeChanged(self, *args):
"""
OnSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnStyleChanged(self, *args):
"""
OnStyleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.StyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSystemColorsChanged(self, *args):
"""
OnSystemColorsChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SystemColorsChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabIndexChanged(self, *args):
"""
OnTabIndexChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabStopChanged(self, *args):
"""
OnTabStopChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabStopChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTextChanged(self, *args):
"""
OnTextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidated(self, *args):
"""
OnValidated(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Validated event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidating(self, *args):
"""
OnValidating(self: Control,e: CancelEventArgs)
Raises the System.Windows.Forms.Control.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnValueChanged(self, *args):
"""
OnValueChanged(self: ScrollBar,e: EventArgs)
Raises the System.Windows.Forms.ScrollBar.ValueChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnVisibleChanged(self, *args):
"""
OnVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def ProcessCmdKey(self, *args):
"""
ProcessCmdKey(self: Control,msg: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
msg: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogChar(self, *args):
"""
ProcessDialogChar(self: Control,charCode: Char) -> bool
Processes a dialog character.
charCode: The character to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogKey(self, *args):
"""
ProcessDialogKey(self: Control,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the control; otherwise,false.
"""
pass
def ProcessKeyEventArgs(self, *args):
"""
ProcessKeyEventArgs(self: Control,m: Message) -> (bool,Message)
Processes a key message and generates the appropriate control events.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyMessage(self, *args):
"""
ProcessKeyMessage(self: Control,m: Message) -> (bool,Message)
Processes a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyPreview(self, *args):
"""
ProcessKeyPreview(self: Control,m: Message) -> (bool,Message)
Previews a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessMnemonic(self, *args):
"""
ProcessMnemonic(self: Control,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""
pass
def RaiseDragEvent(self, *args):
"""
RaiseDragEvent(self: Control,key: object,e: DragEventArgs)
Raises the appropriate drag event.
key: The event to raise.
e: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def RaiseKeyEvent(self, *args):
"""
RaiseKeyEvent(self: Control,key: object,e: KeyEventArgs)
Raises the appropriate key event.
key: The event to raise.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def RaiseMouseEvent(self, *args):
"""
RaiseMouseEvent(self: Control,key: object,e: MouseEventArgs)
Raises the appropriate mouse event.
key: The event to raise.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def RaisePaintEvent(self, *args):
"""
RaisePaintEvent(self: Control,key: object,e: PaintEventArgs)
Raises the appropriate paint event.
key: The event to raise.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def RecreateHandle(self, *args):
"""
RecreateHandle(self: Control)
Forces the re-creation of the handle for the control.
"""
pass
def RescaleConstantsForDpi(self, *args):
""" RescaleConstantsForDpi(self: Control,deviceDpiOld: int,deviceDpiNew: int) """
pass
def ResetMouseEventArgs(self, *args):
"""
ResetMouseEventArgs(self: Control)
Resets the control to handle the System.Windows.Forms.Control.MouseLeave event.
"""
pass
def RtlTranslateAlignment(self, *args):
"""
RtlTranslateAlignment(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
RtlTranslateAlignment(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
RtlTranslateAlignment(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateContent(self, *args):
"""
RtlTranslateContent(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
"""
pass
def RtlTranslateHorizontal(self, *args):
"""
RtlTranslateHorizontal(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateLeftRight(self, *args):
"""
RtlTranslateLeftRight(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
"""
pass
def ScaleControl(self, *args):
"""
ScaleControl(self: Control,factor: SizeF,specified: BoundsSpecified)
Scales a control's location,size,padding and margin.
factor: The factor by which the height and width of the control will be scaled.
specified: A System.Windows.Forms.BoundsSpecified value that specifies the bounds of the control to use
when defining its size and position.
"""
pass
def ScaleCore(self, *args):
"""
ScaleCore(self: Control,dx: Single,dy: Single)
This method is not relevant for this class.
dx: The horizontal scaling factor.
dy: The vertical scaling factor.
"""
pass
def Select(self):
"""
Select(self: Control,directed: bool,forward: bool)
Activates a child control. Optionally specifies the direction in the tab order to select the
control from.
directed: true to specify the direction of the control to select; otherwise,false.
forward: true to move forward in the tab order; false to move backward in the tab order.
"""
pass
def SetAutoSizeMode(self, *args):
"""
SetAutoSizeMode(self: Control,mode: AutoSizeMode)
Sets a value indicating how a control will behave when its System.Windows.Forms.Control.AutoSize
property is enabled.
mode: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def SetBoundsCore(self, *args):
"""
SetBoundsCore(self: Control,x: int,y: int,width: int,height: int,specified: BoundsSpecified)
Performs the work of setting the specified bounds of this control.
x: The new System.Windows.Forms.Control.Left property value of the control.
y: The new System.Windows.Forms.Control.Top property value of the control.
width: The new System.Windows.Forms.Control.Width property value of the control.
height: The new System.Windows.Forms.Control.Height property value of the control.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values.
"""
pass
def SetClientSizeCore(self, *args):
"""
SetClientSizeCore(self: Control,x: int,y: int)
Sets the size of the client area of the control.
x: The client area width,in pixels.
y: The client area height,in pixels.
"""
pass
def SetStyle(self, *args):
"""
SetStyle(self: Control,flag: ControlStyles,value: bool)
Sets a specified System.Windows.Forms.ControlStyles flag to either true or false.
flag: The System.Windows.Forms.ControlStyles bit to set.
value: true to apply the specified style to the control; otherwise,false.
"""
pass
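    # Illustrative usage (a sketch, not part of the generated stubs): custom
    # controls typically pair SetStyle with UpdateStyles, e.g. to enable
    # double buffering:
    #
    #   self.SetStyle(ControlStyles.OptimizedDoubleBuffer, True)
    #   self.UpdateStyles()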
def SetTopLevel(self, *args):
"""
SetTopLevel(self: Control,value: bool)
Sets the control as the top-level control.
value: true to set the control as the top-level control; otherwise,false.
"""
pass
def SetVisibleCore(self, *args):
"""
SetVisibleCore(self: Control,value: bool)
Sets the control to the specified visible state.
value: true to make the control visible; otherwise,false.
"""
pass
def SizeFromClientSize(self, *args):
"""
SizeFromClientSize(self: Control,clientSize: Size) -> Size
Determines the size of the entire control from the height and width of its client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's client area.
Returns: A System.Drawing.Size value representing the height and width of the entire control.
"""
pass
def UpdateBounds(self, *args):
"""
UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
Updates the bounds of the control with the specified size,location,and client size.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
clientWidth: The client System.Drawing.Size.Width of the control.
clientHeight: The client System.Drawing.Size.Height of the control.
UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
Updates the bounds of the control with the specified size and location.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
UpdateBounds(self: Control)
Updates the bounds of the control with the current size and location.
"""
pass
def UpdateScrollInfo(self, *args):
"""
UpdateScrollInfo(self: ScrollBar)
Updates the System.Windows.Forms.ScrollBar control.
"""
pass
def UpdateStyles(self, *args):
"""
UpdateStyles(self: Control)
Forces the assigned styles to be reapplied to the control.
"""
pass
def UpdateZOrder(self, *args):
"""
UpdateZOrder(self: Control)
Updates the control in its parent's z-order.
"""
pass
def WndProc(self, *args):
"""
WndProc(self: ScrollBar,m: Message) -> Message
Overrides the System.Windows.Forms.Control.WndProc(System.Windows.Forms.Message@) method.
m: A Windows Message object.
"""
pass
def __enter__(self, *args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __str__(self, *args):
pass
CanEnableIme = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""
CanRaiseEvents = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Determines if events can be raised on the control.
"""
CreateParams = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the required creation parameters when the control handle is created.
"""
DefaultCursor = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the default cursor for the control.
"""
DefaultImeMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default Input Method Editor (IME) mode supported by this control.
"""
DefaultMargin = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default distance between the System.Windows.Forms.ScrollBar control edges and its contents.
"""
DefaultMaximumSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""
DefaultMinimumSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""
DefaultPadding = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the internal spacing,in pixels,of the contents of a control.
"""
DefaultSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
DesignMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DoubleBuffered = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether this control should redraw its surface using a secondary buffer to reduce or prevent flicker.
"""
Events = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FontHeight = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the height of the font of the control.
"""
ImeModeBase = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the IME mode of a control.
"""
RenderRightToLeft = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is now obsolete.
"""
ResizeRedraw = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether the control redraws itself when resized.
"""
ScaleChildren = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that determines the scaling of child controls.
"""
ShowFocusCues = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the control should display focus rectangles.
"""
ShowKeyboardCues = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the user interface is in the appropriate state to show or hide keyboard accelerators.
"""
| 23.182697 | 221 | 0.653704 |
acf59c7529ebaf9b43006db311dd17abf02180e3 | 338 | py | Python | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from rent.models import Rent, Car

# Register your models here.


class RentAdmin(admin.ModelAdmin):
    list_display = [f.name for f in Rent._meta.fields]


admin.site.register(Rent, RentAdmin)


class CarAdmin(admin.ModelAdmin):
    list_display = [f.name for f in Car._meta.fields]


admin.site.register(Car, CarAdmin)
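# For reference, a minimal sketch of the explicit equivalent (field names here
# are hypothetical; the introspected version above always tracks rent/models.py):
#   class RentAdmin(admin.ModelAdmin):
#       list_display = ['id', 'car', 'start_date', 'end_date']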
| 22.533333 | 49 | 0.789941 |
acf59ca7d715f91f4c8078da84321bce9e3f6771 | 7,350 | py | Python | manhuagui.py | q6806161/manhuagui | 42e278d73c234f2015823a4ef3dadeebdc596d56 | [
"MIT"
] | null | null | null | manhuagui.py | q6806161/manhuagui | 42e278d73c234f2015823a4ef3dadeebdc596d56 | [
"MIT"
] | null | null | null | manhuagui.py | q6806161/manhuagui | 42e278d73c234f2015823a4ef3dadeebdc596d56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Lvcong Chen
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException,NoSuchElementException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from multiprocessing import Process, Queue
import requests
import urllib3
import socket
import time
import random
import re
import sys
import os
import winsound
requests.packages.urllib3.disable_warnings()
class One_Punch_Man_Spider(object):
def __init__(self):
self.pattern_maxpage = re.compile(r"""(
<h2>(.*?)</h2>
.*?<span\s+id=['|"]page['|"]>\d+</span>\W+(\d+)
)""",re.VERBOSE|re.S)
self.pattern_picture_download_url = re.compile(r"""(
(id=['|"]mangaFile['|"]\s+src=['|"](.*?)['|"]) # 图片下载地址提取
)""",re.VERBOSE|re.S)
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                                      'Chrome/74.0.3729.108 Safari/537.36'}
self.s = requests.Session()
self.url_charpter_first_page_first = "https://www.manhuagui.com/comic/9637/438862.html"
#
def chrome_set(self):
"""chorm的selenium设置"""
chrome_options=Options()
chrome_options.add_argument('--ignore-certificate-errors')
# chrome_options.add_argument('--headless')
capa = DesiredCapabilities.CHROME
capa["pageLoadStrategy"] = "none"
driver = webdriver.Chrome(desired_capabilities=capa, options=chrome_options)
wait = WebDriverWait(driver,7)
return (driver,wait)
    # Collect the image download URLs for one chapter
def picture_url_crawler(self,maxpage,driver,wait):
page_turn = 1
picture_url_list = []
check_time = 0
while page_turn <= int(maxpage) and check_time<=3:
try:
wait.until(EC.presence_of_element_located((By.ID,"mangaFile")))
html_text = driver.page_source
items = re.findall(self.pattern_picture_download_url,html_text)
                picture_url = re.sub(';', '&', re.sub('&', '', items[0][-1]))  # site-specific clean-up of the HTML-escaped query string
picture_url_list.append(picture_url)
                page_next = wait.until(EC.element_to_be_clickable((By.ID, "next")))  # advance to the next page
driver.execute_script("arguments[0].click();", page_next)
time.sleep(random.uniform(1,3))
page_turn += 1
except TimeoutException as e:
driver.refresh()
check_time +=1
self.alarm_sound(e)
continue
        if check_time > 3:  # retries were exhausted; abort the crawl
            sys.exit()
return picture_url_list
    # Alarm sound helper
def alarm_sound(self,e):
winsound.Beep(200, 3000)
print("元素不存在",e)
"""获取每话首页渲染后的html"""
def picture_url_list(self,q):
driver,wait = self.chrome_set()
try:
driver.get(self.url_charpter_first_page_first)
wait.until(EC.presence_of_element_located((By.ID,"tbBox")))
driver.execute_script('window.stop()')
end_flag = 1
check_time = 0
except TimeoutException as e:
self.alarm_sound(e)
else:
while end_flag!=0 and check_time<=3:
try:
url_now = driver.current_url
wait.until(EC.presence_of_element_located((By.ID,"mangaFile")))
html_text_maxpage = driver.page_source
maxpage = re.findall(self.pattern_maxpage,html_text_maxpage)[0][-1]
charpter = re.findall(self.pattern_maxpage,html_text_maxpage)[0][1]
referer = re.sub(r"#[p]{1}=\d+",'',driver.current_url)
if "卷" not in charpter:
print(f"{charpter}最大页数—{maxpage}")
picture_url_list = self.picture_url_crawler(maxpage,driver,wait)
time.sleep(2)
charpter_next = wait.until(EC.presence_of_element_located((By.CLASS_NAME,"nextC")))
driver.execute_script("arguments[0].click();", charpter_next) # 防止按键遮挡
try:
wait.until(EC.presence_of_element_located((By.CLASS_NAME,"tip-alert")))
end_flag = 0
print("全部爬取完毕,congratulations!")
                    except TimeoutException:  # tip-alert absent: more chapters remain
pass
while True:
if q.empty():
q.put((referer,charpter,picture_url_list,end_flag))
break
time.sleep(1)
except TimeoutException:
check_time += 1
driver.refresh()
continue
"""下载图片,并保存到文件夹中"""
def picture_download(self,q):
while True:
charpter_url_list_endflag = q.get(True)
picture_url_list = charpter_url_list_endflag[2]
charpter = charpter_url_list_endflag[1]
endflag = charpter_url_list_endflag[-1]
referer = charpter_url_list_endflag[0]
            headers = {
                "Referer": referer,
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                              'AppleWebKit/537.36 (KHTML, like Gecko) '
                              'Chrome/74.0.3729.108 Safari/537.36'}
page = 1
print(f"正在下载{charpter}")
for picture_url in picture_url_list:
reload_time = 0
while page <= len(picture_url_list) and reload_time <= 5:
                    try:
                        response = self.s.get(picture_url, headers=headers, timeout=5, verify=False)
                        os.makedirs(f"E:\黑色四叶操\{charpter}", exist_ok=True)
                        with open(f"E:\黑色四叶操\{charpter}\{page}.jpg", "wb") as f:
                            f.write(response.content)
                        break
                    except (requests.exceptions.ConnectionError, socket.timeout, urllib3.exceptions.ReadTimeoutError) as e:
                        print("Image download failed:", e)
                        time.sleep(2)
                        reload_time += 1
                        continue
page += 1
if endflag ==0:
return
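# Note: picture_download attaches a Referer header to every image request;
# image hosts like this one commonly reject hotlinked requests without it.
# A minimal standalone fetch (URL and variable names illustrative) would be:
#   requests.get(image_url,
#                headers={"Referer": chapter_url, "User-Agent": "Mozilla/5.0"},
#                timeout=5, verify=False)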
if __name__=="__main__":
q = Queue()
one_punch_man_cartoon_downloader = One_Punch_Man_Spider()
picture_url_writer = Process(target=one_punch_man_cartoon_downloader.picture_url_list,args=(q,))
picture_save = Process(target=one_punch_man_cartoon_downloader.picture_download,args=(q,))
picture_url_writer.start()
picture_save.start()
    # wait for the URL-crawler process to finish
picture_url_writer.join()
picture_save.join()
    # the picture_save process loops forever; force-terminate it if needed
# picture_save.terminate()
os.system(r'E:\KuGou\1.mp3')
| 40.384615 | 116 | 0.565306 |
acf59ce1b378e352e7f7218c4e77dbe9fa5f6af8 | 13,172 | py | Python | app/tests/conftest.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 1 | 2021-06-16T03:38:07.000Z | 2021-06-16T03:38:07.000Z | app/tests/conftest.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 17 | 2021-04-26T03:28:40.000Z | 2021-11-24T07:15:55.000Z | app/tests/conftest.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 1 | 2021-05-30T14:09:11.000Z | 2021-05-30T14:09:11.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import json
import pytest
from cryptography.fernet import Fernet
from flask import url_for
from web3 import Web3
from web3.middleware import geth_poa_middleware
from app import create_app
from app import db as _db
from app.models import (
Role,
Issuer,
User
)
from app.tests.utils.account_config import eth_account
from app.utils import ContractUtils
from config import Config
web3 = Web3(Web3.HTTPProvider(Config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# ---------------------------------------------------------------------------------------------
# Database fixtures
# ---------------------------------------------------------------------------------------------
@pytest.fixture(scope='session')
def app(request):
print('')
print('<session-app start>')
app = create_app('testing')
ctx = app.app_context()
ctx.push()
def teardown():
ctx.pop()
print('<session-app end>')
request.addfinalizer(teardown)
return app
@pytest.fixture(scope='session')
def db(request, app):
print(' <session-db start> %s' % _db)
_db.app = app
_db.drop_all()
_db.create_all()
def teardown():
# _db.drop_all()
print(' <session-db end>')
request.addfinalizer(teardown)
return _db
@pytest.fixture(scope='session')
def init_login_user(db):
roles = ['admin', 'user']
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
db.session.add(role)
users = [
{
'login_id': 'admin',
'user_name': '管理者',
'role_id': 1,
'password': '1234',
'eth_account': eth_account['issuer']['account_address']
}, {
'login_id': 'user',
'user_name': 'ユーザ',
'role_id': 2,
'password': '1234',
'eth_account': eth_account['issuer']['account_address']
},
]
for u_dict in users:
user = User()
for key, value in u_dict.items():
setattr(user, key, value)
db.session.add(user)
db.session.commit()
@pytest.fixture(scope='session')
def issuer(db):
issuer = Issuer.query.filter(Issuer.eth_account == eth_account['issuer']['account_address']).first()
if issuer is not None:
return issuer
fernet = Fernet(Config.SECURE_PARAMETER_ENCRYPTION_KEY)
encrypted_account_password = fernet.encrypt(eth_account['issuer']['password'].encode()).decode()
with open('data/rsa/private.pem') as f:
encrypted_rsa_private_key = f.read()
issuer = Issuer(
eth_account=eth_account['issuer']['account_address'],
issuer_name='発行体1',
private_keystore='GETH',
network='IBET',
max_sell_price=100000000,
agent_address=eth_account['agent']['account_address'],
payment_gateway_contract_address='',
personal_info_contract_address='',
token_list_contract_address='',
ibet_share_exchange_contract_address='',
ibet_sb_exchange_contract_address='',
ibet_membership_exchange_contract_address='',
ibet_coupon_exchange_contract_address='',
encrypted_account_password=encrypted_account_password,
encrypted_rsa_private_key=encrypted_rsa_private_key,
)
db.session.add(issuer)
db.session.commit()
return issuer
@pytest.fixture(scope='session')
def issuer2(db):
issuer2 = Issuer.query.filter(Issuer.eth_account == eth_account['issuer2']['account_address']).first()
if issuer2 is not None:
return issuer2
fernet = Fernet(Config.SECURE_PARAMETER_ENCRYPTION_KEY)
encrypted_account_password = fernet.encrypt(eth_account['issuer2']['password'].encode()).decode()
with open('data/rsa/private.pem') as f:
encrypted_rsa_private_key = f.read()
deployer = Issuer(
eth_account=eth_account['issuer2']['account_address'],
issuer_name='issuer2',
private_keystore='GETH',
network='IBET',
max_sell_price=100000000,
agent_address=eth_account['agent']['account_address'],
payment_gateway_contract_address='',
personal_info_contract_address='',
token_list_contract_address='',
ibet_share_exchange_contract_address='',
ibet_sb_exchange_contract_address='',
ibet_membership_exchange_contract_address='',
ibet_coupon_exchange_contract_address='',
encrypted_account_password=encrypted_account_password,
encrypted_rsa_private_key=encrypted_rsa_private_key,
)
db.session.add(deployer)
    # login user for the second issuer
user = User()
    user.login_id = 'admin2'
    user.user_name = '管理者2'
    user.role_id = 1
user.eth_account = eth_account['issuer2']['account_address']
user.password = '1234'
db.session.add(user)
db.session.commit()
return deployer
@pytest.fixture(scope="session", autouse=True)
def db_setup(request, db):
print(' <class start>')
init_login_user(db)
issuer(db)
issuer2(db)
def teardown():
db.session.remove()
print(' <class end>')
request.addfinalizer(teardown)
class TestBase(object):
data = None
@staticmethod
def add_data(param_db, table_name, data_rows):
for row_no, row_values in data_rows.items():
cls = globals()[table_name]
entity = cls()
for key, value in row_values.items():
setattr(entity, key, value)
param_db.session.add(entity)
param_db.session.commit()
@staticmethod
def client_with_user_login(param_app):
client = param_app.test_client()
client.post(url_for('auth.login'), data={'login_id': 'user', 'password': '1234'})
return client
@staticmethod
def client_with_admin_login(param_app, login_id='admin'):
client = param_app.test_client()
client.post(url_for('auth.login'), data={'login_id': login_id, 'password': '1234'})
return client
@staticmethod
def client_with_api_login(param_app, login_id="admin"):
client = param_app.test_client()
response = client.post('/api/auth', json={'login_id': login_id, 'password': '1234'})
return client, json.loads(response.data.decode('utf-8'))['access_token']
# ---------------------------------------------------------------------------------------------
# Quorum (contract deployment) fixtures
# ---------------------------------------------------------------------------------------------
def payment_gateway_contract():
deployer = eth_account['deployer']
agent = eth_account['agent']
web3.eth.defaultAccount = deployer['account_address']
contract_address, abi, _ = \
ContractUtils.deploy_contract('PaymentGateway', [], deployer['account_address'])
print(contract_address)
contract = ContractUtils.get_contract('PaymentGateway', contract_address)
tx_hash = contract.functions.addAgent(agent['account_address']).transact(
{'from': deployer['account_address'], 'gas': Config.TX_GAS_LIMIT}
)
web3.eth.waitForTransactionReceipt(tx_hash)
return {'address': contract_address, 'abi': abi}
def personalinfo_contract():
deployer = eth_account['deployer']
web3.eth.defaultAccount = deployer['account_address']
contract_address, abi, _ = ContractUtils.deploy_contract(
'PersonalInfo', [], deployer['account_address'])
return {'address': contract_address, 'abi': abi}
def tokenlist_contract():
deployer = eth_account['deployer']
web3.eth.defaultAccount = deployer['account_address']
contract_address, abi, _ = ContractUtils.deploy_contract(
'TokenList', [], deployer['account_address'])
return {'address': contract_address, 'abi': abi}
# Straight Bond Exchange
def bond_exchange_contract(payment_gateway_address):
deployer = eth_account['deployer']
storage_address, _, _ = ContractUtils.deploy_contract(
contract_name='ExchangeStorage',
args=[],
deployer=deployer['account_address']
)
args = [
payment_gateway_address,
storage_address,
]
contract_address, abi, _ = ContractUtils.deploy_contract(
contract_name='IbetExchange',
args=args,
deployer=deployer['account_address']
)
storage = ContractUtils.get_contract('ExchangeStorage', storage_address)
storage.functions.upgradeVersion(contract_address).transact({
'from': deployer['account_address'],
'gas': Config.TX_GAS_LIMIT}
)
return {'address': contract_address, 'abi': abi}
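# The four exchange deployment helpers in this file all follow the same
# storage-proxy pattern: deploy a standalone ExchangeStorage, deploy an
# IbetExchange bound to it, then call upgradeVersion so the storage accepts
# writes from the new exchange. A generic equivalent (a sketch; the helpers
# below keep their explicit copies) would be:
def _deploy_exchange(payment_gateway_address, deployer_address):
    storage_address, _, _ = ContractUtils.deploy_contract(
        contract_name='ExchangeStorage',
        args=[],
        deployer=deployer_address
    )
    contract_address, abi, _ = ContractUtils.deploy_contract(
        contract_name='IbetExchange',
        args=[payment_gateway_address, storage_address],
        deployer=deployer_address
    )
    storage = ContractUtils.get_contract('ExchangeStorage', storage_address)
    storage.functions.upgradeVersion(contract_address).transact(
        {'from': deployer_address, 'gas': Config.TX_GAS_LIMIT}
    )
    return {'address': contract_address, 'abi': abi}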
# Coupon Exchange
def coupon_exchange_contract(payment_gateway_address):
deployer = eth_account['deployer']
web3.eth.defaultAccount = deployer['account_address']
storage_address, _, _ = ContractUtils.deploy_contract(
contract_name='ExchangeStorage',
args=[],
deployer=deployer['account_address'])
args = [
payment_gateway_address,
storage_address
]
contract_address, abi, _ = ContractUtils.deploy_contract(
contract_name='IbetExchange',
args=args,
deployer=deployer['account_address']
)
storage = ContractUtils.get_contract('ExchangeStorage', storage_address)
storage.functions.upgradeVersion(contract_address).transact(
{'from': deployer['account_address'], 'gas': Config.TX_GAS_LIMIT}
)
return {'address': contract_address, 'abi': abi}
# Membership Exchange
def membership_exchange_contract(payment_gateway_address):
deployer = eth_account['deployer']
web3.eth.defaultAccount = deployer['account_address']
storage_address, _, _ = ContractUtils.deploy_contract(
contract_name='ExchangeStorage',
args=[],
deployer=deployer['account_address']
)
args = [
payment_gateway_address,
storage_address
]
contract_address, abi, _ = ContractUtils.deploy_contract(
contract_name='IbetExchange',
args=args,
deployer=deployer['account_address']
)
storage = ContractUtils.get_contract('ExchangeStorage', storage_address)
storage.functions.upgradeVersion(contract_address).transact(
{'from': deployer['account_address'], 'gas': Config.TX_GAS_LIMIT}
)
return {'address': contract_address, 'abi': abi}
# Share Exchange
def share_exchange_contract(payment_gateway_address):
deployer = eth_account['deployer']
storage_address, _, _ = ContractUtils.deploy_contract(
contract_name='ExchangeStorage',
args=[],
deployer=deployer['account_address']
)
args = [
payment_gateway_address,
storage_address,
]
contract_address, abi, _ = ContractUtils.deploy_contract(
contract_name='IbetExchange',
args=args,
deployer=deployer['account_address']
)
storage = ContractUtils.get_contract('ExchangeStorage', storage_address)
storage.functions.upgradeVersion(contract_address).transact(
{'from': deployer['account_address'], 'gas': Config.TX_GAS_LIMIT}
)
return {'address': contract_address, 'abi': abi}
@pytest.fixture(scope='class', autouse=True)
def shared_contract(db, issuer: Issuer):
payment_gateway = payment_gateway_contract()
personal_info = personalinfo_contract()
token_list = tokenlist_contract()
bond_exchange = bond_exchange_contract(payment_gateway['address'])
membership_exchange = membership_exchange_contract(payment_gateway['address'])
coupon_exchange = coupon_exchange_contract(payment_gateway['address'])
share_exchange = share_exchange_contract(payment_gateway['address'])
contracts = {
'PaymentGateway': payment_gateway,
'PersonalInfo': personal_info,
'IbetStraightBondExchange': bond_exchange,
'TokenList': token_list,
'IbetCouponExchange': coupon_exchange,
'IbetMembershipExchange': membership_exchange,
'IbetShareExchange': share_exchange
}
issuer.payment_gateway_contract_address = payment_gateway['address']
issuer.personal_info_contract_address = personal_info['address']
issuer.token_list_contract_address = token_list['address']
issuer.ibet_sb_exchange_contract_address = bond_exchange['address']
issuer.ibet_coupon_exchange_contract_address = coupon_exchange['address']
issuer.ibet_membership_exchange_contract_address = membership_exchange['address']
issuer.ibet_share_exchange_contract_address = share_exchange['address']
db.session.commit()
return contracts
| 31.436754 | 106 | 0.664819 |
acf59d98391ac6555a55769394e430d1674066f7 | 248 | py | Python | day5/day5.py | jorn86/adventofcode2019 | fa80d094312ee048e6416a26292fff368ff83b71 | [
"MIT"
] | null | null | null | day5/day5.py | jorn86/adventofcode2019 | fa80d094312ee048e6416a26292fff368ff83b71 | [
"MIT"
] | null | null | null | day5/day5.py | jorn86/adventofcode2019 | fa80d094312ee048e6416a26292fff368ff83b71 | [
"MIT"
] | null | null | null | from IntCoder import IntCoder
def run(input_value):
memory = [int(i) for i in open('./input.txt', 'r').read().split(',')]
coder = IntCoder(memory, [input_value])
coder.run()
print(coder.output)
run(1) # part 1
run(5) # part 2
| 19.076923 | 73 | 0.616935 |
acf59dc490d7c9043501abf6154db11987ced5b0 | 26,216 | py | Python | complete_verifier/utils.py | robust-robots/alpha-beta-CROWN | 4acbc582f309d9b933598c49d93ec2aa8b2c2f88 | [
"BSD-3-Clause"
] | 70 | 2021-06-29T13:29:40.000Z | 2022-03-31T03:46:28.000Z | complete_verifier/utils.py | robust-robots/alpha-beta-CROWN | 4acbc582f309d9b933598c49d93ec2aa8b2c2f88 | [
"BSD-3-Clause"
] | 1 | 2021-11-23T09:28:09.000Z | 2021-12-29T21:05:34.000Z | complete_verifier/utils.py | robust-robots/alpha-beta-CROWN | 4acbc582f309d9b933598c49d93ec2aa8b2c2f88 | [
"BSD-3-Clause"
] | 13 | 2021-07-18T04:22:52.000Z | 2022-03-25T03:23:49.000Z | #########################################################################
## This file is part of the alpha-beta-CROWN verifier ##
## ##
## Copyright (C) 2021, Huan Zhang <huan@huan-zhang.com> ##
## Kaidi Xu <xu.kaid@northeastern.edu> ##
## Shiqi Wang <sw3215@columbia.edu> ##
## Zhouxing Shi <zshi@cs.ucla.edu> ##
## Yihan Wang <yihanwang@ucla.edu> ##
## ##
## This program is licenced under the BSD 3-Clause License, ##
## contained in the LICENCE file in this directory. ##
## ##
#########################################################################
from collections import OrderedDict
import os
import gzip
from functools import partial
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import onnx2pytorch
import onnx
import onnxruntime as ort
import arguments
# Import all model architectures.
from model_defs import *
def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
"""
Utility function for computing output of convolutions
takes a tuple of (h,w) and returns a tuple of (h,w)
"""
if type(h_w) is not tuple:
h_w = (h_w, h_w)
if type(kernel_size) is not tuple:
kernel_size = (kernel_size, kernel_size)
if type(stride) is not tuple:
stride = (stride, stride)
if type(pad) is not tuple:
pad = (pad, pad)
h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) // stride[0] + 1
w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) // stride[1] + 1
return h, w
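# Worked example (illustrative): a 32x32 CIFAR image through a conv layer with
# kernel_size=4, stride=2, padding=1 halves each spatial dimension:
#   conv_output_shape((32, 32), kernel_size=4, stride=2, pad=1) == (16, 16)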
def get_test_acc(model, input_shape, is_channel_last=False, batch_size=256):
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets')
mean = torch.tensor(arguments.Config["data"]["mean"])
std = torch.tensor(arguments.Config["data"]["std"])
device = arguments.Config["general"]["device"]
normalize = transforms.Normalize(mean=mean, std=std)
if input_shape == (3, 32, 32):
testset = torchvision.datasets.CIFAR10(root=database_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize]))
elif input_shape == (1, 28, 28):
testset = torchvision.datasets.MNIST(root=database_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize]))
else:
raise RuntimeError("Unable to determine dataset for test accuracy.")
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
total = 0
correct = 0
if device != 'cpu':
model = model.to(device)
print_first_batch = True
with torch.no_grad():
for data in testloader:
images, labels = data
if device != 'cpu':
images = images.to(device)
labels = labels.to(device)
if is_channel_last:
images = images.permute(0,2,3,1)
outputs = model(images)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if print_first_batch:
print_first_batch = False
for i in range(min(outputs.size(0), 10)):
print(f"Image {i} norm {images[i].abs().sum().item()} label {labels[i].item()} correct {labels[i].item() == outputs[i].argmax().item()}\nprediction {outputs[i].cpu().numpy()}")
print(f'correct {correct} of {total}')
def load_onnx(path):
if path.endswith('.gz'):
onnx_model = onnx.load(gzip.GzipFile(path))
else:
onnx_model = onnx.load(path)
return onnx_model
def inference_onnx(path, *inputs):
print(inputs)
sess = ort.InferenceSession(load_onnx(path).SerializeToString())
names = [i.name for i in sess.get_inputs()]
inp = dict(zip(names, inputs))
res = sess.run(None, inp)
return res
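# Illustrative usage (file name is hypothetical; assumes a single-output model):
#   out, = inference_onnx('model.onnx', np.zeros((1, 1, 28, 28), dtype=np.float32))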
def load_model_onnx(path, input_shape, compute_test_acc=False, force_convert=False):
# pip install onnx2pytorch
onnx_model = load_onnx(path)
onnx_input_dims = onnx_model.graph.input[0].type.tensor_type.shape.dim
onnx_shape = tuple(d.dim_value for d in onnx_input_dims[1:])
input_shape = tuple(input_shape)
pytorch_model = onnx2pytorch.ConvertModel(onnx_model)
if force_convert:
new_modules = []
modules = list(pytorch_model.modules())[1:]
for mi, m in enumerate(modules):
if isinstance(m, torch.nn.Linear):
new_m = nn.Linear(in_features=m.in_features, out_features=m.out_features, bias=m.bias is not None)
new_m.weight.data.copy_(m.weight.data)
new_m.bias.data.copy_(m.bias)
new_modules.append(new_m)
elif isinstance(m, torch.nn.ReLU):
new_modules.append(torch.nn.ReLU())
elif isinstance(m, onnx2pytorch.operations.flatten.Flatten):
new_modules.append(torch.nn.Flatten())
else:
raise NotImplementedError
seq_model = nn.Sequential(*new_modules)
return seq_model
if len(input_shape) <= 2:
return pytorch_model
# Check model input shape.
is_channel_last = False
if onnx_shape != input_shape:
# Change channel location.
onnx_shape = onnx_shape[2:] + onnx_shape[:2]
if onnx_shape == input_shape:
is_channel_last = True
else:
print(f"Unexpected input shape in onnx: {onnx_shape}, given {input_shape}")
# Fixup converted ONNX model. For ResNet we directly return; for other models, we convert them to a Sequential model.
# We also need to handle NCHW and NHWC formats here.
conv_c, conv_h, conv_w = input_shape
modules = list(pytorch_model.modules())[1:]
new_modules = []
need_permute = False
for mi, m in enumerate(modules):
if isinstance(m, onnx2pytorch.operations.add.Add):
# ResNet model. No need to convert to sequential.
return pytorch_model, is_channel_last
if isinstance(m, torch.nn.Conv2d):
# Infer the output size of conv.
conv_h, conv_w = conv_output_shape((conv_h, conv_w), m.kernel_size, m.stride, m.padding)
conv_c = m.weight.size(0)
if isinstance(m, onnx2pytorch.operations.reshape.Reshape):
# Replace reshape with flatten.
new_modules.append(nn.Flatten())
# May need to permute the next linear layer if the model was in NHWC format.
need_permute = True and is_channel_last
elif isinstance(m, torch.nn.Linear) and need_permute:
# The original model is in NHWC format and we now have NCHW format, so the dense layer's weight must be adjusted.
new_m = nn.Linear(in_features=m.in_features, out_features=m.out_features, bias=m.bias is not None)
new_m.weight.data.copy_(m.weight.view(m.weight.size(0), conv_h, conv_w, conv_c).permute(0, 3, 1, 2).contiguous().view(m.weight.size(0), -1))
new_m.bias.data.copy_(m.bias)
need_permute = False
new_modules.append(new_m)
elif isinstance(m, torch.nn.ReLU) and mi == (len(modules)-1):
            # skip the trailing ReLU when it is the model's last layer
pass
else:
new_modules.append(m)
seq_model = nn.Sequential(*new_modules)
if compute_test_acc:
get_test_acc(seq_model, input_shape)
return seq_model, is_channel_last
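# Sanity-check sketch for the NHWC->NCHW dense-weight permutation above (shape
# names illustrative): a Linear layer that consumed a flattened NHWC tensor of
# shape (H, W, C) must have its weight columns reordered before it can consume
# flattened NCHW input:
#   w_nhwc = m.weight.view(out_features, H, W, C)
#   w_nchw = w_nhwc.permute(0, 3, 1, 2).reshape(out_features, -1)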
def load_model(weights_loaded=True):
"""
Load the model architectures and weights
"""
# You can customize this function to load your own model based on model name.
model_ori = eval(arguments.Config['model']['name'])()
model_ori.eval()
print(model_ori)
if not weights_loaded:
return model_ori
sd = torch.load(arguments.Config["model"]["path"], map_location=torch.device('cpu'))
if 'state_dict' in sd:
sd = sd['state_dict']
if isinstance(sd, list):
sd = sd[0]
if not isinstance(sd, dict):
raise NotImplementedError("Unknown model format, please modify model loader yourself.")
model_ori.load_state_dict(sd)
return model_ori
########################################
# Preprocess and load the datasets
########################################
def preprocess_cifar(image, inception_preprocess=False, perturbation=False):
"""
Proprocess images and perturbations.Preprocessing used by the SDP paper.
"""
MEANS = np.array([125.3, 123.0, 113.9], dtype=np.float32)/255
STD = np.array([63.0, 62.1, 66.7], dtype=np.float32)/255
upper_limit, lower_limit = 1., 0.
if inception_preprocess:
# Use 2x - 1 to get [-1, 1]-scaled images
rescaled_devs = 0.5
rescaled_means = 0.5
else:
rescaled_means = MEANS
rescaled_devs = STD
if perturbation:
return image / rescaled_devs
else:
return (image - rescaled_means) / rescaled_devs
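# Example: with the SDP constants above, preprocess_cifar(0.) gives the
# per-channel lower bound (0 - MEANS) / STD, and
# preprocess_cifar(2./255., perturbation=True) rescales an epsilon into
# normalized units -- exactly how load_sampled_dataset builds
# data_min/data_max/eps below.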
def load_cifar_sample_data(normalized=True, MODEL="a_mix"):
"""
Load sampled cifar data: 100 images that are classified correctly by each MODEL
"""
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets/sample100_unnormalized')
X = np.load(os.path.join(database_path, MODEL, "X.npy"))
if normalized:
X = preprocess_cifar(X)
X = np.transpose(X, (0, 3, 1, 2))
y = np.load(os.path.join(database_path, MODEL, "y.npy"))
runnerup = np.load(os.path.join(database_path, MODEL, "runnerup.npy"))
X = torch.from_numpy(X.astype(np.float32))
y = torch.from_numpy(y.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
print("############################")
if normalized:
print("Sampled data loaded. Data already preprocessed!")
else:
print("Sampled data loaded. Data not preprocessed yet!")
print("Shape:", X.shape, y.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
print("############################")
return X, y, runnerup
def load_mnist_sample_data(MODEL="mnist_a_adv"):
"""
Load sampled mnist data: 100 images that are classified correctly by each MODEL
"""
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets/sample100_unnormalized')
X = np.load(os.path.join(database_path, MODEL, "X.npy"))
X = np.transpose(X, (0, 3, 1, 2))
y = np.load(os.path.join(database_path, MODEL, "y.npy"))
runnerup = np.load(os.path.join(database_path, MODEL, "runnerup.npy"))
X = torch.from_numpy(X.astype(np.float32))
y = torch.from_numpy(y.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
print("############################")
print("Shape:", X.shape, y.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
print("############################")
return X, y, runnerup
def load_dataset():
"""
Load regular data; Robustness region defined in results pickle
"""
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets')
normalize = transforms.Normalize(mean=arguments.Config["data"]["mean"], std=arguments.Config["data"]["std"])
if arguments.Config["data"]["dataset"] == 'MNIST':
loader = datasets.MNIST
elif arguments.Config["data"]["dataset"] == 'CIFAR':
loader = datasets.CIFAR10
else:
raise ValueError("Dataset {} not supported.".format(arguments.Config["data"]["dataset"]))
test_data = loader(database_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize]))
test_data.mean = torch.tensor(arguments.Config["data"]["mean"])
test_data.std = torch.tensor(arguments.Config["data"]["std"])
# set data_max and data_min to be None if no clip
data_max = torch.reshape((1. - test_data.mean) / test_data.std, (1, -1, 1, 1))
data_min = torch.reshape((0. - test_data.mean) / test_data.std, (1, -1, 1, 1))
return test_data, data_max, data_min
def load_sampled_dataset():
"""
Load sampled data and define the robustness region
"""
if arguments.Config["data"]["dataset"] == "CIFAR_SAMPLE":
X, labels, runnerup = load_cifar_sample_data(normalized=True, MODEL=arguments.Config['model']['name'])
data_max = torch.tensor(preprocess_cifar(1.)).reshape(1,-1,1,1)
data_min = torch.tensor(preprocess_cifar(0.)).reshape(1,-1,1,1)
eps_temp = 2./255.
eps_temp = torch.tensor(preprocess_cifar(eps_temp, perturbation=True)).reshape(1,-1,1,1)
elif arguments.Config["data"]["dataset"] == "MNIST_SAMPLE":
X, labels, runnerup = load_mnist_sample_data(MODEL=arguments.Config['model']['name'])
data_max = torch.tensor(1.).reshape(1,-1,1,1)
data_min = torch.tensor(0.).reshape(1,-1,1,1)
eps_temp = 0.3
eps_temp = torch.tensor(eps_temp).reshape(1,-1,1,1)
return X, labels, runnerup, data_max, data_min, eps_temp
def load_sdp_dataset(eps_temp=None):
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets/sdp')
if arguments.Config["data"]["dataset"] == "CIFAR_SDP":
X = np.load(os.path.join(database_path, "cifar/X_sdp.npy"))
X = preprocess_cifar(X)
X = np.transpose(X, (0,3,1,2))
y = np.load(os.path.join(database_path, "cifar/y_sdp.npy"))
runnerup = np.copy(y)
X = torch.from_numpy(X.astype(np.float32))
y = torch.from_numpy(y.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = 2./255.
eps_temp = torch.tensor(preprocess_cifar(eps_temp, perturbation=True)).reshape(1,-1,1,1)
data_max = torch.tensor(preprocess_cifar(1.)).reshape(1,-1,1,1)
data_min = torch.tensor(preprocess_cifar(0.)).reshape(1,-1,1,1)
print("############################")
print("Sampled data loaded. Data already preprocessed!")
print("Shape:", X.shape, y.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
print("############################")
elif arguments.Config["data"]["dataset"] == "MNIST_SDP":
X = np.load(os.path.join(database_path, "mnist/X_sdp.npy"))
X = np.transpose(X, (0,3,1,2))
y = np.load(os.path.join(database_path, "mnist/y_sdp.npy"))
runnerup = np.copy(y)
X = torch.from_numpy(X.astype(np.float32))
y = torch.from_numpy(y.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = torch.tensor(0.3)
data_max = torch.tensor(1.).reshape(1,-1,1,1)
data_min = torch.tensor(0.).reshape(1,-1,1,1)
print("############################")
print("Shape:", X.shape, y.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
print("############################")
else:
exit("sdp dataset not supported!")
return X, y, runnerup, data_max, data_min, eps_temp
def load_generic_dataset(eps_temp=None):
"""Load MNIST/CIFAR test set with normalization."""
print("Trying generic MNIST/CIFAR data loader.")
test_data, data_max, data_min = load_dataset()
if eps_temp is None:
raise ValueError('You must specify an epsilon')
testloader = torch.utils.data.DataLoader(test_data, batch_size=10000, shuffle=False, num_workers=4)
X, labels = next(iter(testloader))
runnerup = None
# Rescale epsilon.
eps_temp = torch.reshape(eps_temp / torch.tensor(arguments.Config["data"]["std"], dtype=torch.get_default_dtype()), (1, -1, 1, 1))
return X, labels, runnerup, data_max, data_min, eps_temp
def load_eran_dataset(eps_temp=None):
"""
Load sampled data and define the robustness region
"""
database_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets/eran')
if arguments.Config["data"]["dataset"] == "CIFAR_ERAN":
X = np.load(os.path.join(database_path, "cifar_eran/X_eran.npy"))
mean = np.array([0.4914, 0.4822, 0.4465]).reshape(1, -1, 1, 1).astype(np.float32)
std = np.array([0.2023, 0.1994, 0.201]).reshape(1, -1, 1, 1).astype(np.float32)
X = (X - mean) / std
labels = np.load(os.path.join(database_path, "cifar_eran/y_eran.npy"))
runnerup = np.copy(labels)
X = torch.from_numpy(X.astype(np.float32))
labels = torch.from_numpy(labels.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = 2. / 255.
eps_temp = torch.tensor(eps_temp / std).reshape(1, -1, 1, 1)
data_max = torch.tensor((1. - mean) / std).reshape(1, -1, 1, 1)
data_min = torch.tensor((0. - mean) / std).reshape(1, -1, 1, 1)
print("############################")
print("Sampled data loaded. Data already preprocessed!")
print("Shape:", X.shape, labels.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
# print("epsilon:", eps_temp)
# print("max, min:", data_max, data_min)
print("Note runnerup label is empty here!")
print("############################")
elif arguments.Config["data"]["dataset"] == "MNIST_ERAN":
X = np.load(os.path.join(database_path, "mnist_eran/X_eran.npy"))
mean = 0.1307
std = 0.3081
X = (X - mean) / std
labels = np.load(os.path.join(database_path, "mnist_eran/y_eran.npy"))
runnerup = np.copy(labels)
X = torch.from_numpy(X.astype(np.float32))
labels = torch.from_numpy(labels.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = 0.3
eps_temp = torch.tensor(eps_temp / std).reshape(1, -1, 1, 1)
data_max = torch.tensor((1. - mean) / std).reshape(1, -1, 1, 1)
data_min = torch.tensor((0. - mean) / std).reshape(1, -1, 1, 1)
print("############################")
print("Sampled data loaded. Data already preprocessed!")
print("Shape:", X.shape, labels.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
# print("epsilon:", eps_temp)
# print("max, min:", data_max, data_min)
print("Note runnerup label is empty here!")
print("############################")
elif arguments.Config["data"]["dataset"] == "MNIST_ERAN_UN":
X = np.load(os.path.join(database_path, "mnist_eran/X_eran.npy"))
labels = np.load(os.path.join(database_path, "mnist_eran/y_eran.npy"))
runnerup = np.copy(labels)
X = torch.from_numpy(X.astype(np.float32))
labels = torch.from_numpy(labels.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = 0.3
eps_temp = torch.tensor(eps_temp).reshape(1, -1, 1, 1)
data_max = torch.tensor(1.).reshape(1, -1, 1, 1)
data_min = torch.tensor(0.).reshape(1, -1, 1, 1)
print("############################")
print("Sampled data loaded. No normalization used!")
print("Shape:", X.shape, labels.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
# print("epsilon:", eps_temp)
# print("max, min:", data_max, data_min)
print("Note runnerup label is empty here!")
print("############################")
elif arguments.Config["data"]["dataset"] == "MNIST_MADRY_UN":
X = np.load(os.path.join(database_path, "mnist_madry/X.npy")).reshape(-1, 1, 28, 28)
labels = np.load(os.path.join(database_path, "mnist_madry/y.npy"))
runnerup = np.copy(labels)
X = torch.from_numpy(X.astype(np.float32))
labels = torch.from_numpy(labels.astype(int))
runnerup = torch.from_numpy(runnerup.astype(int))
if eps_temp is None: eps_temp = 0.3
eps_temp = torch.tensor(eps_temp).reshape(1, -1, 1, 1)
data_max = torch.tensor(1.).reshape(1, -1, 1, 1)
data_min = torch.tensor(0.).reshape(1, -1, 1, 1)
print("############################")
print("Sampled data loaded. No normalization used!")
print("Shape:", X.shape, labels.shape, runnerup.shape)
print("X range:", X.max(), X.min(), X.mean())
# print("epsilon:", eps_temp)
# print("max, min:", data_max, data_min)
print("Note runnerup label is empty here!")
print("############################")
else:
        raise ValueError(f'Unsupported dataset {arguments.Config["data"]["dataset"]}')
return X, labels, runnerup, data_max, data_min, eps_temp
def load_verification_dataset(eps_before_normalization):
target_label = None
# Add your customized dataset here.
if arguments.Config["data"]["pkl_path"] is not None:
# for oval20 base, wide, deep or other datasets saved in .pkl file, we load the pkl file here.
assert arguments.Config["specification"]["epsilon"] is None, 'will use epsilon saved in .pkl file'
gt_results = pd.read_pickle(arguments.Config["data"]["pkl_path"])
test_data, data_max, data_min = load_dataset()
X, labels = zip(*test_data)
X = torch.stack(X, dim=0)
labels = torch.tensor(labels)
runnerup = None
idx = gt_results["Idx"].to_list()
X, labels = X[idx], labels[idx]
target_label = gt_results['prop'].to_list()
eps_new = gt_results['Eps'].to_list()
        print('Using the epsilon values saved in the .pkl file; these are assumed to be already normalized!')
eps_new = [torch.reshape(torch.tensor(i, dtype=torch.get_default_dtype()), (1, -1, 1, 1)) for i in eps_new]
# Some special model loaders.
elif "ERAN" in arguments.Config["data"]["dataset"] or "MADRY" in arguments.Config["data"]["dataset"]:
X, labels, runnerup, data_max, data_min, eps_new = load_eran_dataset(eps_temp=eps_before_normalization)
elif "SDP" in arguments.Config["data"]["dataset"]:
X, labels, runnerup, data_max, data_min, eps_new = load_sdp_dataset(eps_temp=eps_before_normalization)
elif "SAMPLE" in arguments.Config["data"]["dataset"]:
# Sampled datapoints (a small subset of MNIST/CIFAR), only for reproducing some paper results.
X, labels, runnerup, data_max, data_min, eps_new = load_sampled_dataset()
elif "CIFAR" in arguments.Config["data"]["dataset"] or "MNIST" in arguments.Config["data"]["dataset"]:
# general MNIST and CIFAR dataset with mean/std defined in config file.
X, labels, runnerup, data_max, data_min, eps_new = load_generic_dataset(eps_temp=eps_before_normalization)
else:
exit("Dataset not supported in this file! Please customize load_verification_dataset() function in utils.py.")
if arguments.Config["specification"]["norm"] != np.inf:
assert arguments.Config["data"]["std"].count(arguments.Config["data"]["std"][0]) == len(
arguments.Config["data"]["std"]), print('For non-Linf norm, we only support 1d eps.')
arguments.Config["data"]["std"] = arguments.Config["data"]["std"][0]
eps_new = eps_new[0, 0, 0, 0] # only support eps as a scalar for non-Linf norm
return X, labels, runnerup, data_max, data_min, eps_new, target_label
def convert_test_model(model_ori):
# NOTE: It looks like `in_features` and `out_features` are in the wrong order
# after converting the onnx model to pytorch model.
# Swap them below.
modules = []
for m in model_ori._modules.values():
if isinstance(m, nn.Linear):
layer = nn.Linear(m.in_features, m.out_features) # Fix a bug in onnx converter for test models.
layer.weight.data = m.weight.data.to(torch.float)
layer.bias.data = m.bias.data.to(torch.float) if m.bias is not None else torch.zeros_like(layer.bias.data)
modules.append(layer)
# pdb.set_trace()
else:
modules.append(m)
model_ori = nn.Sequential(*modules)
return model_ori
def convert_nn4sys_model(model_ori):
model_ori = nn.Sequential(*list(model_ori._modules.values()))
# Split the model into v1 and v2 models to resolve numerical issues
modules_v1 = []
modules_v2 = []
stage = 1
for m in model_ori._modules.values():
if isinstance(m, nn.Linear):
if m.weight.abs().max() > 1e9:
stage = 2 if len(modules_v2) == 0 else 3
continue
else:
continue
if stage == 1:
modules_v1 += [m, nn.ReLU(inplace=True)]
elif stage == 2:
dim = modules_v1[-2].out_features - 1
lin = nn.Linear(m.in_features - dim, m.out_features - dim)
            lin.weight.data = m.weight.data[:lin.out_features, :lin.in_features].to(torch.float64)
            lin.bias.data = m.bias.data[:lin.out_features].to(torch.float64)
modules_v2 += [lin, nn.ReLU(inplace=True)]
x = torch.tensor([[119740.8]], dtype=torch.float64)
modules_v1 = modules_v1[:-1]
model_v1 = nn.Sequential(*modules_v1)
y = model_v1(x)
dim = y.size(-1) - 1
modules_v2 = modules_v2[:-1]
linear_ident = nn.Linear(1, dim, bias=False)
linear_ident.weight.data = torch.ones_like(linear_ident.weight, dtype=torch.float64)
modules_v2.insert(0, linear_ident)
model_v2 = nn.Sequential(*modules_v2)
y[:, :-2] *= (y[:, 1:-1] <= 0).int()
select = (y[:, :-1] > 0).int()
y2 = model_v2(x)
y2 = y2[:] * select
res = y2.sum(dim=-1, keepdim=True)
res_ref = model_ori(x)
print(res.item(), res_ref.item())
# import pdb; pdb.set_trace()
model_ori = (model_v1, model_v2, model_ori)
return model_ori
| 44.358714 | 196 | 0.610162 |
acf59f54cdb00d4388d94975a523179471f054ce | 4,378 | py | Python | contrib/seeds/generate-seeds.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | [
"MIT"
] | 1 | 2018-07-18T18:25:36.000Z | 2018-07-19T14:19:43.000Z | contrib/seeds/generate-seeds.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
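# Examples (illustrative):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        -> 15 zero bytes followed by 0x01
#   name_to_ipv6('0x0100007f') -> IPv4 127.0.0.1 (little-endian legacy format)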
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
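# Examples (illustrative):
#   parse_spec('1.2.3.4', 9317)             -> (IPv4-mapped 1.2.3.4 bytes, 9317)
#   parse_spec('[2001:db8::1]:19317', 9317) -> (2001:db8::1 bytes, 19317)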
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9317)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19317)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.496403 | 98 | 0.582458 |
acf5a008dd69669e3bd807267b9253900cbe18a5 | 2,763 | py | Python | generate_custom_slots.py | Isthar84/kokorogerproves | e6f388c4dbdb7a2a6f0b7f338f9a2b61606b568c | [
"MIT"
] | 10 | 2018-05-24T11:11:26.000Z | 2021-09-08T11:50:08.000Z | generate_custom_slots.py | Isthar84/kokorogerproves | e6f388c4dbdb7a2a6f0b7f338f9a2b61606b568c | [
"MIT"
] | 3 | 2019-05-29T06:06:21.000Z | 2021-05-26T14:26:44.000Z | generate_custom_slots.py | Isthar84/kokorogerproves | e6f388c4dbdb7a2a6f0b7f338f9a2b61606b568c | [
"MIT"
] | 7 | 2018-05-27T08:18:14.000Z | 2020-09-01T18:28:19.000Z | import re
import string
import random
import os
from kodi_voice import KodiConfigParser, Kodi
config_file = os.path.join(os.path.dirname(__file__), "kodi.config")
config = KodiConfigParser(config_file)
kodi = Kodi(config)
def most_words(l=[]):
longest = 0
for s in l:
if len(s.split()) > longest:
longest = len(s.split())
return longest
def sort_by_words(l, longest):
distributed = []
for i in range(1, longest + 1):
dl = [s for s in l if len(s.split()) == i]
if dl:
distributed.append(dl)
return distributed
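# A small hand-worked illustration of the two helpers above:
#   most_words(['a', 'b c', 'd'])        == 2
#   sort_by_words(['a', 'b c', 'd'], 2)  == [['a', 'd'], ['b c']]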
def clean_results(resp, cat, key, limit=None):
if not limit:
try:
limit = kodi.config.get('alexa', 'slot_items_max')
if limit and limit != 'None':
limit = int(limit)
else:
limit = None
except:
limit = None
if not limit:
limit = 100
cleaned = []
if 'result' in resp and cat in resp['result']:
    for v in resp['result'][cat]:
name = kodi.sanitize_name(v[key], normalize=False)
# omit titles with digits, as Amazon never passes numbers as digits
if not re.search(r'\d', name):
cleaned.append(name)
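  # De-duplicate case-insensitively, keeping one original spelling per title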
cleaned = {v.lower(): v for v in cleaned}.values()
cleaned = filter(None, cleaned)
random.shuffle(cleaned)
# distribute strings evenly by number of words
if len(cleaned) > limit:
longest = most_words(cleaned)
distributed = sort_by_words(cleaned, longest)
if distributed:
total = 0
cleaned = []
while total < limit:
for l in distributed:
if l:
total += 1
cleaned.append(l.pop())
# sort by number of words just for visibility
if cleaned:
longest = most_words(cleaned)
distributed = sort_by_words(cleaned, longest)
if distributed:
cleaned = []
for dl in distributed:
cleaned += [l for l in dl]
return cleaned[:limit]
def write_file(filename, items=[]):
print 'Writing: %s' % (filename)
f = open(filename, 'w')
for a in items:
f.write("%s\n" % a.encode("utf-8"))
f.close()
# Generate MUSICPLAYLISTS Slot
retrieved = kodi.GetMusicPlaylists()
cl = clean_results(retrieved, 'files', 'label')
write_file('MUSICPLAYLISTS', cl)
# Generate MUSICGENRES Slot
retrieved = kodi.GetMusicGenres()
cl = clean_results(retrieved, 'genres', 'label')
write_file('MUSICGENRES', cl)
# Generate MUSICARTISTS Slot
retrieved = kodi.GetMusicArtists()
cl = clean_results(retrieved, 'artists', 'artist')
write_file('MUSICARTISTS', cl)
# Generate MUSICALBUMS Slot
retrieved = kodi.GetAlbums()
cl = clean_results(retrieved, 'albums', 'label')
write_file('MUSICALBUMS', cl)
# Generate MUSICSONGS Slot
retrieved = kodi.GetSongs()
cl = clean_results(retrieved, 'songs', 'label')
write_file('MUSICSONGS', cl)
| 23.818966 | 73 | 0.658704 |
acf5a0b727a207b0fdd40a1523d3c887c2a0543d | 3,609 | py | Python | pyathena/pandas_cursor.py | alexells/PyAthena | e365b3f4568ebc755b3d6d631dc5da43bca867b1 | [
"MIT"
] | null | null | null | pyathena/pandas_cursor.py | alexells/PyAthena | e365b3f4568ebc755b3d6d631dc5da43bca867b1 | [
"MIT"
] | null | null | null | pyathena/pandas_cursor.py | alexells/PyAthena | e365b3f4568ebc755b3d6d631dc5da43bca867b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from pyathena.common import CursorIterator
from pyathena.cursor import BaseCursor
from pyathena.error import OperationalError, ProgrammingError
from pyathena.model import AthenaQueryExecution
from pyathena.result_set import AthenaPandasResultSet, WithResultSet
from pyathena.util import synchronized
_logger = logging.getLogger(__name__)
class PandasCursor(BaseCursor, CursorIterator, WithResultSet):
def __init__(
self,
connection,
s3_staging_dir,
schema_name,
work_group,
poll_interval,
encryption_option,
kms_key,
converter,
formatter,
retry_config,
kill_on_interrupt=True,
**kwargs
):
super(PandasCursor, self).__init__(
connection=connection,
s3_staging_dir=s3_staging_dir,
schema_name=schema_name,
work_group=work_group,
poll_interval=poll_interval,
encryption_option=encryption_option,
kms_key=kms_key,
converter=converter,
formatter=formatter,
retry_config=retry_config,
kill_on_interrupt=kill_on_interrupt,
**kwargs
)
@property
def rownumber(self):
return self._result_set.rownumber if self._result_set else None
def close(self):
if self._result_set and not self._result_set.is_closed:
self._result_set.close()
@synchronized
def execute(
self,
operation,
parameters=None,
work_group=None,
s3_staging_dir=None,
cache_size=0,
):
self._reset_state()
self._query_id = self._execute(
operation,
parameters=parameters,
work_group=work_group,
s3_staging_dir=s3_staging_dir,
cache_size=cache_size,
)
query_execution = self._poll(self._query_id)
if query_execution.state == AthenaQueryExecution.STATE_SUCCEEDED:
self._result_set = AthenaPandasResultSet(
self._connection,
self._converter,
query_execution,
self.arraysize,
self._retry_config,
)
else:
raise OperationalError(query_execution.state_change_reason)
return self
def executemany(self, operation, seq_of_parameters):
for parameters in seq_of_parameters:
self.execute(operation, parameters)
# Operations that have result sets are not allowed with executemany.
self._reset_state()
@synchronized
def cancel(self):
if not self._query_id:
raise ProgrammingError("QueryExecutionId is none or empty.")
self._cancel(self._query_id)
@synchronized
def fetchone(self):
if not self.has_result_set:
raise ProgrammingError("No result set.")
return self._result_set.fetchone()
@synchronized
def fetchmany(self, size=None):
if not self.has_result_set:
raise ProgrammingError("No result set.")
return self._result_set.fetchmany(size)
@synchronized
def fetchall(self):
if not self.has_result_set:
raise ProgrammingError("No result set.")
return self._result_set.fetchall()
@synchronized
def as_pandas(self):
if not self.has_result_set:
raise ProgrammingError("No result set.")
return self._result_set.as_pandas()
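# A minimal usage sketch (bucket, region and table name are placeholders):
#
#   from pyathena import connect
#   from pyathena.pandas_cursor import PandasCursor
#
#   cursor = connect(s3_staging_dir="s3://YOUR_S3_BUCKET/path/to/",
#                    region_name="us-west-2",
#                    cursor_class=PandasCursor).cursor()
#   df = cursor.execute("SELECT * FROM one_row").as_pandas()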
| 29.826446 | 76 | 0.638127 |
acf5a25e93b694d6a94a301ce11b478f4d2157ea | 50 | py | Python | src/pyvboxmanage/exceptions/PyVBoxManageException.py | ndejong/pyvboxmanage | 6cb49546782ae97f177e7035982b1dc86b8f61db | [
"BSD-2-Clause"
] | 1 | 2020-12-28T02:19:35.000Z | 2020-12-28T02:19:35.000Z | src/pyvboxmanage/exceptions/PyVBoxManageException.py | ndejong/pyvboxmanage | 6cb49546782ae97f177e7035982b1dc86b8f61db | [
"BSD-2-Clause"
] | null | null | null | src/pyvboxmanage/exceptions/PyVBoxManageException.py | ndejong/pyvboxmanage | 6cb49546782ae97f177e7035982b1dc86b8f61db | [
"BSD-2-Clause"
] | null | null | null |
class PyVBoxManageException(Exception):
pass
| 12.5 | 39 | 0.78 |
acf5a33deefef5a2a6a7c2242ae31605068d0235 | 12,366 | py | Python | extensions/GraphvizClBridge.py | dszmaj/wikidpad | 1127375665935524ddb623da8dd5137038c7e53e | [
"Apache-2.0",
"MIT"
] | 16 | 2015-02-05T17:32:04.000Z | 2022-01-14T13:46:36.000Z | extensions/GraphvizClBridge.py | dszmaj/wikidpad | 1127375665935524ddb623da8dd5137038c7e53e | [
"Apache-2.0",
"MIT"
] | 8 | 2015-06-20T20:02:41.000Z | 2016-02-23T14:52:32.000Z | extensions/GraphvizClBridge.py | dszmaj/wikidpad | 1127375665935524ddb623da8dd5137038c7e53e | [
"Apache-2.0",
"MIT"
] | 11 | 2015-05-19T09:17:16.000Z | 2017-09-14T00:43:13.000Z | import os, os.path, traceback
import subprocess
import wx
from pwiki.TempFileSet import createTempFile
from pwiki.StringOps import mbcsEnc, mbcsDec, utf8Enc, lineendToOs
WIKIDPAD_PLUGIN = (("InsertionByKey", 1), ("Options", 1))
def describeInsertionKeys(ver, app):
"""
API function for "InsertionByKey" plugins
Returns a sequence of tuples describing the supported
insertion keys. Each tuple has the form (insKey, exportTypes, handlerFactory)
where insKey is the insertion key handled, exportTypes is a sequence of
strings describing the supported export types and handlerFactory is
a factory function (normally a class) taking the wxApp object as
parameter and returning a handler object fulfilling the protocol
for "insertion by key" (see EqnHandler as example).
ver -- API version (can only be 1 currently)
app -- wxApp object
"""
return (
(u"dot", ("html_single", "html_previewWX", "html_preview", "html_multi"), DotHandler),
(u"neato", ("html_single", "html_previewWX", "html_preview", "html_multi"), NeatoHandler),
(u"twopi", ("html_single", "html_previewWX", "html_preview", "html_multi"), TwopiHandler),
(u"circo", ("html_single", "html_previewWX", "html_preview", "html_multi"), CircoHandler),
(u"fdp", ("html_single", "html_previewWX", "html_preview", "html_multi"), FdpHandler)
)
class GraphVizBaseHandler:
"""
Base class fulfilling the "insertion by key" protocol.
"""
# Filled in by derived classes
EXAPPNAME = ""
EXECONFIGKEY = ""
def __init__(self, app):
self.app = app
self.extAppExe = None
def taskStart(self, exporter, exportType):
"""
This is called before any call to createContent() during an
export task.
An export task can be a single HTML page for
preview or a single page or a set of pages for export.
exporter -- Exporter object calling the handler
exportType -- string describing the export type
Calls to createContent() will only happen after a
call to taskStart() and before the call to taskEnd()
"""
        # Find the GraphViz executable by configuration setting
dirPath = self.app.getGlobalConfig().get("main",
"plugin_graphViz_dirExe", "")
if not dirPath:
self.extAppExe = ""
return
exeName = self.app.getGlobalConfig().get("main", self.EXECONFIGKEY, "")
self.extAppExe = os.path.join(self.app.getWikiAppDir(), dirPath, exeName)
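        # E.g. with dirPath='GraphViz/bin' and exeName='dot.exe' (purely
        # illustrative values), extAppExe becomes
        # '<WikidPad app dir>/GraphViz/bin/dot.exe'.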
def taskEnd(self):
"""
Called after export task ended and after the last call to
createContent().
"""
pass
def createContent(self, exporter, exportType, insToken):
"""
Handle an insertion and create the appropriate content.
exporter -- Exporter object calling the handler
exportType -- string describing the export type
insToken -- insertion token to create content for
An insertion token has the following member variables:
key: insertion key (unistring)
value: value of an insertion (unistring)
appendices: sequence of strings with the appendices
Meaning and type of return value is solely defined by the type
of the calling exporter.
For HtmlExporter a unistring is returned with the HTML code
to insert instead of the insertion.
"""
# Retrieve quoted content of the insertion
bstr = lineendToOs(utf8Enc(insToken.value, "replace")[0]) # mbcsEnc
if not bstr:
# Nothing in, nothing out
return u""
if self.extAppExe == "":
            # No path to GraphViz executables -> show message
return u'<pre>' + _(u'[Please set path to GraphViz executables]') + \
'</pre>'
# Get exporters temporary file set (manages creation and deletion of
# temporary files)
tfs = exporter.getTempFileSet()
pythonUrl = (exportType != "html_previewWX")
dstFullPath = tfs.createTempFile("", ".png", relativeTo="")
url = tfs.getRelativeUrl(None, dstFullPath, pythonUrl=pythonUrl)
# Store token content in a temporary file
srcfilepath = createTempFile(bstr, ".dot")
try:
cmdline = subprocess.list2cmdline((self.extAppExe, "-Tpng", "-o" + dstFullPath,
srcfilepath))
# Run external application
# childIn, childOut, childErr = os.popen3(cmdline, "b")
popenObject = subprocess.Popen(cmdline, shell=True,
stderr=subprocess.PIPE, stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
childErr = popenObject.stderr
# See http://bytes.com/topic/python/answers/634409-subprocess-handle-invalid-error
            # for why this is necessary
popenObject.stdin.close()
popenObject.stdout.close()
if u"noerror" in [a.strip() for a in insToken.appendices]:
childErr.read()
errResponse = ""
else:
errResponse = childErr.read()
childErr.close()
finally:
os.unlink(srcfilepath)
if errResponse != "":
appname = mbcsDec(self.EXAPPNAME, "replace")[0]
errResponse = mbcsDec(errResponse, "replace")[0]
return u'<pre>' + _(u'[%s Error: %s]') % (appname, errResponse) +\
u'</pre>'
# Return appropriate HTML code for the image
if exportType == "html_previewWX":
# Workaround for internal HTML renderer
return (u'<img src="%s" border="0" align="bottom" alt="formula" />'
u' ') % url
else:
return u'<img src="%s" border="0" align="bottom" alt="formula" />' \
% url
def getExtraFeatures(self):
"""
Returns a list of bytestrings describing additional features supported
by the plugin. Currently not specified further.
"""
return ()
class DotHandler(GraphVizBaseHandler):
EXAPPNAME = "Dot"
EXECONFIGKEY = "plugin_graphViz_exeDot"
class NeatoHandler(GraphVizBaseHandler):
EXAPPNAME = "Neato"
EXECONFIGKEY = "plugin_graphViz_exeNeato"
class TwopiHandler(GraphVizBaseHandler):
EXAPPNAME = "Twopi"
EXECONFIGKEY = "plugin_graphViz_exeTwopi"
class CircoHandler(GraphVizBaseHandler):
EXAPPNAME = "Circo"
EXECONFIGKEY = "plugin_graphViz_exeCirco"
class FdpHandler(GraphVizBaseHandler):
EXAPPNAME = "Fdp"
EXECONFIGKEY = "plugin_graphViz_exeFdp"
def registerOptions(ver, app):
"""
API function for "Options" plugins
Register configuration options and their GUI presentation
ver -- API version (can only be 1 currently)
app -- wxApp object
"""
# Register options
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_dirExe")] = u""
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_exeDot")] = u"dot.exe"
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_exeNeato")] = u"neato.exe"
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_exeTwopi")] = u"twopi.exe"
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_exeCirco")] = u"circo.exe"
app.getDefaultGlobalConfigDict()[("main", "plugin_graphViz_exeFdp")] = u"fdp.exe"
# Register panel in options dialog
app.addGlobalPluginOptionsDlgPanel(GraphVizOptionsPanel, u"GraphViz")
class GraphVizOptionsPanel(wx.Panel):
def __init__(self, parent, optionsDlg, mainControl):
"""
Called when "Options" dialog is opened to show the panel.
Transfer here all options from the configuration file into the
text fields, check boxes, ...
"""
wx.Panel.__init__(self, parent)
self.app = wx.GetApp()
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_dirExe",
u"")
self.tfDir = wx.TextCtrl(self, -1, pt)
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_exeDot",
u"dot.exe")
self.tfDot = wx.TextCtrl(self, -1, pt)
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_exeNeato",
u"neato.exe")
self.tfNeato = wx.TextCtrl(self, -1, pt)
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_exeTwopi",
u"twopi.exe")
self.tfTwopi = wx.TextCtrl(self, -1, pt)
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_exeCirco",
u"circo.exe")
self.tfCirco = wx.TextCtrl(self, -1, pt)
pt = self.app.getGlobalConfig().get("main", "plugin_graphViz_exeFdp",
u"fdp.exe")
self.tfFdp = wx.TextCtrl(self, -1, pt)
mainsizer = wx.FlexGridSizer(6, 2, 0, 0)
mainsizer.AddGrowableCol(1, 1)
mainsizer.Add(wx.StaticText(self, -1, _(u"Directory of executables:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfDir, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(wx.StaticText(self, -1, _(u"Name of dot executable:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfDot, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(wx.StaticText(self, -1, _(u"Name of neato executable:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfNeato, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(wx.StaticText(self, -1, _(u"Name of twopi executable:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfTwopi, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(wx.StaticText(self, -1, _(u"Name of circo executable:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfCirco, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(wx.StaticText(self, -1, _(u"Name of fdp executable:")), 0,
wx.ALL | wx.EXPAND, 5)
mainsizer.Add(self.tfFdp, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizer(mainsizer)
self.Fit()
def setVisible(self, vis):
"""
Called when panel is shown or hidden. The actual wxWindow.Show()
function is called automatically.
If a panel is visible and becomes invisible because another panel is
selected, the plugin can veto by returning False.
When becoming visible, the return value is ignored.
"""
return True
def checkOk(self):
"""
Called when "OK" is pressed in dialog. The plugin should check here if
all input values are valid. If not, it should return False, then the
Options dialog automatically shows this panel.
There should be a visual indication about what is wrong (e.g. red
background in text field). Be sure to reset the visual indication
if field is valid again.
"""
return True
def handleOk(self):
"""
This is called if checkOk() returned True for all panels. Transfer here
all values from text fields, checkboxes, ... into the configuration
file.
"""
pt = self.tfDir.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_dirExe", pt)
pt = self.tfDot.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_exeDot", pt)
pt = self.tfNeato.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_exeNeato", pt)
pt = self.tfTwopi.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_exeTwopi", pt)
pt = self.tfCirco.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_exeCirco", pt)
pt = self.tfFdp.GetValue()
self.app.getGlobalConfig().set("main", "plugin_graphViz_exeFdp", pt)
| 38.049231 | 103 | 0.598738 |
acf5a4eaac10b9c99f3a126f94af83df67420d32 | 12,326 | py | Python | examples/tutorials/simulation.py | empymod/emg3d-gallery | 7de6ba58546a819e80c957c312bb1cdfeba0bd9e | [
"Apache-2.0"
] | 3 | 2020-01-12T00:34:33.000Z | 2020-09-03T08:56:47.000Z | examples/tutorials/simulation.py | empymod/emg3d-gallery | 7de6ba58546a819e80c957c312bb1cdfeba0bd9e | [
"Apache-2.0"
] | 14 | 2020-02-11T14:25:43.000Z | 2020-12-06T16:54:23.000Z | examples/tutorials/simulation.py | empymod/emg3d-gallery | 7de6ba58546a819e80c957c312bb1cdfeba0bd9e | [
"Apache-2.0"
] | null | null | null | """
3. Simulation
=============
The easiest way to model CSEM data for a survey is to make use of the Survey
and Simulation classes, :class:`emg3d.surveys.Survey` and
:class:`emg3d.simulations.Simulation`, respectively, together with the
automatic gridding functionality.
For this example we use the resistivity model created in the example
:ref:`sphx_glr_gallery_models_gempy-ii.py`.
"""
import os
import pooch
import emg3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from scipy.interpolate import RectBivariateSpline
plt.style.use('bmh')
# Adjust this path to a folder of your choice.
data_path = os.path.join('..', 'download', '')
###############################################################################
# Fetch the model
# ---------------
#
# Retrieve and load the pre-computed resistivity model.
fname = "GemPy-II.h5"
pooch.retrieve(
'https://raw.github.com/emsig/data/2021-05-21/emg3d/models/'+fname,
'ea8c23be80522d3ca8f36742c93758370df89188816f50cb4e1b2a6a3012d659',
fname=fname,
path=data_path,
)
model = emg3d.load(data_path + fname)['model']
###############################################################################
# Let's check the model
model
###############################################################################
# So it is an isotropic model defined in terms of resistivities. Let's check
# the grid
grid = model.grid
grid
###############################################################################
# Define the survey
# -----------------
#
# If you have actual field data then this info would normally come from a data
# file or similar. Here we create our own dummy survey, and later will create
# synthetic data for it.
#
# A **Survey** instance contains all survey-related information, hence source
# and receiver positions and measured data. See the relevant documentation for
# more details: :class:`emg3d.surveys.Survey`.
#
#
# Extract seafloor to simulate source and receiver depths
# '''''''''''''''''''''''''''''''''''''''''''''''''''''''
#
# To create a realistic survey we create a small routine that finds the
# seafloor, so we can place receivers on the seafloor and sources 50 m above
# it. We use the fact that the seawater has resistivity of 0.3 Ohm.m in the
# model, and is the lowest value.
seafloor = np.ones((grid.shape_cells[0], grid.shape_cells[1]))
for i in range(grid.shape_cells[0]):
for ii in range(grid.shape_cells[1]):
        # We take the seafloor to be the first cell whose resistivity
# is below 0.33
seafloor[i, ii] = grid.nodes_z[:-1][
model.property_x[i, ii, :] < 0.33][0]
# Create a 2D interpolation function from it
bathymetry = RectBivariateSpline(
grid.cell_centers_x, grid.cell_centers_y, seafloor)
###############################################################################
# Source and receiver positions
# '''''''''''''''''''''''''''''
#
# Sources and receivers can be defined in a few different ways. One way is by
# providing coordinates, where two coordinate formats are accepted:
#
# - ``(x0, x1, y0, y1, z0, z1)``: finite length dipole,
# - ``(x, y, z, azimuth, elevation)``: point dipole,
#
# where the angles (azimuth and elevation) are in degrees. For the coordinate
# system see `coordinate_system
# <https://empymod.emsig.xyz/en/stable/examples/coordinate_system.html>`_.
#
# A survey can contain electric and magnetic receivers, arbitrarily rotated.
# However, the ``Simulation`` is currently limited to electric receivers.
#
# Note that the survey just knows about the sources, receivers, frequencies,
# and observed data - it does not know anything about an underlying model.
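#
# For instance (coordinates below are illustrative only):
#
# - ``(0, 100, 0, 0, -2000, -2000)``: a 100 m long x-directed dipole at 2 km depth;
# - ``(50, 0, -2000, 0, 0)``: a point dipole at its centre with the same orientation.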
# Angles for horizontal, x-directed Ex point dipoles
elevation = 0.0
azimuth = 0.0
# Acquisition source frequencies (Hz)
frequencies = [0.5, 1.0]
# Source coordinates
src_x = np.arange(1, 4)*5000
src_y = 7500
# Source depths: 50 m above seafloor
src_z = bathymetry(src_x, src_y).ravel()+50
src = emg3d.surveys.txrx_coordinates_to_dict(
emg3d.TxElectricDipole,
(src_x, src_y, src_z, azimuth, elevation)
)
# Receiver positions
rec_x = np.arange(3, 18)*1e3
rec_y = np.arange(3)*1e3+6500
RX, RY = np.meshgrid(rec_x, rec_y, indexing='ij')
RZ = bathymetry(rec_x, rec_y)
rec = emg3d.surveys.txrx_coordinates_to_dict(
emg3d.RxElectricPoint,
(RX.ravel(), RY.ravel(), RZ.ravel(), azimuth, elevation)
)
###############################################################################
# Create Survey
# '''''''''''''
#
# If you have observed data you can provide them; here we will create synthetic
# data later on. What you do have to define are the expected noise floor and
# relative error, which are used to compute the misfit later on. Alternatively,
# you can directly provide the standard deviation; see
# :class:`emg3d.surveys.Survey`.
survey = emg3d.surveys.Survey(
name='GemPy-II Survey A', # Name of the survey
sources=src, # Source coordinates
receivers=rec, # Receiver coordinates
frequencies=frequencies, # Two frequencies
# data=data, # If you have observed data
noise_floor=1e-15,
relative_error=0.05,
)
# Let's have a look at the survey:
survey
###############################################################################
# Our survey holds our sources and receivers and initialized an ``observed``
# data variable filled with NaNs. Each source and receiver got a name assigned. If
# you prefer other names you would have to define the sources and receivers
# through ``emg3d.surveys.Dipole``, and provide a list of dipoles to the survey
# instead of only a tuple of coordinates.
#
# We can also look at a particular source or receiver, e.g.,
survey.sources['TxED-1']
###############################################################################
# Which shows you all you need to know about a particular dipole: name, type
# (electric or magnetic), coordinates of its center, angles, and length.
#
# QC model and survey
# -------------------
grid.plot_3d_slicer(model.property_x, xslice=12000, yslice=7000,
pcolor_opts={'norm': LogNorm(vmin=0.3, vmax=200)})
# Plot survey in figure above
fig = plt.gcf()
fig.suptitle('Resistivity model (Ohm.m) and survey layout')
axs = fig.get_children()
rec_coords = survey.receiver_coordinates()
src_coords = survey.source_coordinates()
axs[1].plot(rec_coords[0], rec_coords[1], 'bv')
axs[2].plot(rec_coords[0], rec_coords[2], 'bv')
axs[3].plot(rec_coords[2], rec_coords[1], 'bv')
axs[1].plot(src_coords[0], src_coords[1], 'r*')
axs[2].plot(src_coords[0], src_coords[2], 'r*')
axs[3].plot(src_coords[2], src_coords[1], 'r*')
plt.show()
###############################################################################
# Create a Simulation (to compute 'observed' data)
# ------------------------------------------------
#
# The simulation class combines a model with a survey, and can compute
# synthetic data for it.
#
# Automatic gridding
# ''''''''''''''''''
#
# We use the automatic gridding feature implemented in the simulation class to
# use source- and frequency-dependent grids for the computation.
# Consult the following docs for more information:
#
# - `gridding_opts` in :class:`emg3d.simulations.Simulation`;
# - :func:`emg3d.meshes.estimate_gridding_opts`; and
# - :func:`emg3d.meshes.construct_mesh`.
gopts = {
'properties': [0.3, 10, 1., 0.3],
'min_width_limits': (100, 100, 50),
'stretching': (None, None, [1.05, 1.5]),
'domain': (
[rec_coords[0].min()-100, rec_coords[0].max()+100],
[rec_coords[1].min()-100, rec_coords[1].max()+100],
[-5500, -2000]
),
}
###############################################################################
# Now we can initiate the simulation class and QC it:
simulation = emg3d.simulations.Simulation(
name="True Model", # A name for this simulation
survey=survey, # Our survey instance
model=model, # The model
gridding='both', # Frequency- and source-dependent meshes
max_workers=4, # How many parallel jobs
# solver_opts, # Any parameter to pass to emg3d.solve
gridding_opts=gopts, # Gridding options
)
# Let's QC our Simulation instance
simulation
###############################################################################
# Compute the data
# ''''''''''''''''
#
# We pass here the argument ``observed=True``; this way, the synthetic data is
# stored in our Survey as ``observed`` data; otherwise it would be stored as
# ``synthetic``. This is important later for optimization. It also adds
# Gaussian noise according to the noise floor and relative error we defined in
# the survey. By setting a minimum offset the receivers close to the source are
# switched off.
#
# This computes all results in parallel; in this case six models, three sources
# times two frequencies. You can change the number of workers at any time by
# setting ``simulation.max_workers``.
simulation.compute(observed=True, min_offset=500)
###############################################################################
# A ``Simulation`` has a few convenience functions, e.g.:
#
# - ``simulation.get_efield('TxED-1', 0.5)``: Returns the electric field of the
# entire domain for source ``'TxED-1'`` and frequency 0.5 Hz.
# - ``simulation.get_hfield``; ``simulation.get_sfield``: Similar functions to
# retrieve the magnetic fields and the source fields.
# - ``simulation.get_model``; ``simulation.get_grid``: Similar functions to
# retrieve the computational grid and the model for a given source and
# frequency.
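#
# As a quick sketch (using the names defined above, not executed here)::
#
#     efield = simulation.get_efield('TxED-1', 0.5)  # full-domain E-field
#     cmodel = simulation.get_model('TxED-1', 0.5)   # model on its computational mesh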
#
# When we now look at our survey we see that the observed data variable is
# filled with the responses at the receiver locations. Note that the
# ``synthetic`` data is the noise-free computed data; the ``observed`` data, on
# the other hand, has Gaussian noise added and is set to NaN for positions too
# close to the source.
survey
###############################################################################
# QC Data
# -------
plt.figure()
plt.title("Inline receivers for all sources")
obs = simulation.data.observed[:, 1::3, :]
syn = simulation.data.synthetic[:, 1::3, :]
for i, src in enumerate(survey.sources.keys()):
for ii, freq in enumerate(survey.frequencies):
plt.plot(rec_coords[0][1::3],
abs(syn.loc[src, :, freq].data.real),
"k-", lw=0.5)
plt.plot(rec_coords[0][1::3],
abs(syn.loc[src, :, freq].data.imag),
"k-", lw=0.5)
plt.plot(rec_coords[0][1::3],
abs(obs.loc[src, :, freq].data.real),
f"C{ii}.-",
label=f"|Real|; freq={freq} Hz" if i == 0 else None
)
plt.plot(rec_coords[0][1::3],
abs(obs.loc[src, :, freq].data.imag),
f"C{ii}.--",
label=f"|Imag|; freq={freq} Hz" if i == 0 else None
)
plt.yscale('log')
plt.legend(ncol=2, framealpha=1)
plt.xlabel('x-coordinate (m)')
plt.ylabel('$|E_x|$ (V/m)')
plt.show()
###############################################################################
# How to store surveys and simulations to disk
# --------------------------------------------
#
# Survey and Simulations can store (and load) themselves to (from) disk.
#
# - A survey stores all sources, receivers, frequencies, and the observed data.
# - A simulation stores the survey, the model, the synthetic data. (It can also
# store much more, such as all electric fields, source and frequency
# dependent meshes and models, etc. What it actually stores is defined by the
# parameter ``what``).
# Survey file name
# survey_fname = 'GemPy-II-survey-A.h5'
# To store, run
# survey.to_file(survey_fname) # .h5, .json, or .npz
# To load, run
# survey = emg3d.surveys.Survey.from_file(survey_fname)
# In the same manner you could store and load the entire simulation:
# Simulation file name
# simulation_fname = file-name.ending # for ending in [h5, json, npz]
# To store, run
# simulation.to_file(simulation_fname, what='results')
# To load, run
# simulation = emg3d.simulations.Simulation.from_file(simulation_fname)
###############################################################################
emg3d.Report()
| 34.721127 | 79 | 0.611553 |
acf5a70a59b7570b5e834d0989218cc2cb463814 | 12,330 | py | Python | pyoverkiz/enums/general.py | egguy/python-overkiz-api | b83f9220c39a65dc4f9b3e8e4acecf6cfe724193 | [
"MIT"
] | null | null | null | pyoverkiz/enums/general.py | egguy/python-overkiz-api | b83f9220c39a65dc4f9b3e8e4acecf6cfe724193 | [
"MIT"
] | null | null | null | pyoverkiz/enums/general.py | egguy/python-overkiz-api | b83f9220c39a65dc4f9b3e8e4acecf6cfe724193 | [
"MIT"
] | null | null | null | import logging
from enum import Enum, IntEnum, unique
_LOGGER = logging.getLogger(__name__)
@unique
class ProductType(IntEnum):
NONE = 0
ACTUATOR = 1
SENSOR = 2
VIDEO = 3
CONTROLLABLE = 4
GATEWAY = 5
INFRASTRUCTURE_COMPONENT = 6
GROUP = 7
@unique
class DataType(IntEnum):
NONE = 0
INTEGER = 1
FLOAT = 2
STRING = 3
BLOB = 4
DATE = 5
BOOLEAN = 6
PASSWORD = 9
JSON_ARRAY = 10
JSON_OBJECT = 11
@unique
class FailureType(IntEnum):
UNKNOWN = -1
NO_FAILURE = 0
NON_EXECUTING = 11
ERROR_WHILE_EXECUTING = 12
ACTUATORUNKNOWN = 101
ACTUATORNOANSWER = 102
ERRORREADWRITEACCESS = 103
ERRORCOMMAND = 104
CMDUNKNOWN = 105
CMDCANCELLED = 106
NOREMOTECONTROL = 107
ERROR_TRANSFER_KEY = 108
ERRORDATABASE = 109
MODELOCALENABLED = 110
BAD_CMD = 111
BAD_HD = 112
BAD_LEN = 113
BAD_ADDRESS = 114
BAD_PARAM = 115
NOT_FOUND_ETX = 116
BAD_CRC_SERIAL = 117
BAD_STATUS = 118
KEY_NOT_RECEIVE = 119
INSERTION_ERROR = 120
NODE_NOT_VERIFY_WITH_NEW_KEY = 121
POOL_FULL = 122
ADDRESS_UNKNOWN = 123
NODE_CANT_PAIRED = 124
NODE_CANT_UPDATE_TRANSFER_STATUS = 125
UNKNOWN_ERROR = 126
INVALID_CHANNEL = 127
INVALID_COMMAND = 128
SERIAL_IO_ERROR = 129
OPERATION_NOT_ALLOWED = 130
RESTART_STACK = 131
INCOMPLETE_DISCOVER = 132
TRANFER_KEY_NO_REMOTE_CONTROLLER = 133
TRANFER_KEY_MULTI_REMOTE_CONTROLLER = 134
RF_PROTOCOL_FATAL_ERROR = 135
INTERNAL_ERROR = 136
BUSY_RADIO_ERROR = 137
BAD_MAC_ERROR = 138
SETUP_REQUIRED = 139
MASTER_AUTHENTICATION_FAILED_ERROR = 140
END_OF_RECEIVING_CONFIGURATION_MODE = 141
DATA_TRANSPORT_SERVICE_ERROR = 142
DATA_TRANSPORT_SERVICE_ABORTED_BY_RECIPIENT = 143
STOPPED_BY_CONFIGURATION_OPERATION_ERROR = 144
COMMAND_NAME_TYPE_INVALID = 145
COMMAND_NAME_NOT_INSTALLED_OR_INVALID = 146
COMMAND_INVALID_LEN_ON_FRAME = 147
COMMAND_ZONE_INVALID_OR_NOT_INSTALLED = 148
COMMAND_SENSOR_VALUE_INVALID = 149
COMMAND_ZONE_TEMPERATURE_INVALID = 150
COMMAND_DHW_NOT_INSTALLED_OR_INVALID = 151
COMMAND_INSERTION_FAILED_ERROR = 152
NONEXEC_BLOCKED_BY_HAZARD = 153
NONEXEC_OVERHEATING_PROTECTION = 154
NONEXEC_DEVICE_LIMITATION = 155
NONEXEC_DOOR_IS_OPENED = 156
NONEXEC_MAINTENANCE_REQUIRED = 157
DEAD_SENSOR = 158
SENSOR_MAINTENANCE_REQUIRED = 159
NONEXEC_OTHER = 160
WHILEEXEC_BLOCKED_BY_HAZARD = 161
WHILEEXEC_OVERHEATING_PROTECTION = 162
WHILEEXEC_DEVICE_LIMITATION = 163
WHILEEXEC_DOOR_IS_OPENED = 164
WHILEEXEC_MAINTENANCE_REQUIRED = 165
WHILEEXEC_OTHER = 166
PRIORITY_LOCK__LOCAL_USER = 167
PRIORITY_LOCK__USER = 168
PRIORITY_LOCK__RAIN = 169
PRIORITY_LOCK__TIMER = 170
PRIORITY_LOCK__SECURITY = 171
PRIORITY_LOCK__UPS = 172
PRIORITY_LOCK__SFC = 173
PRIORITY_LOCK__LSC = 174
PRIORITY_LOCK__SAAC = 175
PRIORITY_LOCK__WIND = 176
PRIORITY_LOCK__EXTERNAL_ACCESS = 177
PRIORITY_LOCK__EMERGENCY = 178
NO_DISTANT_FOR_DISCOVER = 179
ANOTHER_COMMAND_IS_RUNNING = 180
PROBLEM_WITH_BOILER_COMMUNICATION = 181
LOCKED_BY_RCM = 182
RCM_NO_REMOTE_CONTROL = 183
DISCOVER_NO_REMOTE_CONTROLLER_ERROR = 184
COMMAND_INTERRUPTED = 185
PRIORITY_LOCK__WIND_FORCING_AVAILABLE = 190
PRIORITY_LOCK__WIND_FORCING_UNAVAILABLE = 191
PRIORITY_LOCK__NO_SECURITY_DEVICE = 192
PRIORITY_LOCK__DEAD_SENSOR = 193
PRIORITY_LOCK__UNKNOWN_ERROR = 194
DBUS_ERROR = 200
DBUS_NO_MEMORY = 201
DBUS_SERVICE_UNKNOWN = 202
DBUS_NAME_HAS_NO_OWNER = 203
DBUS_NO_REPLY = 204
DBUS_IO_ERROR = 205
DBUS_BAD_ADDRESS = 206
DBUS_NOT_SUPPORTED = 207
DBUS_LIMITS_EXCEEDED = 208
DBUS_ACCESS_DENIED = 209
DBUS_AUTH_FAILED = 210
DBUS_NO_SERVER = 211
DBUS_TIMEOUT = 212
DBUS_NO_NETWORK = 213
DBUS_ADDRESS_IN_USE = 214
DBUS_DISCONNECTED = 215
DBUS_INVALID_ARGS = 216
DBUS_FILE_NOT_FOUND = 217
DBUS_FILE_EXISTS = 218
DBUS_UNKNOWN_METHOD = 219
DBUS_UNKNOWN_OBJECT = 220
DBUS_UNKNOWN_INTERFACE = 221
DBUS_UNKNOWN_PROPERTY = 222
DBUS_PROPERTY_READ_ONLY = 223
DBUS_TIMED_OUT = 224
DBUS_MATCH_RULE_NOT_FOUND = 225
DBUS_MATCH_RULE_INVALID = 226
DBUS_SPAWN_EXEC_FAILED = 227
DBUS_SPAWN_FORK_FAILED = 228
DBUS_SPAWN_CHILD_EXITED = 229
DBUS_SPAWN_CHILD_SIGNALED = 230
DBUS_SPAWN_FAILED = 231
DBUS_SPAWN_SETUP_FAILED = 232
DBUS_SPAWN_CONFIG_INVALID = 233
DBUS_SPAWN_SERVICE_INVALID = 234
DBUS_SPAWN_SERVICE_NOT_FOUND = 235
DBUS_SPAWN_PERMISSIONS_INVALID = 236
DBUS_SPAWN_FILE_INVALID = 237
DBUS_SPAWN_NO_MEMORY = 238
DBUS_UNIX_PROCESS_ID_UNKNOWN = 239
DBUS_INVALID_SIGNATURE = 240
DBUS_INVALID_FILE_CONTENT = 241
DBUS_SELINUX_SECURITY_CONTEXT_UNKNOWN = 242
DBUS_ADT_AUDIT_DATA_UNKNOWN = 243
DBUS_OBJECT_PATH_IN_USE = 244
DBUS_INCONSISTENT_MESSAGE = 245
NOT_IMPLEMENTED_YET = 300
MODULE_NOT_LOADED = 301
APPLICATION_NOT_RUNNING = 302
NONEXEC_MANUALLY_CONTROLLED = 400
NONEXEC_AUTOMATIC_CYCLE = 401
NONEXEC_BATTERY_LEVEL = 402
NONEXEC_WRONG_LOAD_CONNECTED = 403
NONEXEC_HIGH_CONSUMPTION = 404
NONEXEC_LOW_CONSUMPTION = 405
NONEXEC_COLOUR_NOT_REACHABLE = 406
NONEXEC_USER_ACTION_NEEDED = 407
NONEXEC_COMMAND_INCOMPATIBLE_WITH_MOVEMENT = 408
NONEXEC_CANNOT_CHANGE_STATE = 409
NONEXEC_FILTER_MAINTENANCE = 410
NONEXEC_OPERATING_MODE_NOT_SUPPORTED = 411
WHILEEXEC_MANUALLY_CONTROLLED = 420
WHILEEXEC_AUTOMATIC_CYCLE = 421
WHILEEXEC_BATTERY_LEVEL = 422
WHILEEXEC_WRONG_LOAD_CONNECTED = 423
WHILEEXEC_HIGH_CONSUMPTION = 424
WHILEEXEC_LOW_CONSUMPTION = 425
WHILEEXEC_COLOUR_NOT_REACHABLE = 426
WHILEEXEC_USER_ACTION_NEEDED = 427
WHILEEXEC_COMMAND_INCOMPATIBLE_WITH_MOVEMENT = 428
WHILEEXEC_CANNOT_CHANGE_STATE = 429
WHILEEXEC_FILTER_MAINTENANCE = 430
WHILEEXEC_OPERATING_MODE_NOT_SUPPORTED = 431
OVERRIDEMODE_ERROR = 450
CAMERA_INVALID_CREDENTIALS = 500
UNSUPPORTED_CAMERA_TYPE = 501
NETWORK_COULDNT_RESOLVE_HOST = 601
NETWORK_COULDNT_CONNECT = 602
NETWORK_OPERATION_TIMEDOUT = 603
LPB_APP_OUT_OF_RANGE = 701
LPB_APP_OUT_OF_MAXRANGE = 702
LPB_APP_OUT_OF_MINRANGE = 703
LPB_APP_MEMORY_ERROR = 704
LPB_APP_READ_ONLY = 705
LPB_APP_ILLEGAL_CMD = 706
LPB_APP_VOID_DP = 707
LPB_APP_TYPE_CONFLICT = 708
LPB_APP_READ_CMD_INCORRECT = 709
LPB_APP_WRITE_CMD_INCORRECT = 710
LPB_APP_CMD_TYPE_INCORRECT = 711
LPB_APP_WRITE_TIMEOUT = 712
LPB_APP_CANNOT_WRITE_GW = 713
LPB_APP_UNKNOWN_GATEWAY = 714
LPB_APP_GATEWAY_UNREACHABLE = 715
APPLICATION_ERROR = 800
HUE_INVALID_CREDENTIALS = 900
HUE_LINK_BUTTON_NOT_PRESSED = 901
HUE_DEVICE_IS_OFF = 902
TIMED_OUT = 10001
CANCELLED = 10002
UNKNOWN_ERROR_CODE = 10003
SERVER_FAILURE = 10004
PEER_DOWN = 10005
GATEWAY_BUFFER_OVERFLOW = 10006
UNKNOWN_DETAILED_ERROR = 10007
@classmethod
def _missing_(cls, value): # type: ignore
_LOGGER.warning(f"Unsupported value {value} has been returned for {cls}")
return cls.UNKNOWN
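# E.g. FailureType(999999) does not raise a ValueError; it logs a warning and
# falls back to FailureType.UNKNOWN via the _missing_ hook above.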
@unique
class EventName(str, Enum):
UNKNOWN = "Unknown"
ACTION_GROUP_CREATED = "ActionGroupCreatedEvent"
ACTION_GROUP_DELETED = "ActionGroupDeletedEvent"
ACTION_GROUP_UPDATED = "ActionGroupUpdatedEvent"
CALENDAR_DAY_CREATED = "CalendarDayCreatedEvent"
CALENDAR_DAY_UPDATED = "CalendarDayUpdatedEvent"
CALENDAR_RULE_CREATED = "CalendarRuleCreatedEvent"
CALENDAR_RULE_DELETED = "CalendarRuleDeletedEvent"
CAMERA_DISCOVERED = "CameraDiscoveredEvent"
CAMERA_DISCOVER_FAILED = "CameraDiscoverFailedEvent"
CAMERA_UPLOAD_PHOTO = "CameraUploadPhotoEvent"
CLOUD_SYNCHRONIZATION_FAILED = "CloudSynchronizationFailedEvent"
COMMAND_EXECUTION_STATE_CHANGE = "CommandExecutionStateChangedEvent"
CONDITION_GROUP_CREATED = "ConditionGroupCreatedEvent"
CONDITION_GROUP_DELETED = "ConditionGroupDeletedEvent"
CONDITION_GROUP_UPDATED = "ConditionGroupUpdatedEvent"
DELAYED_TRIGGER_CANCELLED = "DelayedTriggerCancelledEvent"
DEVICE_AVAILABLE = "DeviceAvailableEvent"
DEVICE_CREATED = "DeviceCreatedEvent"
DEVICE_DELETION_FAILED = "DeviceDeletionFailedEvent"
DEVICE_DISABLED = "DeviceDisabledEvent"
DEVICE_FIRMWARE_UPDATE_FAILED = "DeviceFirmwareUpdateFailedEvent"
DEVICE_PROTOCOL_AVAILABLE = "DeviceProtocolAvailableEvent"
DEVICE_PROTOCOL_UNAVAILABLE = "DeviceProtocolUnavailableEvent"
DEVICE_REMOVED = "DeviceRemovedEvent"
DEVICE_STATE_CHANGED = "DeviceStateChangedEvent"
DEVICE_UNAVAILABLE = "DeviceUnavailableEvent"
DEVICE_UPDATED = "DeviceUpdatedEvent"
DISCOVER_COMPLETE = "DiscoverCompleteEvent"
DISCOVER_FAILED = "DiscoverFailedEvent"
ELIOT_DISCOVER_GATEWAYS_COMPLETED = "EliotDiscoverGatewaysCompletedEvent"
ELIOT_DISCOVER_GATEWAYS_FAILED = "EliotDiscoverGatewaysFailedEvent"
ELIOT_DISCOVER_GATEWAY_COMPLETED = "EliotDiscoverGatewayCompletedEvent"
ELIOT_DISCOVER_GATEWAY_FAILED = "EliotDiscoverGatewayFailedEvent"
ELIOT_REFRESH_CURRENT_TOKEN_COMPLETED = "EliotRefreshCurrentTokenCompletedEvent"
ELIOT_REFRESH_CURRENT_TOKEN_FAILED = "EliotRefreshCurrentTokenFailedEvent"
END_USER_LOGIN = "EndUserLoginEvent"
ENOCEAN_BAD_DEVICE_STIMULATION = "EnOceanBadDeviceStimulationEvent"
ENOCEAN_KNOWN_DEVICE_FOUND = "EnOceanKnownDeviceFoundEvent"
ENOCEAN_LEARN_STARTED = "EnOceanLearnStartedEvent"
ENOCEAN_LEARN_STOPPED = "EnOceanLearnStoppedEvent"
EXECUTION_REGISTERED = "ExecutionRegisteredEvent"
EXECUTION_STATE_CHANGED = "ExecutionStateChangedEvent"
GATEWAY_ALIVE = "GatewayAliveEvent"
GATEWAY_BOOT = "GatewayBootEvent"
GATEWAY_DOWN = "GatewayDownEvent"
GATEWAY_FUNCTION_CHANGED = "GatewayFunctionChangedEvent"
GATEWAY_MODE_CHANGED = "GatewayModeChangedEvent"
GATEWAY_SYNCHRONIZATION_ENDED = "GatewaySynchronizationEndedEvent"
GATEWAY_SYNCHRONIZATION_STARTED = "GatewaySynchronizationStartedEvent"
GROUP_TRIGGERED = "GroupTriggeredEvent"
INVALID_ADDRESS = "InvalidAddressEvent"
IO_CHANGED_KEY = "IOChangedKeyEvent"
OPENDOORS_DISCOVER_COMPLETED = "OpenDoorsDiscoverCompletedEvent"
OPENDOORS_DISCOVER_FAILED = "OpenDoorsDiscoverFailedEvent"
OPENDOORS_GENERATE_OAUTH_TOKENS_COMPLETED = (
"OpenDoorsGenerateOAuthTokensCompletedEvent"
)
OPENDOORS_GENERATE_OAUTH_TOKENS_FAILED = "OpenDoorsGenerateOAuthTokensFailedEvent"
PLACE_CREATED = "PlaceCreatedEvent"
PLACE_DELETED = "PlaceDeletedEvent"
PLACE_UPDATED = "PlaceUpdatedEvent"
PURGE_PARTIAL_RAW_DEVICES = "PurgePartialRawDevicesEvent"
REFRESH_ALL_DEVICES_STATES_COMPLETED = "RefreshAllDevicesStatesCompletedEvent"
SETUP_JOB_COMPLETED = "SetupJobCompletedEvent"
SETUP_JOB_FAILED = "SetupJobFailedEvent"
SETUP_TRIGGER_TRIGGERED = "SetupTriggerTriggeredEvent"
SOMFY_PROTECT_CURRENT_TOKEN_COMPLETED = "SomfyProtectCurrentTokenCompletedEvent"
SOMFY_PROTECT_CURRENT_TOKEN_FAILED = "SomfyProtectCurrentTokenFailedEvent"
SOMFY_PROTECT_GET_SITES_COMPLETED = "SomfyProtectGetSitesCompletedEvent"
SONOS_GET_TOPOLOGY_SUCCESS = "SonosGetTopologySuccessEvent"
TOKEN_CREATED = "TokenCreatedEvent"
TOKEN_CREATION_FAILED = "TokenCreationFailedEvent"
TOKEN_DELETION_FAILED = "TokenDeletionFailedEvent"
TOKEN_REMOVED = "TokenRemovedEvent"
PUSH_SUBSCRIPTION_CREATED = "PushSubscriptionCreatedEvent"
VALID_ADDRESS = "ValidAddressEvent"
ZIGBEE_BIND_NETWORK_COMPLETED = "ZigbeeBindNetworkCompletedEvent"
ZIGBEE_BIND_NETWORK_FAILED = "ZigbeeBindNetworkFailedEvent"
ZIGBEE_CREATE_NETWORK_COMPLETED = "ZigbeeCreateNetworkCompletedEvent"
ZIGBEE_CREATE_NETWORK_FAILED = "ZigbeeCreateNetworkFailedEvent"
ZIGBEE_JOIN_NETWORK_FAILED = "ZigbeeJoinNetworkFailedEvent"
ZIGBEE_LEAVE_NETWORK_COMPLETED = "ZigbeeLeaveNetworkCompletedEvent"
ZIGBEE_LEAVE_NETWORK_FAILED = "ZigbeeLeaveNetworkFailedEvent"
ZIGBEE_REFRESH_NETWORK_COMPLETED = "ZigbeeRefreshNetworkCompletedEvent"
@classmethod
def _missing_(cls, value): # type: ignore
_LOGGER.warning(f"Unsupported value {value} has been returned for {cls}")
return cls.UNKNOWN
| 37.027027 | 86 | 0.776237 |
acf5a75a552ee971906b03bd7b25d5287c02dbf8 | 23,589 | py | Python | tests/python/frontend/mxnet/test_forward.py | YuanLinNV/tvm | 4e8bc87340623c4363400f70d36b3a69a32b3839 | [
"Apache-2.0"
] | null | null | null | tests/python/frontend/mxnet/test_forward.py | YuanLinNV/tvm | 4e8bc87340623c4363400f70d36b3a69a32b3839 | [
"Apache-2.0"
] | null | null | null | tests/python/frontend/mxnet/test_forward.py | YuanLinNV/tvm | 4e8bc87340623c4363400f70d36b3a69a32b3839 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import operator
import tvm
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
from tvm import relay
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
import model_zoo
def verify_mxnet_frontend_impl(mx_symbol,
data_shape=(1, 3, 224, 224),
out_shape=(1, 1000),
gluon_impl=False,
name=None,
dtype='float32'):
"""Use name different from test to avoid let nose pick it up"""
if gluon_impl:
def get_gluon_output(name, x):
net = vision.get_model(name)
net.collect_params().initialize(mx.init.Xavier())
net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
inputs=mx.sym.var('data'),
params=net.collect_params())
out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
return out, net_sym
else:
def get_mxnet_output(symbol, x, dtype='float32'):
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
mod = mx.mod.Module(symbol, label_names=None)
mod.bind(data_shapes=[('data', x.shape)], for_training=False)
mod.init_params()
mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
out = mod.get_outputs()[0].asnumpy()
args, auxs = mod.get_params()
return out, args, auxs
def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
shape_dict = {"data": x.shape}
if gluon_impl:
new_sym, params = relay.frontend.from_mxnet(symbol, shape_dict)
else:
new_sym, params = relay.frontend.from_mxnet(symbol,
shape_dict,
arg_params=args,
aux_params=auxs)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(new_sym, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
# random input
x = np.random.uniform(size=data_shape)
if gluon_impl:
gluon_out, gluon_sym = get_gluon_output(name, x)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
else:
mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
assert "data" not in args
for target, ctx in ctx_list():
tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_mlp():
mlp = model_zoo.mx_mlp()
verify_mxnet_frontend_impl(mlp,
data_shape=(1, 1, 28, 28),
out_shape=(1, 10))
def test_forward_vgg():
for n in [11]:
mx_sym = model_zoo.mx_vgg(n)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_resnet():
for n in [18]:
        mx_sym = model_zoo.mx_resnet(n)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_elu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='elu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_rrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='rrelu', lower_bound=0.3, upper_bound=0.7)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_prelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='prelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_softrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.Activation(data, act_type='softrelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_fc_flatten():
# test flatten=True option in mxnet 0.11.1
data = mx.sym.var('data')
try:
mx_sym = mx.sym.FullyConnected(data, num_hidden=100, flatten=True)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
mx_sym = mx.sym.FullyConnected(mx.sym.Flatten(data), num_hidden=100, flatten=False)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
except:
pass
def test_forward_clip():
data = mx.sym.var('data')
    data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.clip(data, a_min=0, a_max=1)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_split():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=False)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 1, 2, 1))
def test_forward_split_squeeze():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=True)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 2, 1))
def test_forward_expand_dims():
data = mx.sym.var('data')
mx_sym = mx.sym.expand_dims(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 1, 3, 4))
def test_forward_pooling():
data = mx.sym.var('data')
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='avg')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='max')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
def test_forward_lrn():
data = mx.sym.var('data')
mx_sym = mx.sym.LRN(data, alpha=2, beta=2, knorm=1, nsize=5)
verify_mxnet_frontend_impl(mx_sym, (1, 10, 24, 24), (1, 10, 24, 24))
def test_forward_ones():
data = mx.sym.var('data')
ones = mx.sym.ones(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, ones)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros():
data = mx.sym.var('data')
zeros = mx.sym.zeros(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, zeros)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_ones_like():
data = mx.sym.var('data')
mx_sym = mx.sym.ones_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros_like():
data = mx.sym.var('data')
mx_sym = mx.sym.zeros_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_argmax():
data = mx.sym.var('data')
mx_sym = mx.sym.argmax(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (5, 3), (5,))
def test_forward_argmin():
data = mx.sym.var('data')
mx_sym = mx.sym.argmin(data, axis=0)
verify_mxnet_frontend_impl(mx_sym, (5, 4), (4,))
def test_forward_slice():
data = mx.sym.var('data')
mx_sym = mx.sym.slice(data, begin=(0, 1), end=(2, 4))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 3))
mx_sym = mx.sym.slice(data, begin=(-1, 1), end=(-3, 4), step=(-1, 2))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 2))
def test_forward_where():
cond = mx.sym.var('cond')
x = mx.sym.var('x')
y = mx.sym.var('y')
dshape = (2, 2)
dtype = 'float32'
mx_sym = mx.sym.where(cond, x, y)
np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
np_x = np.random.uniform(size=dshape).astype(dtype)
np_y = np.random.uniform(size=dshape).astype(dtype)
mx_cond = mx.nd.array(np_cond)
mx_x = mx.nd.array(np_x)
mx_y = mx.nd.array(np_y)
shapes = {'cond': dshape, 'x': dshape, 'y': dshape}
mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
mod.bind(data_shapes=shapes.items(), for_training=False)
mod.init_params()
args, auxs = mod.get_params()
mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, args, auxs)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(np_cond, np_x, np_y)
tvm.testing.assert_allclose(op_res.asnumpy(), mx_out)
def test_forward_arange():
def _mx_symbol(F, start, stop, step):
if start is None and step is None:
sym = F.arange(stop)
elif start is None:
sym = F.arange(stop, step=step)
elif step is None:
sym = F.arange(start, stop)
else:
sym = F.arange(start, stop, step)
return sym
def verify(start, stop, step):
ref_res = _mx_symbol(mx.nd, start, stop, step).asnumpy()
mx_sym = _mx_symbol(mx.sym, start, stop, step)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
verify(0, 20, None)
verify(0, 20, 2)
verify(1, 20, None)
verify(1, 20, 2)
verify(1, 20, 1.5)
verify(1, 20.5, None)
verify(1, 20, 3)
verify(20, 1, -1)
verify(20, 1, -1.5)
def _mx_symbol(F, op_name, inputs):
op = getattr(F, op_name)
return op(*inputs)
def test_forward_broadcast_ops():
for op in ["broadcast_add", "broadcast_sub", "broadcast_mul",
"broadcast_div", "broadcast_mod", "broadcast_maximum",
"broadcast_minimum", "broadcast_equal", "broadcast_not_equal",
"broadcast_greater", "broadcast_greater_equal",
"broadcast_lesser", "broadcast_lesser_equal"]:
a_shape = (3, 4, 5)
b_shape = (4, 5)
if op == "broadcast_mod":
dtype = 'int32'
a_np = np.random.randint(1, 100, size=a_shape).astype(dtype)
b_np = np.random.randint(1, 100, size=b_shape).astype(dtype)
else:
dtype = 'float32'
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': a_shape, 'b': b_shape}
new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_elemwise_ops():
for op in ["elemwise_add", "elemwise_sub", "elemwise_mul",
"elemwise_div", "maximum", "minimum"]:
shape = (3, 4, 5)
dtype = 'float32'
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = np.random.uniform(size=shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': shape, 'b': shape}
new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_scalar_ops():
for op in [operator.add, operator.sub, operator.mul, operator.truediv,
operator.pow, operator.lt, operator.le, operator.eq,
operator.ne, operator.gt, operator.ge]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = op(mx.sym.var('a'), b_scalar)
ref_res = op(mx.nd.array(a_np), b_scalar)
shapes = {'a': a_shape}
new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
for op in ["maximum", "minimum"]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
shapes = {'a': a_shape}
new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_slice_axis():
def verify(shape, axis, begin, end):
data_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.slice_axis(mx.nd.array(data_np), axis, begin, end)
mx_sym = mx.sym.slice_axis(mx.sym.var("data"), axis, begin, end)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(data_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), 0, 1, 2)
verify((3, 4), 0, 1, None)
verify((3, 4), 1, 0, 2)
verify((3, 4), 1, -3, -1)
verify((3, 4), -1, -3, -1)
def test_forward_slice_like():
def verify(x_shape, y_shape, axes):
x_np = np.random.uniform(size=x_shape).astype("float32")
y_np = np.random.uniform(size=y_shape).astype("float32")
if axes is None:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
else:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_np, y_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), (2, 3), None)
verify((3, 4), (2, 3), (0, 1))
verify((3, 4), (2, 3), (0))
verify((3, 4), (2, 3), (-1))
def test_forward_l2_normalize():
data = mx.sym.var('data')
mx_sym = mx.sym.L2Normalization(data, mode="channel")
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4, 5), (2, 3, 4, 5))
def test_forward_shape_array():
def verify(shape):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.shape_array(mx.nd.array(x_np))
mx_sym = mx.sym.shape_array(mx.sym.var("x"))
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1,))
verify((3, 4, 5))
verify((3, 4, 5, 6))
def test_forward_squeeze():
def verify(shape, axis):
x_np = np.random.uniform(size=shape).astype("float32")
if axis is None:
ref_res = mx.nd.squeeze(mx.nd.array(x_np))
mx_sym = mx.sym.squeeze(mx.sym.var("x"))
else:
ref_res = mx.nd.squeeze(mx.nd.array(x_np), axis=axis)
mx_sym = mx.sym.squeeze(mx.sym.var("x"), axis=axis)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 3, 1), None)
verify((1, 3, 1), 0)
verify((1, 3, 1), 2)
verify((1, 3, 1), (0, 2))
def test_forward_broadcast_axis():
def verify(shape, axis, size):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.broadcast_axis(mx.nd.array(x_np), axis=axis, size=size)
mx_sym = mx.sym.broadcast_axis(mx.sym.var("x"), axis=axis, size=size)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 2, 1), 2, 3)
verify((1, 2, 1), (0, 2), (2, 3))
def test_forward_full():
def verify(val, shape, dtype):
ctx = mx.cpu()
ref_res = mx.nd.full(shape, val, dtype=dtype)
mx_sym = mx.sym.full(shape, val, dtype=dtype)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
# Skip testing graph runtime because this op will be optimized out
# by constant folding.
for kind in ["debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify(2, (3, 4), "float32")
verify(2, (3, 4), "int32")
verify(3.5, (1, 3, 4), "float32")
def test_forward_embedding():
def verify(data_shape, weight_shape):
in_dim, out_dim = weight_shape
x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
w_np = np.random.uniform(size=weight_shape).astype("float32")
ref_res = mx.nd.Embedding(mx.nd.array(x_np), mx.nd.array(w_np),
input_dim=in_dim, output_dim=out_dim)
mx_sym = mx.sym.Embedding(mx.sym.var("x"), mx.sym.var("w"),
input_dim=in_dim, output_dim=out_dim)
new_sym, _ = relay.frontend.from_mxnet(
mx_sym, {"x": data_shape, "w": weight_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x=x_np, w=w_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (4, 5))
verify((2, 3, 4), (4, 5))
def test_forward_smooth_l1():
data = mx.sym.var('data')
mx_sym = mx.sym.smooth_l1(data)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
mx_sym = mx.sym.smooth_l1(data, scalar=1.0)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
def test_forward_take():
def verify(shape, indices_src, axis, mode="clip"):
x_np = np.random.uniform(size=shape).astype("float32")
indices_np = np.array(indices_src, dtype="float32")
ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_np, indices_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2,2), [[[1,0],[0,1]]], 0)
verify((2,2), [[[1,0],[0,1]]], 1)
verify((4,3,5,6), [[2,1,0,0]], -2)
verify((3,4), [-1, 5], 0)
verify((3,4), [-1, 5], 0, mode="wrap")
verify((3,4), [-1, 5], 1)
verify((3,4), [-1, 5], 1, mode="wrap")
def test_forward_gather_nd():
def verify(xshape, yshape, y_data):
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(new_sym)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
def test_forward_bilinear_resize():
# add tests including scale_height and scale_width when mxnet is updated to version 1.5
data = mx.sym.var('data')
mx_sym = mx.sym.contrib.BilinearResize2D(data, height=5, width=10)
verify_mxnet_frontend_impl(mx_sym, (1, 2, 3, 4), (1, 2, 5, 10))
if __name__ == '__main__':
test_forward_mlp()
test_forward_vgg()
test_forward_resnet()
test_forward_elu()
test_forward_rrelu()
test_forward_prelu()
test_forward_softrelu()
test_forward_fc_flatten()
test_forward_clip()
test_forward_split()
test_forward_split_squeeze()
test_forward_expand_dims()
test_forward_pooling()
test_forward_lrn()
test_forward_ones()
test_forward_zeros()
test_forward_ones_like()
test_forward_zeros_like()
test_forward_argmax()
test_forward_argmin()
test_forward_where()
test_forward_arange()
test_forward_broadcast_ops()
test_forward_elemwise_ops()
test_forward_scalar_ops()
test_forward_slice_like()
test_forward_slice_axis()
test_forward_l2_normalize()
test_forward_shape_array()
test_forward_squeeze()
test_forward_broadcast_axis()
test_forward_full()
test_forward_embedding()
test_forward_smooth_l1()
test_forward_take()
test_forward_gather_nd()
test_forward_bilinear_resize()
| 42.65642 | 134 | 0.60117 |
acf5a7a03d5956a12236594a468f2cd75a4b8b4e | 147 | py | Python | tests/chdir_test/chdir_test.py | usnistgov/CDE | 05137888a8ad67b0796814170ba61deef51bec03 | [
"BSD-3-Clause"
] | 4 | 2020-07-28T19:11:07.000Z | 2021-09-24T07:00:39.000Z | tests/chdir_test/chdir_test.py | usnistgov/CDE | 05137888a8ad67b0796814170ba61deef51bec03 | [
"BSD-3-Clause"
] | null | null | null | tests/chdir_test/chdir_test.py | usnistgov/CDE | 05137888a8ad67b0796814170ba61deef51bec03 | [
"BSD-3-Clause"
] | 2 | 2021-05-13T18:32:27.000Z | 2021-11-15T09:07:33.000Z | import os
os.chdir('/home/pgbovine/')
f = open('hello.txt', 'w')
f.write('hello')
f.close()
f = open('hello.txt', 'r')
print(f.read())
f.close()
| 12.25 | 27 | 0.598639 |
acf5a981eecdee9fffa8d65da0d8b3d0d32d5558 | 17,157 | py | Python | src/rprblender/properties/view_layer.py | DagerD/RadeonProRenderBlenderAddon | 188756291a0662f85b91b61aec276794785d75bd | [
"Apache-2.0"
] | null | null | null | src/rprblender/properties/view_layer.py | DagerD/RadeonProRenderBlenderAddon | 188756291a0662f85b91b61aec276794785d75bd | [
"Apache-2.0"
] | null | null | null | src/rprblender/properties/view_layer.py | DagerD/RadeonProRenderBlenderAddon | 188756291a0662f85b91b61aec276794785d75bd | [
"Apache-2.0"
] | null | null | null | #**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import bpy
from bpy.props import (
BoolProperty,
PointerProperty,
BoolVectorProperty,
EnumProperty,
IntProperty,
FloatProperty,
)
import pyrpr
import math
from rprblender.utils import logging
from . import RPR_Properties
log = logging.Log(tag='properties.view_layer')
class RPR_ContourProperties(RPR_Properties):
""" Propoerties to do a contour pass """
# CONTOUR render mode settings
use_object_id: BoolProperty(
name="Use Object ID",
description="Use Object ID for Contour rendering",
default=True,
)
use_material_id: BoolProperty(
name="Use Material Index",
description="Use Material Index for Contour rendering",
default=True,
)
use_shading_normal: BoolProperty(
name="Use Shading Normal",
description="Use Shading Normal for Contour rendering",
default=True,
)
use_uv: BoolProperty(
name="Use UV",
description="Use UV extraction for Contour rendering",
default=True,
)
object_id_line_width: FloatProperty(
name="Line Width Object",
description="Line width for Object ID contours",
min=1.0, max=10.0,
default=1.0,
)
material_id_line_width: FloatProperty(
name="Line Width Material",
description="Line width for Material Index contours",
min=1.0, max=10.0,
default=1.0,
)
shading_normal_line_width: FloatProperty(
name="Line Width Normal",
description="Line width for Shading Normal contours",
min=1.0, max=10.0,
default=1.0,
)
uv_line_width: FloatProperty(
name="Line Width UV",
min=1.0, max=10.0,
default=1.0,
)
normal_threshold: FloatProperty(
name="Normal Threshold",
description="Threshold for normals, in degrees",
subtype='ANGLE',
min=0.0, max=math.radians(180.0),
default=math.radians(45.0),
)
uv_threshold: FloatProperty(
name="Threshold UV",
min=0.0, max=1.0,
default=1.0,
)
antialiasing: FloatProperty(
name="Antialiasing",
min=0.0, max=1.0,
default=1.0,
)
def export_contour_settings(self, rpr_context):
""" set Contour render mode parameters """
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_USE_OBJECTID, self.use_object_id)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_USE_MATERIALID, self.use_material_id)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_USE_NORMAL, self.use_shading_normal)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_USE_UV, self.use_uv)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_LINEWIDTH_OBJECTID, self.object_id_line_width)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_LINEWIDTH_MATERIALID, self.material_id_line_width)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_LINEWIDTH_NORMAL, self.shading_normal_line_width)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_LINEWIDTH_UV, self.uv_line_width)
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_NORMAL_THRESHOLD, math.degrees(self.normal_threshold))
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_UV_THRESHOLD, math.radians(self.uv_threshold * 180))
rpr_context.set_parameter(pyrpr.CONTEXT_CONTOUR_ANTIALIASING, self.antialiasing)
class RPR_DenoiserProperties(RPR_Properties):
""" Denoiser properties. This is a child property in RPR_ViewLayerProperties """
enable: BoolProperty(
description="Enable RPR Denoiser",
default=False,
)
# only enable ML denoiser on windows
items = (
('BILATERAL', "Bilateral", "Bilateral", 0),
('LWR', "Local Weighted Regression", "Local Weighted Regression", 1),
('EAW', "Edge Avoiding Wavelets", "Edge Avoiding Wavelets", 2),
('ML', "Machine Learning", "Machine Learning", 3)
)
filter_type: EnumProperty(
name="Filter Type",
items=items,
description="Filter type",
default='ML'
)
scale_by_iterations: BoolProperty(
name="Scale Denoising Iterations",
description="Scale the amount of denoiser blur by number of iterations. "
"This will give more blur for renders with less samples, "
"and become sharper as more samples are added",
default=True
)
# bilateral props
radius: IntProperty(
name="Radius",
description="Radius",
min = 1, max = 50, default = 1
)
p_sigma: FloatProperty(
name="Position Sigma",
description="Threshold for detecting position differences",
min = 0.0, soft_max = 1.0, default = .1
)
# EAW props
color_sigma: FloatProperty(
name="Color Sigma",
description="Threshold for detecting color differences",
min = 0.0, soft_max = 1.0, default = .75
)
normal_sigma: FloatProperty(
name="Normal Sigma",
description="Threshold for detecting normal differences",
min = 0.0, soft_max = 1.0, default = .01
)
depth_sigma: FloatProperty(
name="Depth Sigma",
description="Threshold for detecting z depth differences",
min = 0.0, soft_max = 1.0, default = .01
)
trans_sigma: FloatProperty(
name="ID Sigma",
description="Threshold for detecting Object ID differences",
min = 0.0, soft_max = 1.0, default = .01
)
# LWR props
samples: IntProperty(
name="Samples",
description="Number of samples used, more will give better results while being longer",
min = 2, soft_max = 10, max = 100, default = 4
)
half_window: IntProperty(
name="Filter radius",
description="The radius of pixels to sample from",
min = 1, soft_max = 10, max = 100, default = 4
)
bandwidth: FloatProperty(
name="Bandwidth",
description="Bandwidth of the filter, a samller value gives less noise, but may filter image detail",
min = 0.1, max = 1.0, default = .2
)
# ML props
ml_color_only: BoolProperty(
name="Use Color AOV only",
description="Use Color AOV only instead of using additional required AOVs",
default=False
)
ml_use_fp16_compute_type: BoolProperty(
name="Use 16-bit Compute",
description="Reduce precision to 16 bit. It uses less memory generally for similar quality.\n"
"Available only for viewport render",
default=True
)
def get_settings(self, scene, is_final_engine=True):
return {
'enable': self.enable and self.is_available(scene, is_final_engine),
'filter_type': self.filter_type,
'color_sigma': self.color_sigma,
'normal_sigma': self.normal_sigma,
'p_sigma': self.p_sigma,
'depth_sigma': self.depth_sigma,
'trans_sigma': self.trans_sigma,
'radius': self.radius,
'samples': self.samples,
'half_window': self.half_window,
'bandwidth': self.bandwidth,
'ml_color_only': self.ml_color_only,
'ml_use_fp16_compute_type': self.ml_use_fp16_compute_type,
}
def is_available(self, scene, is_final_engine=True):
return True
class RPR_ViewLayerProperites(RPR_Properties):
"""
Properties for view layer with AOVs
"""
aovs_info = (
{
'rpr': pyrpr.AOV_COLOR,
'name': "Combined",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_DEPTH,
'name': "Depth",
'channel': 'Z'
},
{
'rpr': pyrpr.AOV_COLOR,
'name': "Color",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_UV,
'name': "UV",
'channel': 'UVA'
},
{
'rpr': pyrpr.AOV_OBJECT_ID,
'name': "Object Index",
'channel': 'X'
},
{
'rpr': pyrpr.AOV_MATERIAL_ID,
'name': "Material Index",
'channel': 'X'
},
{
'rpr': pyrpr.AOV_WORLD_COORDINATE,
'name': "World Coordinate",
'channel': 'XYZ'
},
{
'rpr': pyrpr.AOV_GEOMETRIC_NORMAL,
'name': "Geometric Normal",
'channel': 'XYZ'
},
{
'rpr': pyrpr.AOV_SHADING_NORMAL,
'name': "Shading Normal",
'channel': 'XYZ'
},
{
'rpr': pyrpr.AOV_CAMERA_NORMAL,
'name': "Camera Normal",
'channel': 'XYZ'
},
{
'rpr': pyrpr.AOV_OBJECT_GROUP_ID,
'name': "Group Index",
'channel': 'X'
},
{
'rpr': pyrpr.AOV_SHADOW_CATCHER,
'name': "Shadow Catcher",
'channel': 'A'
},
{
'rpr': pyrpr.AOV_REFLECTION_CATCHER,
'name': "Reflection Catcher",
'channel': 'A'
},
{
'rpr': pyrpr.AOV_BACKGROUND,
'name': "Background",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_EMISSION,
'name': "Emission",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_VELOCITY,
'name': "Velocity",
'channel': 'XYZ'
},
{
'rpr': pyrpr.AOV_DIRECT_ILLUMINATION,
'name': "Direct Illumination",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_INDIRECT_ILLUMINATION,
'name': "Indirect Illumination",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_AO,
'name': "Ambient Occlusion",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_DIRECT_DIFFUSE,
'name': "Direct Diffuse",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_DIRECT_REFLECT,
'name': "Direct Reflect",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_INDIRECT_DIFFUSE,
'name': "Indirect Diffuse",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_INDIRECT_REFLECT,
'name': "Indirect Reflect",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_REFRACT,
'name': "Refraction",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_VOLUME,
'name': "Volume",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_OPACITY,
'name': "Opacity",
'channel': 'A'
},
{
'rpr': pyrpr.AOV_LIGHT_GROUP0,
'name': "Light Group 1",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_LIGHT_GROUP1,
'name': "Light Group 2",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_LIGHT_GROUP2,
'name': "Light Group 3",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_LIGHT_GROUP3,
'name': "Light Group 4",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_VARIANCE,
'name': "Color Variance",
'channel': 'RGB'
},
{
'rpr': pyrpr.AOV_DIFFUSE_ALBEDO,
'name': "Diffuse Albedo",
'channel': 'RGB'
},
)
# we went over 32 aovs so these must be separated
cryptomatte_aovs_info = (
{
'rpr': pyrpr.AOV_CRYPTOMATTE_MAT0,
'name': "Cryptomatte Mat0",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_CRYPTOMATTE_MAT1,
'name': "Cryptomatte Mat1",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_CRYPTOMATTE_MAT2,
'name': "Cryptomatte Mat2",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_CRYPTOMATTE_OBJ0,
'name': "Cryptomatte Obj0",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_CRYPTOMATTE_OBJ1,
'name': "Cryptomatte Obj1",
'channel': 'RGBA'
},
{
'rpr': pyrpr.AOV_CRYPTOMATTE_OBJ2,
'name': "Cryptomatte Obj2",
'channel': 'RGBA'
},
)
contour_info = {
'rpr': pyrpr.AOV_COLOR,
'name': "Outline",
'channel': 'RGBA'
}
def aov_enabled_changed(self, context):
""" Request update of active render passes for Render Layers compositor input node """
context.view_layer.update_render_passes()
enable_aovs: BoolVectorProperty(
name="Render Passes (AOVs)",
description="Render passes (Arbitrary output variables)",
size=len(aovs_info),
default=tuple(aov['name'] in ["Combined", "Depth"] for aov in aovs_info),
update=aov_enabled_changed,
)
crytomatte_aov_object: BoolProperty(
name="Cryptomatte Object AOVs",
description="Enable Object Cryptomatte AOVs",
default=False,
update=aov_enabled_changed,
)
crytomatte_aov_material: BoolProperty(
name="Cryptomatte Material AOVs",
description="Enable Material Cryptomatte AOVs",
default=False,
update=aov_enabled_changed,
)
# TODO: Probably better to create each aov separately like: aov_depth: BoolProperty(...)
denoiser: PointerProperty(type=RPR_DenoiserProperties)
use_contour_render: BoolProperty(
name="Contour",
description="Use Contour rendering mode. Final render only",
default=False,
update=aov_enabled_changed,
)
contour: PointerProperty(type=RPR_ContourProperties)
def export_aovs(self, view_layer: bpy.types.ViewLayer, rpr_context, rpr_engine, enable_adaptive, cryptomatte_allowed):
"""
Exports AOVs settings. Also adds required passes to rpr_engine
Note: view_layer here is parent of self, but it is not available from self.id_data
"""
log(f"Syncing view layer: {view_layer.name}")
# should always be enabled
rpr_context.enable_aov(pyrpr.AOV_COLOR)
rpr_context.enable_aov(pyrpr.AOV_DEPTH)
for i, enable_aov in enumerate(self.enable_aovs):
if not enable_aov:
continue
aov = self.aovs_info[i]
if aov['rpr'] == pyrpr.AOV_VARIANCE and not enable_adaptive:
continue
if aov['name'] not in ["Combined", "Depth"]:
# TODO this seems to assume that combine and depth enabled already?
rpr_engine.add_pass(aov['name'], len(aov['channel']), aov['channel'], layer=view_layer.name)
rpr_context.enable_aov(aov['rpr'])
if cryptomatte_allowed:
if self.crytomatte_aov_material:
for i in range(3):
aov = self.cryptomatte_aovs_info[i]
rpr_engine.add_pass(aov['name'], len(aov['channel']), aov['channel'], layer=view_layer.name)
rpr_context.enable_aov(aov['rpr'])
if self.crytomatte_aov_object:
for i in range(3, 6):
aov = self.cryptomatte_aovs_info[i]
rpr_engine.add_pass(aov['name'], len(aov['channel']), aov['channel'], layer=view_layer.name)
rpr_context.enable_aov(aov['rpr'])
if self.use_contour_render:
aov = self.contour_info
rpr_engine.add_pass(aov['name'], len(aov['channel']), aov['channel'], layer=view_layer.name)
def enable_aov_by_name(self, name):
        ''' Enables a given AOV by name '''
for i, aov_info in enumerate(self.aovs_info):
if aov_info['name'] == name:
self.enable_aovs[i] = True
return
@classmethod
def register(cls):
log("Register")
bpy.types.ViewLayer.rpr = PointerProperty(
name="RPR ViewLayer Settings",
description="RPR view layer settings",
type=cls,
)
@classmethod
def unregister(cls):
log("Unregister")
del bpy.types.ViewLayer.rpr
| 31.137931 | 122 | 0.560296 |
acf5aa7e0a88095ab885b090eec3c84fa73ad9ed | 2,677 | py | Python | src/snowflake/connector/time_util.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | null | null | null | src/snowflake/connector/time_util.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | null | null | null | src/snowflake/connector/time_util.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import random
import time
from logging import getLogger
from typing import Any, Callable, Optional
logger = getLogger(__name__)
try:
from threading import _Timer as Timer
except ImportError:
from threading import Timer
DEFAULT_MASTER_VALIDITY_IN_SECONDS = 4 * 60 * 60 # seconds
class HeartBeatTimer(Timer):
"""A thread which executes a function every client_session_keep_alive_heartbeat_frequency seconds."""
def __init__(
self, client_session_keep_alive_heartbeat_frequency: int, f: Callable
) -> None:
interval = client_session_keep_alive_heartbeat_frequency
super(HeartBeatTimer, self).__init__(interval, f)
# Mark this as a daemon thread, so that it won't prevent Python from exiting.
self.daemon = True
def run(self) -> None:
while not self.finished.is_set():
self.finished.wait(self.interval)
if not self.finished.is_set():
try:
self.function()
except Exception as e:
logger.debug("failed to heartbeat: %s", e)
def get_time_millis() -> int:
"""Returns the current time in milliseconds."""
return int(time.time() * 1000)
class DecorrelateJitterBackoff:
# Decorrelate Jitter backoff
# https://www.awsarchitectureblog.com/2015/03/backoff.html
def __init__(self, base: int, cap: int) -> None:
self._base = base
self._cap = cap
def next_sleep(self, _: Any, sleep: int) -> int:
return min(self._cap, random.randint(self._base, sleep * 3))
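# Illustrative sketch only (assumed values, not part of the original module):
# with base=1 and cap=16 the sleep grows roughly exponentially with jitter,
# then saturates at the cap.
def _demo_backoff() -> None:
    backoff = DecorrelateJitterBackoff(base=1, cap=16)
    sleep = 1
    for attempt in range(5):
        sleep = backoff.next_sleep(attempt, sleep)
        print(attempt, sleep)  # random values, capped at 16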
class TimerContextManager:
"""Context manager class to easily measure execution of a code block.
Once the context manager finishes, the class should be cast into an int to retrieve
result.
Example:
with TimerContextManager() as measured_time:
pass
download_metric = measured_time.get_timing_millis()
"""
def __init__(self) -> None:
self._start: Optional[int] = None
self._end: Optional[int] = None
def __enter__(self) -> "TimerContextManager":
self._start = get_time_millis()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self._end = get_time_millis()
def get_timing_millis(self) -> int:
"""Get measured timing in milliseconds."""
if self._start is None or self._end is None:
raise Exception(
"Trying to get timing before TimerContextManager has finished"
)
return self._end - self._start
| 29.744444 | 105 | 0.655211 |
acf5aab091d2d5742b9069228c32fb10505793de | 1,351 | py | Python | src/image_quality/bin/train_koniq_mos.py | junyongyou/phiqnet | f0e10299eed47390cec79af68a2aade4f64bc5f2 | [
"MIT"
] | 3 | 2021-01-23T06:01:32.000Z | 2021-07-29T03:30:17.000Z | src/image_quality/bin/train_koniq_mos.py | junyongyou/aihiqnet | 9b23be61c43332b107065cb26fbb8ec28c006a9f | [
"MIT"
] | 1 | 2021-01-07T09:16:31.000Z | 2021-01-07T12:13:13.000Z | src/image_quality/bin/train_koniq_mos.py | junyongyou/phiqnet | f0e10299eed47390cec79af68a2aade4f64bc5f2 | [
"MIT"
] | 1 | 2022-02-05T03:19:34.000Z | 2022-02-05T03:19:34.000Z | from image_quality.train.train import train_main
# def main():
if __name__ == '__main__':
args = {}
args['multi_gpu'] = 1
args['gpu'] = 0
args['result_folder'] = r'..\databases\results\phiqnet_koniq_mos'
args['n_quality_levels'] = 1
# Choose between 'resnet50', 'densnet121', 'vgg16'
args['backbone'] = 'resnet50'
# args['backbone'] = 'densnet121'
# args['backbone'] = 'vgg16'
# Choose between False and True, default: False
args['naive_backbone'] = False
# Image and score must be provided
args['images_scores_file'] = r'..\databases\train_val_test_koniq.pkl'
args['image_folder'] = r'..\databases\koniq_all'
args['initial_epoch'] = 0
args['lr_base'] = 1e-4/2
args['lr_schedule'] = True
args['batch_size'] = 8
args['epochs'] = 100
args['feature_fusion'] = True
args['attention_module'] = True
args['image_aug'] = True
    # Depending on which backbone is used, choose the corresponding ImageNet pretrained weights file; set to None if no pretrained weights are to be used.
# args['weights'] = r'..\pretrained_weights\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
args['weights'] = r'..\pretrained_weights\resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
# args['weights'] = None
args['do_finetune'] = True
train_main(args)
| 30.022222 | 150 | 0.671355 |
acf5ab35075e870cb80e39bb6872d7df32278080 | 1,322 | py | Python | questions/linked-list-random-node/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 141 | 2017-12-12T21:45:53.000Z | 2022-03-25T07:03:39.000Z | questions/linked-list-random-node/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 32 | 2015-10-05T14:09:52.000Z | 2021-05-30T10:28:41.000Z | questions/linked-list-random-node/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 56 | 2015-09-30T05:23:28.000Z | 2022-03-08T07:57:11.000Z | """
Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.
Example 1:
Input
["Solution", "getRandom", "getRandom", "getRandom", "getRandom", "getRandom"]
[[[1, 2, 3]], [], [], [], [], []]
Output
[null, 1, 3, 2, 2, 3]
Explanation
Solution solution = new Solution([1, 2, 3]);
solution.getRandom(); // return 1
solution.getRandom(); // return 3
solution.getRandom(); // return 2
solution.getRandom(); // return 2
solution.getRandom(); // return 3
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
Constraints:
The number of nodes in the linked list will be in the range [1, 10^4]
-10^4 <= Node.val <= 10^4
At most 10^4 calls will be made to getRandom.
Follow up:
What if the linked list is extremely large and its length is unknown to you?
Could you solve this efficiently without using extra space?
"""
import random


class Solution:
def __init__(self, head):
self.head = head
    def getRandom(self):
        # Reservoir-sampling variant with O(1) extra space for a list of
        # unknown length: `ans` points at the k-th node while `n` counts the
        # nodes seen so far. Advancing `ans` with probability k/n leaves each
        # node with probability 1/n of being selected once the list ends.
        n, k = 1, 1
        head, ans = self.head, self.head
        while head.next:
            n += 1
            head = head.next
            if random.random() < k / n:
                ans = ans.next
                k += 1
        return ans.val
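# Minimal usage sketch (not part of the LeetCode template). `ListNode` is
# normally provided by the judge, so a stand-in definition is assumed here;
# over many draws each node should be chosen roughly equally often.
if __name__ == "__main__":
    from collections import Counter

    class ListNode:  # assumed stand-in for the judge-provided class
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    solution = Solution(ListNode(1, ListNode(2, ListNode(3))))
    print(Counter(solution.getRandom() for _ in range(30000)))
    # each of 1, 2, 3 should appear roughly 10000 times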
acf5ac21064c1a48d6df8667c47f8e308812001d | 3,228 | py | Python | tests/test_protocol.py | berezovskyi/python-o365 | aaad538b729e9e30c43300768d99e5ec0dec5e45 | [
"Apache-2.0"
] | 1 | 2019-11-10T18:59:06.000Z | 2019-11-10T18:59:06.000Z | tests/test_protocol.py | berezovskyi/python-o365 | aaad538b729e9e30c43300768d99e5ec0dec5e45 | [
"Apache-2.0"
] | 2 | 2019-11-11T16:43:00.000Z | 2019-11-12T03:56:52.000Z | tests/test_protocol.py | berezovskyi/python-o365 | aaad538b729e9e30c43300768d99e5ec0dec5e45 | [
"Apache-2.0"
] | 1 | 2022-02-04T19:38:42.000Z | 2022-02-04T19:38:42.000Z | import pytest
import json
from pytz import UnknownTimeZoneError
from tzlocal import get_localzone
from O365.connection import Connection, Protocol, MSGraphProtocol, MSOffice365Protocol, DEFAULT_SCOPES
TEST_SCOPES = ['Contacts.Read.Shared', 'Mail.Send.Shared', 'User.Read', 'Contacts.ReadWrite.Shared', 'Mail.ReadWrite.Shared', 'Mail.Read.Shared', 'Contacts.Read', 'Sites.ReadWrite.All', 'Mail.Send', 'Mail.ReadWrite', 'offline_access', 'Mail.Read', 'Contacts.ReadWrite', 'Files.ReadWrite.All', 'Calendars.ReadWrite', 'User.ReadBasic.All']
class TestProtocol:
def setup_class(self):
self.proto = Protocol(protocol_url="testing", api_version="0.0")
def teardown_class(self):
pass
def test_blank_protocol(self):
with pytest.raises(ValueError):
p = Protocol()
def test_to_api_case(self):
assert(self.proto.to_api_case("CaseTest") == "case_test")
def test_get_scopes_for(self):
with pytest.raises(ValueError):
            self.proto.get_scopes_for(123)  # should error since it's not a list or tuple.
assert(self.proto.get_scopes_for(['mailbox']) == ['mailbox'])
assert(self.proto.get_scopes_for(None) == [])
assert(self.proto.get_scopes_for('mailbox') == ['mailbox'])
self.proto._oauth_scopes = DEFAULT_SCOPES
assert(self.proto.get_scopes_for(['mailbox']) == ['Mail.Read'])
        # This test verifies that the scopes in the default list don't change
        # without us noticing. It makes sure that all the scopes we get back
        # are in the current set of scopes we expect, and all the scopes we
        # expect are in the scopes we get back. The lists contain the same
        # items but may not be in the same order, so they are not equal.
scopes = self.proto.get_scopes_for(None)
for scope in scopes:
assert(scope in TEST_SCOPES)
for scope in TEST_SCOPES:
assert(scope in scopes)
assert(self.proto.get_scopes_for('mailbox') == ['Mail.Read'])
def test_prefix_scope(self):
assert(self.proto.prefix_scope('Mail.Read') == 'Mail.Read')
assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read')
self.proto.protocol_scope_prefix = 'test_prefix_'
assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read')
assert(self.proto.prefix_scope('test_prefix_Mail.Read') == 'test_prefix_Mail.Read')
assert(self.proto.prefix_scope('Mail.Read') == 'test_prefix_Mail.Read')
def test_decendant_MSOffice365Protocol(self):
# Basically we just test that it can create the class w/o erroring.
msp = MSOffice365Protocol()
# Make sure these don't change without going noticed.
assert(msp.keyword_data_store['message_type'] == 'Microsoft.OutlookServices.Message')
assert(msp.keyword_data_store['file_attachment_type'] == '#Microsoft.OutlookServices.FileAttachment')
assert(msp.keyword_data_store['item_attachment_type'] == '#Microsoft.OutlookServices.ItemAttachment')
assert(msp.max_top_value == 999)
| 43.04 | 337 | 0.664808 |
acf5acad9ee8e1cf6270109e6b8b0b173b600b55 | 1,768 | py | Python | apps/base/views/activate.py | picsldev/pyerp | e998e3e99a4e45033d54a6b1df50697f7288f67f | [
"MIT"
] | null | null | null | apps/base/views/activate.py | picsldev/pyerp | e998e3e99a4e45033d54a6b1df50697f7288f67f | [
"MIT"
] | null | null | null | apps/base/views/activate.py | picsldev/pyerp | e998e3e99a4e45033d54a6b1df50697f7288f67f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Global application views
"""
# Django libraries
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
# Local application imports
from ..models import PyUser
from ..tokens import ACCOUNT_ACTIVATION_TOKEN
# ========================================================================== #
class ActivateView(RedirectView):
"""Esta clase activa a la persona cuando confirma el link enviado desde
su correo
"""
url = 'base:login'
def get(self, request, *args, **kwargs):
uidb64 = self.kwargs['uidb64']
token = self.kwargs['token']
url = self.get_redirect_url(*args, **kwargs)
uid = force_text(urlsafe_base64_decode(uidb64))
try:
user = PyUser.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, PyUser.DoesNotExist):
user = None
token_valid = ACCOUNT_ACTIVATION_TOKEN.check_token(user, token)
if user is not None and token_valid:
user.is_active = True
user.save()
messages.success(
self.request,
_('Welcome, your account has been successfully activated. Please log in using your credentials.')
)
else:
messages.error(
self.request,
_('The sign up confirm link is invalid. If your account is not yet active, use the password recovery link.')
)
return HttpResponseRedirect(reverse_lazy(url))
| 32.740741 | 124 | 0.634615 |
acf5accfc70065bd9141f1e7525797d4c2f0fa7b | 36 | py | Python | src/python/grabcut/__init__.py | luiscarlosgph/grabcut | 78344bb039d921f0510344f1ab8fab85e71728f1 | [
"MIT"
] | 4 | 2021-02-12T09:56:19.000Z | 2021-02-23T22:25:20.000Z | src/python/grabcut/__init__.py | luiscarlosgph/grabcut | 78344bb039d921f0510344f1ab8fab85e71728f1 | [
"MIT"
] | 1 | 2021-02-14T18:35:40.000Z | 2021-02-15T13:55:00.000Z | src/python/grabcut/__init__.py | luiscarlosgph/grabcut | 78344bb039d921f0510344f1ab8fab85e71728f1 | [
"MIT"
] | null | null | null | from grabcut.grabcut import GrabCut
| 18 | 35 | 0.861111 |
acf5adc6619414d28429862c87fab944b15fa595 | 9,934 | py | Python | data/archive/download_rh_sigma995.py | Skye777/transformer | 177834bcb55e59f8ea0fbe666734c148effbec8d | [
"Apache-2.0"
] | null | null | null | data/archive/download_rh_sigma995.py | Skye777/transformer | 177834bcb55e59f8ea0fbe666734c148effbec8d | [
"Apache-2.0"
] | null | null | null | data/archive/download_rh_sigma995.py | Skye777/transformer | 177834bcb55e59f8ea0fbe666734c148effbec8d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#################################################################
# Python Script to retrieve 164 online Data files of 'ds131.2',
# total 3.46G. This script uses 'requests' to download data.
#
# Highlight this script by Select All, Copy and Paste it into a file;
# make the file executable and run it on command line.
#
# You need pass in your password as a parameter to execute
# this script; or you can set an environment variable RDAPSWD
# if your Operating System supports it.
#
# Contact rpconroy@ucar.edu (Riley Conroy) for further assistance.
#################################################################
import sys, os
import requests
def check_file_status(filepath, filesize):
sys.stdout.write('\r')
sys.stdout.flush()
size = int(os.stat(filepath).st_size)
percent_complete = (size / filesize) * 100
sys.stdout.write('%.3f %s' % (percent_complete, '% Completed'))
sys.stdout.flush()
# Try to get password
if len(sys.argv) < 2 and 'RDAPSWD' not in os.environ:
try:
import getpass
input = getpass.getpass
except:
try:
input = raw_input
except:
pass
pswd = input('Password: ')
else:
try:
pswd = sys.argv[1]
except:
pswd = os.environ['RDAPSWD']
url = 'https://rda.ucar.edu/cgi-bin/login'
values = {'email': '1811017@tongji.edu.cn', 'passwd': pswd, 'action': 'login'}
# Authenticate
ret = requests.post(url, data=values)
if ret.status_code != 200:
print('Bad Authentication')
print(ret.text)
exit(1)
dspath = 'https://rda.ucar.edu/data/ds131.2/'
filelist = [
'pgrbanl/pgrbanl_mean_1851_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1852_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1853_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1854_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1855_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1856_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1857_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1858_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1859_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1860_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1861_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1862_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1863_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1864_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1865_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1866_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1867_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1868_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1869_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1870_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1871_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1872_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1873_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1874_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1875_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1876_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1877_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1878_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1879_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1880_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1881_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1882_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1883_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1884_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1885_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1886_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1887_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1888_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1889_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1890_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1891_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1892_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1893_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1894_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1895_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1896_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1897_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1898_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1899_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1900_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1901_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1902_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1903_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1904_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1905_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1906_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1907_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1908_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1909_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1910_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1911_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1912_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1913_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1914_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1915_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1916_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1917_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1918_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1919_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1920_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1921_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1922_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1923_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1924_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1925_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1926_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1927_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1928_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1929_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1930_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1931_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1932_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1933_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1934_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1935_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1936_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1937_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1938_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1939_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1940_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1941_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1942_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1943_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1944_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1945_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1946_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1947_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1948_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1949_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1950_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1951_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1952_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1953_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1954_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1955_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1956_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1957_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1958_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1959_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1960_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1961_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1962_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1963_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1964_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1965_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1966_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1967_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1968_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1969_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1970_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1971_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1972_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1973_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1974_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1975_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1976_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1977_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1978_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1979_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1980_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1981_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1982_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1983_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1984_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1985_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1986_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1987_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1988_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1989_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1990_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1991_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1992_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1993_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1994_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1995_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1996_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1997_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1998_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_1999_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2000_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2001_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2002_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2003_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2004_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2005_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2006_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2007_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2008_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2009_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2010_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2011_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2012_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2013_RH_sigma.grib',
'pgrbanl/pgrbanl_mean_2014_RH_sigma.grib']
for file in filelist:
filename = dspath + file
file_base = '../meta-data/rh/' + os.path.basename(file)
print('Downloading', file_base)
req = requests.get(filename, cookies=ret.cookies, allow_redirects=True, stream=True)
filesize = int(req.headers['Content-length'])
with open(file_base, 'wb') as outfile:
chunk_size = 1048576
for chunk in req.iter_content(chunk_size=chunk_size):
outfile.write(chunk)
if chunk_size < filesize:
check_file_status(file_base, filesize)
check_file_status(file_base, filesize)
print()
| 42.09322 | 88 | 0.750252 |
acf5af0c79cb72102425f61cdab9601ba3c7f240 | 1,729 | py | Python | paccmann_generator/drug_evaluators/aromatic_ring.py | PaccMann/paccmann_generator | 135cce12f29fcb5d8665fe99a64a5176067d2ab2 | [
"MIT"
] | 5 | 2019-11-04T20:33:37.000Z | 2022-01-09T11:12:35.000Z | paccmann_generator/drug_evaluators/aromatic_ring.py | PaccMann/paccmann_generator | 135cce12f29fcb5d8665fe99a64a5176067d2ab2 | [
"MIT"
] | 5 | 2020-04-01T08:16:26.000Z | 2020-11-03T08:44:51.000Z | paccmann_generator/drug_evaluators/aromatic_ring.py | PaccMann/paccmann_generator | 135cce12f29fcb5d8665fe99a64a5176067d2ab2 | [
"MIT"
] | 3 | 2020-09-08T14:05:56.000Z | 2021-06-07T18:57:39.000Z | """Aromatic Ring evaluator."""
import logging
import rdkit
from rdkit import Chem
from .drug_evaluator import DrugEvaluator
logger = logging.getLogger(__name__)
class AromaticRing(DrugEvaluator):
"""
Evaluation class that assesses whether a molecule has an aromatic ring.
99% of drugs have at least one aromatic ring:
Roughley, Stephen D., and Allan M. Jordan. "The medicinal chemist’s
toolbox: an analysis of reactions used in the pursuit of drug
candidates." Journal of medicinal chemistry 54.10 (2011): 3451-3479.
"""
def __init__(self):
super(AromaticRing, self).__init__()
def __call__(self, mol):
"""
Returns 1 if mol has at least one aromatic ring and 0 otherwise.
Args:
mol - Union[str, rdkit.Chem.rdchem.Mol]: SMILES or RdKit molecule.
Returns:
float - 1. if aromatic ring was found, 0 else.
"""
# Error handling.
if type(mol) == rdkit.Chem.rdchem.Mol:
pass
elif type(mol) == str:
mol = Chem.MolFromSmiles(mol, sanitize=True)
if mol is None:
raise ValueError("Invalid SMILES string.")
else:
raise TypeError("Input must be from {str, rdkit.Chem.rdchem.Mol}")
try:
has_ring = False
for ring in mol.GetRingInfo().AtomRings():
if any([mol.GetAtomWithIdx(a).GetIsAromatic() for a in ring]):
has_ring = True
if has_ring:
break
return 1. if has_ring else 0.
except Exception:
            logger.warning(f'Error in computing ring information for {mol}')
return 0.
| 28.816667 | 78 | 0.591093 |
acf5af130fc567ccb94d004be92bfc73e214f42e | 604 | py | Python | nima/emd_loss.py | ankerok1/nima.pytorch | bbdbeeb8c22d880205a4fa35cfc2a533d064ee5d | [
"MIT"
] | 300 | 2018-03-13T19:54:17.000Z | 2022-03-29T03:39:21.000Z | nima/emd_loss.py | wzj52501/nima.pytorch | bbdbeeb8c22d880205a4fa35cfc2a533d064ee5d | [
"MIT"
] | 25 | 2018-03-14T00:45:56.000Z | 2021-08-16T13:14:16.000Z | nima/emd_loss.py | wzj52501/nima.pytorch | bbdbeeb8c22d880205a4fa35cfc2a533d064ee5d | [
"MIT"
] | 77 | 2018-03-14T08:07:56.000Z | 2022-03-28T10:58:30.000Z | import torch
import torch.nn as nn
class EDMLoss(nn.Module):
def __init__(self):
super(EDMLoss, self).__init__()
def forward(self, p_target: torch.Tensor, p_estimate: torch.Tensor):
assert p_target.shape == p_estimate.shape
# cdf for values [1, 2, ..., 10]
cdf_target = torch.cumsum(p_target, dim=1)
# cdf for values [1, 2, ..., 10]
cdf_estimate = torch.cumsum(p_estimate, dim=1)
        cdf_diff = cdf_estimate - cdf_target
        # Earth Mover's Distance with r=2: sqrt of the mean squared difference
        # between the two CDFs.
        samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff), 2)))
        return samplewise_emd.mean()
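# Minimal usage sketch (assumed shapes, not part of the original module): each
# row is a probability distribution over 10 score buckets, as in NIMA.
if __name__ == "__main__":
    criterion = EDMLoss()
    target = torch.softmax(torch.randn(4, 10), dim=1)
    estimate = torch.softmax(torch.randn(4, 10), dim=1)
    print(criterion(target, estimate).item())  # positive scalar loss
    print(criterion(target, target).item())    # identical distributions -> 0.0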
| 33.555556 | 82 | 0.642384 |
acf5b3864f4f048de16dae42929f053e89f54447 | 2,395 | py | Python | src/models/dropouts.py | Valentyn1997/semi-supervised-certain-pseudo-labeling | 3e1d9999ae6e0f0ffa2dc38ec7ca6afd7c4e22d8 | [
"MIT"
] | 1 | 2021-09-23T19:20:25.000Z | 2021-09-23T19:20:25.000Z | src/models/dropouts.py | Valentyn1997/semi-supervised-certain-pseudo-labeling | 3e1d9999ae6e0f0ffa2dc38ec7ca6afd7c4e22d8 | [
"MIT"
] | null | null | null | src/models/dropouts.py | Valentyn1997/semi-supervised-certain-pseudo-labeling | 3e1d9999ae6e0f0ffa2dc38ec7ca6afd7c4e22d8 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class WeightDropConv2d(nn.Conv2d):
"""
Reimplementing baal version of WeightDropConv2d, because it doesn't support multi-GPU training
For details, see https://github.com/pytorch/pytorch/issues/8637
"""
def __init__(self, weight_dropout=0.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight_dropout = weight_dropout
def forward(self, input: torch.Tensor) -> torch.Tensor:
dropped_weight = torch.nn.functional.dropout(self.weight, p=self.weight_dropout, training=self.training)
if self.bias is not None:
dropped_bias = torch.nn.functional.dropout(self.bias, p=self.weight_dropout, training=self.training)
return self._conv_forward(input, dropped_weight, dropped_bias)
else:
return self._conv_forward(input, dropped_weight, self.bias)
def _conv_forward(self, input, weight, bias):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, bias, self.stride, _pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
class WeightDropLinear(nn.Linear):
"""
Reimplementing baal version of WeightDropLinear, because it doesn't support multi-GPU training
For details, see https://github.com/pytorch/pytorch/issues/8637
"""
def __init__(self, weight_dropout=0.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight_dropout = weight_dropout
def forward(self, input: torch.Tensor) -> torch.Tensor:
dropped_weight = torch.nn.functional.dropout(self.weight, p=self.weight_dropout, training=self.training)
dropped_bias = torch.nn.functional.dropout(self.bias, p=self.weight_dropout, training=self.training)
return F.linear(input, dropped_weight, dropped_bias)
def uniform_dropout(input, p=0.5, training=False, inplace=False):
beta = p
assert 0.0 < beta <= 1.0
if training:
out = input * (1.0 + torch.empty(input.shape, device=input.device).uniform_(-beta, beta))
if inplace:
raise NotImplementedError()
return out
else:
return input | 42.767857 | 112 | 0.688935 |
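# Minimal usage sketch (assumed shapes, not part of the original module):
# keeping the layer in train() mode yields stochastic forward passes, which is
# what MC-dropout style uncertainty estimation relies on.
if __name__ == "__main__":
    layer = WeightDropLinear(weight_dropout=0.5, in_features=8, out_features=4)
    x = torch.randn(2, 8)
    layer.train()
    print((layer(x) - layer(x)).abs().sum())  # nonzero: weights re-dropped per call
    layer.eval()
    print((layer(x) - layer(x)).abs().sum())  # zero: dropout disabled in eval mode
    print(uniform_dropout(x, p=0.3, training=True).shape)  # torch.Size([2, 8])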
acf5b3f4d47a0ab7aa054ad1eddfb148f2380370 | 1,632 | py | Python | tests/unit-tests/test_sphinx_manpage.py | BenGale93/confluencebuilder | 93556f974ac482a8d21e95a686fee397d35ed7cd | [
"BSD-2-Clause"
] | null | null | null | tests/unit-tests/test_sphinx_manpage.py | BenGale93/confluencebuilder | 93556f974ac482a8d21e95a686fee397d35ed7cd | [
"BSD-2-Clause"
] | null | null | null | tests/unit-tests/test_sphinx_manpage.py | BenGale93/confluencebuilder | 93556f974ac482a8d21e95a686fee397d35ed7cd | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2021 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib import build_sphinx
from tests.lib import parse
from tests.lib import prepare_conf
import os
import unittest
class TestConfluenceSphinxManpage(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = prepare_conf()
test_dir = os.path.dirname(os.path.realpath(__file__))
cls.dataset = os.path.join(test_dir, 'datasets', 'common')
cls.filenames = [
'manpage',
]
def test_storage_sphinx_manpage_config(self):
config = dict(self.config)
config['manpages_url'] = 'https://manpages.example.com/{path}'
out_dir = build_sphinx(self.dataset, config=config,
filenames=self.filenames)
with parse('manpage', out_dir) as data:
em = data.find('em')
self.assertIsNotNone(em)
link = em.find('a', recursive=False)
self.assertIsNotNone(link)
self.assertTrue(link.has_attr('href'))
self.assertEqual(link['href'], 'https://manpages.example.com/ls(1)')
self.assertEqual(link.text, 'ls(1)')
def test_storage_sphinx_manpage_noconfig(self):
out_dir = build_sphinx(self.dataset, config=self.config,
filenames=self.filenames)
with parse('manpage', out_dir) as data:
em = data.find('em')
self.assertIsNotNone(em)
self.assertEqual(em.text, 'ls(1)')
link = data.find('a')
self.assertIsNone(link)
| 31.384615 | 80 | 0.626225 |
acf5b40471bff5b41a2a87096fbda7e038ce070b | 1,065 | py | Python | src/pyrfc/__init__.py | pendantautomation/PyRFC | 4097f19e42bfede0a47607d5e09928d06d177bc4 | [
"Apache-2.0"
] | null | null | null | src/pyrfc/__init__.py | pendantautomation/PyRFC | 4097f19e42bfede0a47607d5e09928d06d177bc4 | [
"Apache-2.0"
] | null | null | null | src/pyrfc/__init__.py | pendantautomation/PyRFC | 4097f19e42bfede0a47607d5e09928d06d177bc4 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2013 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>
#
# SPDX-License-Identifier: Apache-2.0
# Import from internal modules so that they can be imported directly
# from the pyrfc package
# Set DLL path, due to https://docs.python.org/3.8/whatsnew/3.8.html#bpo-36085-whatsnew
import os
if os.name == "nt":
try:
os.add_dll_directory(os.path.join(os.environ["SAPNWRFC_HOME"], "lib"))
except Exception:
pass
from ._exception import (
RFCError,
RFCLibError,
CommunicationError,
LogonError,
ABAPApplicationError,
ABAPRuntimeError,
ExternalAuthorizationError,
ExternalApplicationError,
ExternalRuntimeError,
)
from .pyrfc import (
get_nwrfclib_version,
set_ini_file_directory,
Connection,
Throughput,
TypeDescription,
FunctionDescription,
Server,
ConnectionParameters,
__VERSION__,
)
__author__: str = "Srdjan Boskovic"
__email__: str = "srdjan.boskovic@sap.com"
__version__: str = __VERSION__
| 23.666667 | 88 | 0.687324 |
acf5b490a03052e6bf872688628489b80ee1e333 | 18,269 | py | Python | mozillians/users/admin.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | mozillians/users/admin.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | mozillians/users/admin.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | from socket import error as socket_error
from django import forms
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin import SimpleListFilter
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.forms import ValidationError
from django.http import HttpResponseRedirect
import autocomplete_light
from celery.task.sets import TaskSet
from functools import update_wrapper
from import_export.admin import ExportMixin
from import_export.fields import Field
from import_export.resources import ModelResource
from sorl.thumbnail.admin import AdminImageMixin
import mozillians.users.tasks
from mozillians.common.helpers import get_datetime
from mozillians.groups.models import GroupMembership, Skill
from mozillians.users.cron import index_all_profiles
from mozillians.users.models import (PUBLIC, Language, ExternalAccount, Vouch,
UserProfile, UsernameBlacklist)
admin.site.unregister(Group)
Q_PUBLIC_PROFILES = Q()
for field in UserProfile.privacy_fields():
key = 'privacy_%s' % field
Q_PUBLIC_PROFILES |= Q(**{key: PUBLIC})
def subscribe_to_basket_action():
"""Subscribe to Basket action."""
def subscribe_to_basket(modeladmin, request, queryset):
"""Subscribe to Basket or update details of already subscribed."""
ts = [(mozillians.users.tasks.update_basket_task
.subtask(args=[userprofile.id]))
for userprofile in queryset]
TaskSet(ts).apply_async()
messages.success(request, 'Basket update started.')
subscribe_to_basket.short_description = 'Subscribe to or Update Basket'
return subscribe_to_basket
def unsubscribe_from_basket_action():
"""Unsubscribe from Basket action."""
def unsubscribe_from_basket(modeladmin, request, queryset):
"""Unsubscribe from Basket."""
ts = [(mozillians.users.tasks.unsubscribe_from_basket_task
.subtask(args=[userprofile.user.email, userprofile.basket_token]))
for userprofile in queryset]
TaskSet(ts).apply_async()
messages.success(request, 'Basket update started.')
unsubscribe_from_basket.short_description = 'Unsubscribe from Basket'
return unsubscribe_from_basket
def update_vouch_flags_action():
"""Update can_vouch, is_vouched flag action."""
def update_vouch_flags(modeladmin, request, queryset):
for profile in queryset:
vouches_received = profile.vouches_received.count()
profile.can_vouch = vouches_received >= settings.CAN_VOUCH_THRESHOLD
profile.is_vouched = vouches_received > 0
profile.save()
update_vouch_flags.short_description = 'Update vouch flags'
return update_vouch_flags
class SuperUserFilter(SimpleListFilter):
"""Admin filter for superusers."""
title = 'has access to admin interface'
parameter_name = 'superuser'
def lookups(self, request, model_admin):
return (('False', 'No'),
('True', 'Yes'))
def queryset(self, request, queryset):
if self.value() is None:
return queryset
value = self.value() == 'True'
return queryset.filter(user__is_staff=value)
class PublicProfileFilter(SimpleListFilter):
"""Admin filter for public profiles."""
title = 'public profile'
parameter_name = 'public_profile'
def lookups(self, request, model_admin):
return (('False', 'No'),
('True', 'Yes'))
def queryset(self, request, queryset):
if self.value() is None:
return queryset
if self.value() == 'True':
return queryset.filter(Q_PUBLIC_PROFILES)
return queryset.exclude(Q_PUBLIC_PROFILES)
class CompleteProfileFilter(SimpleListFilter):
"""Admin filter for complete profiles."""
title = 'complete profile'
parameter_name = 'complete_profile'
def lookups(self, request, model_admin):
return (('False', 'Incomplete'),
('True', 'Complete'))
def queryset(self, request, queryset):
if self.value() is None:
return queryset
elif self.value() == 'True':
return queryset.exclude(full_name='')
else:
return queryset.filter(full_name='')
class DateJoinedFilter(SimpleListFilter):
"""Admin filter for date joined."""
title = 'date joined'
parameter_name = 'date_joined'
def lookups(self, request, model_admin):
return map(lambda x: (str(x.year), x.year),
User.objects.datetimes('date_joined', 'year'))
    def queryset(self, request, queryset):
        if self.value() is None:
            return queryset
        return queryset.filter(user__date_joined__year=self.value())
class LastLoginFilter(SimpleListFilter):
"""Admin filter for last login."""
title = 'last login'
parameter_name = 'last_login'
def lookups(self, request, model_admin):
# Number is in days
return (('<7', 'Less than a week'),
('<30', 'Less than a month'),
('<90', 'Less than 3 months'),
('<180', 'Less than 6 months'),
('>180', 'Between 6 and 12 months'),
('>360', 'More than a year'))
def queryset(self, request, queryset):
if self.value() == '<7':
return queryset.filter(user__last_login__gte=get_datetime(-7))
elif self.value() == '<30':
return queryset.filter(user__last_login__gte=get_datetime(-30))
elif self.value() == '<90':
return queryset.filter(user__last_login__gte=get_datetime(-90))
elif self.value() == '<180':
return queryset.filter(user__last_login__gte=get_datetime(-180))
elif self.value() == '>180':
return queryset.filter(user__last_login__lt=get_datetime(-180),
user__last_login__gt=get_datetime(-360))
elif self.value() == '>360':
return queryset.filter(user__last_login__lt=get_datetime(-360))
return queryset
class AlternateEmailFilter(SimpleListFilter):
"""Admin filter for users with alternate emails."""
title = 'alternate email'
parameter_name = 'alternate_email'
def lookups(self, request, model_admin):
return(('False', 'No'), ('True', 'Yes'))
def queryset(self, request, queryset):
if self.value() is None:
return queryset
if self.value() == 'True':
return queryset.filter(externalaccount__type=ExternalAccount.TYPE_EMAIL)
return queryset.exclude(externalaccount__type=ExternalAccount.TYPE_EMAIL)
class LegacyVouchFilter(SimpleListFilter):
"""Admin filter for profiles with new or legacy vouch type."""
title = 'vouch type'
parameter_name = 'vouch_type'
def lookups(self, request, model_admin):
return (('legacy', 'Legacy'),
('new', 'New'))
def queryset(self, request, queryset):
vouched = queryset.filter(is_vouched=True)
newvouches = (Vouch.objects
.exclude(description='')
.values_list('vouchee', flat=True)
.distinct())
# Load into memory
newvouches = list(newvouches)
if self.value() == 'legacy':
return vouched.exclude(pk__in=newvouches)
elif self.value() == 'new':
return vouched.filter(pk__in=newvouches)
return queryset
class UsernameBlacklistAdmin(ExportMixin, admin.ModelAdmin):
"""UsernameBlacklist Admin."""
save_on_top = True
search_fields = ['value']
list_filter = ['is_regex']
list_display = ['value', 'is_regex']
admin.site.register(UsernameBlacklist, UsernameBlacklistAdmin)
class LanguageAdmin(ExportMixin, admin.ModelAdmin):
search_fields = ['userprofile__full_name', 'userprofile__user__email', 'code']
list_display = ['code', 'userprofile']
list_filter = ['code']
admin.site.register(Language, LanguageAdmin)
class SkillInline(admin.TabularInline):
model = Skill
extra = 1
class GroupMembershipInline(admin.TabularInline):
model = GroupMembership
extra = 1
form = autocomplete_light.modelform_factory(GroupMembership)
class LanguageInline(admin.TabularInline):
model = Language
extra = 1
class ExternalAccountInline(admin.TabularInline):
model = ExternalAccount
extra = 1
def queryset(self, request):
"""Exclude alternate emails from external accounts"""
qs = super(ExternalAccountInline, self).queryset(request)
return qs.exclude(type=ExternalAccount.TYPE_EMAIL)
class AlternateEmailForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.type = ExternalAccount.TYPE_EMAIL
return super(AlternateEmailForm, self).save(*args, **kwargs)
class Meta:
model = ExternalAccount
exclude = ['type']
class AlternateEmailInline(admin.TabularInline):
form = AlternateEmailForm
model = ExternalAccount
extra = 1
verbose_name = 'Alternate Email'
verbose_name_plural = 'Alternate Emails'
def queryset(self, request):
"""Limit queryset to alternate emails."""
qs = super(AlternateEmailInline, self).queryset(request)
return qs.filter(type=ExternalAccount.TYPE_EMAIL)
class UserProfileAdminForm(forms.ModelForm):
username = forms.CharField()
email = forms.CharField()
last_login = forms.DateTimeField(required=False)
date_joined = forms.DateTimeField(required=False)
def __init__(self, *args, **kwargs):
self.instance = kwargs.get('instance')
if self.instance:
self.base_fields['username'].initial = self.instance.user.username
self.base_fields['email'].initial = self.instance.user.email
super(UserProfileAdminForm, self).__init__(*args, **kwargs)
def clean_username(self):
username = self.cleaned_data['username']
if (User.objects.exclude(pk=self.instance.user.pk)
.filter(username=username).exists()):
raise ValidationError('Username already exists')
return username
def clean_email(self):
email = self.cleaned_data['email']
if (User.objects.exclude(pk=self.instance.user.pk)
.filter(email=email).exists()):
raise ValidationError('Email already exists')
return email
def save(self, *args, **kwargs):
if self.instance:
self.instance.user.username = self.cleaned_data.get('username')
self.instance.user.email = self.cleaned_data.get('email')
self.instance.user.save()
return super(UserProfileAdminForm, self).save(*args, **kwargs)
class Meta:
model = UserProfile
class UserProfileResource(ModelResource):
"""django-import-export UserProfile Resource."""
username = Field(attribute='user__username')
email = Field(attribute='user__email')
class Meta:
model = UserProfile
class UserProfileAdmin(AdminImageMixin, ExportMixin, admin.ModelAdmin):
resource_class = UserProfileResource
inlines = [LanguageInline, GroupMembershipInline, ExternalAccountInline,
AlternateEmailInline]
search_fields = ['full_name', 'user__email', 'user__username', 'ircname',
'geo_country__name', 'geo_region__name', 'geo_city__name']
readonly_fields = ['date_vouched', 'vouched_by', 'user', 'date_joined', 'last_login',
'is_vouched', 'can_vouch', 'referral_source']
form = UserProfileAdminForm
list_filter = ['is_vouched', 'can_vouch', DateJoinedFilter,
LastLoginFilter, LegacyVouchFilter, SuperUserFilter,
CompleteProfileFilter, PublicProfileFilter, AlternateEmailFilter,
'externalaccount__type', 'referral_source']
save_on_top = True
list_display = ['full_name', 'email', 'username', 'geo_country', 'is_vouched', 'can_vouch',
'number_of_vouchees']
list_display_links = ['full_name', 'email', 'username']
actions = [subscribe_to_basket_action(), unsubscribe_from_basket_action(),
update_vouch_flags_action()]
fieldsets = (
('Account', {
'fields': ('full_name', 'username', 'email', 'photo',)
}),
(None, {
'fields': ('title', 'bio', 'tshirt', 'ircname', 'date_mozillian',)
}),
('Important dates', {
'fields': ('date_joined', 'last_login')
}),
('Vouch Info', {
'fields': ('date_vouched', 'is_vouched', 'can_vouch')
}),
('Location', {
'fields': ('geo_country', 'geo_region', 'geo_city',
'lng', 'lat', 'timezone')
}),
('Services', {
'fields': ('allows_community_sites', 'allows_mozilla_sites')
}),
('Privacy Settings', {
'fields': ('privacy_photo', 'privacy_full_name', 'privacy_ircname',
'privacy_email', 'privacy_bio',
'privacy_geo_city', 'privacy_geo_region', 'privacy_geo_country',
'privacy_groups', 'privacy_skills', 'privacy_languages',
'privacy_date_mozillian', 'privacy_timezone',
'privacy_tshirt', 'privacy_title'),
'classes': ('collapse',)
}),
('Basket', {
'fields': ('basket_token',),
'classes': ('collapse',)
}),
('Skills', {
'fields': ('skills',)
}),
('Referral Source', {
'fields': ('referral_source',)
}),
)
def queryset(self, request):
qs = super(UserProfileAdmin, self).queryset(request)
qs = qs.annotate(Count('vouches_made'))
return qs
def email(self, obj):
return obj.user.email
email.admin_order_field = 'user__email'
def username(self, obj):
return obj.user.username
username.admin_order_field = 'user__username'
    def is_vouched(self, obj):
        return obj.is_vouched
is_vouched.boolean = True
is_vouched.admin_order_field = 'is_vouched'
def vouched_by(self, obj):
voucher = obj.vouched_by
if voucher:
voucher_url = reverse('admin:auth_user_change', args=[voucher.id])
return '<a href="%s">%s</a>' % (voucher_url, voucher)
vouched_by.admin_order_field = 'vouched_by'
vouched_by.allow_tags = True
def number_of_vouchees(self, obj):
"""Return the number of vouchees for obj."""
return obj.vouches_made.count()
number_of_vouchees.admin_order_field = 'vouches_made__count'
def last_login(self, obj):
return obj.user.last_login
def date_joined(self, obj):
return obj.user.date_joined
def get_actions(self, request):
"""Return bulk actions for UserAdmin without bulk delete."""
actions = super(UserProfileAdmin, self).get_actions(request)
actions.pop('delete_selected', None)
return actions
def index_profiles(self, request):
"""Fire an Elastic Search Index Profiles task."""
index_all_profiles()
messages.success(request, 'Profile indexing started.')
return HttpResponseRedirect(reverse('admin:users_userprofile_changelist'))
def check_celery(self, request):
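        """Fire a test task and report whether the Celery broker and worker respond."""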
try:
investigator = mozillians.users.tasks.check_celery.delay()
except socket_error as e:
messages.error(request, 'Cannot connect to broker: %s' % e)
return HttpResponseRedirect(reverse('admin:users_userprofile_changelist'))
try:
investigator.get(timeout=5)
except investigator.TimeoutError as e:
messages.error(request, 'Worker timeout: %s' % e)
else:
messages.success(request, 'Celery is OK')
finally:
return HttpResponseRedirect(reverse('admin:users_userprofile_changelist'))
def get_urls(self):
"""Return custom and UserProfileAdmin urls."""
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
urls = super(UserProfileAdmin, self).get_urls()
my_urls = patterns(
'',
url(r'index_profiles', wrap(self.index_profiles), name='users_index_profiles'),
url(r'check_celery', wrap(self.check_celery), name='users_check_celery')
)
return my_urls + urls
admin.site.register(UserProfile, UserProfileAdmin)
class NullProfileFilter(SimpleListFilter):
"""Admin filter for null profiles."""
title = 'has user profile'
parameter_name = 'has_user_profile'
def lookups(self, request, model_admin):
return (('False', 'No'),
('True', 'Yes'))
def queryset(self, request, queryset):
if not self.value():
return queryset
value = self.value() != 'True'
return queryset.filter(userprofile__isnull=value)
class UserAdmin(UserAdmin):
list_filter = [NullProfileFilter]
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
class GroupAdmin(ExportMixin, GroupAdmin):
pass
admin.site.register(Group, GroupAdmin)
class VouchAdminForm(forms.ModelForm):
class Meta:
model = Vouch
widgets = {
'voucher': autocomplete_light.ChoiceWidget('UserProfiles'),
'vouchee': autocomplete_light.ChoiceWidget('UserProfiles'),
}
class VouchAdmin(admin.ModelAdmin):
save_on_top = True
search_fields = ['voucher__user__username', 'voucher__full_name',
'vouchee__user__username', 'vouchee__full_name']
list_display = ['vouchee', 'voucher', 'date', 'autovouch']
list_filter = ['autovouch']
form = VouchAdminForm
admin.site.register(Vouch, VouchAdmin)
| 33.831481 | 95 | 0.646067 |
acf5b4eca7b24bbf0b15f2f8e94f6a3c3a42daea | 1,494 | py | Python | lec1-5[gridsearch].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null | lec1-5[gridsearch].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null | lec1-5[gridsearch].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null |
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
input_data = pd.read_csv("Chapter01/ad.csv", header=None)
#print(input_data)
X_col = set(input_data.columns.values)
y = input_data[len(input_data.columns.values)-1]
X_col.remove(len(input_data.columns.values)-1)
X = input_data[list(X_col)]
# split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=77)
# wrap the classifier in a Pipeline so the grid below can address its hyperparameters via the 'clf__' prefix
pipeline = Pipeline([('clf', DecisionTreeClassifier(criterion='entropy'))])
# hyperparameter combinations to search (dict)
parameters = {'clf__max_depth': (50, 100, 150),
'clf__min_samples_split': (2, 3),
'clf__min_samples_leaf': (1, 2, 3)}
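# GridSearchCV fits one cross-validated model per combination in the grid
# (3 * 2 * 3 = 18 candidates here); n_jobs=-1 parallelises over all CPU cores.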
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
y_pred = grid_search.predict(X_test)
print ('\n Best score: \n', grid_search.best_score_)
print ('\n Best parameters set: \n')
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print ('\t%s: %r' % (param_name, best_parameters[param_name]))
print ("\n Confusion Matrix on Test data \n",confusion_matrix(y_test,y_pred))
print ("\n Test Accuracy \n",accuracy_score(y_test,y_pred))
print ("\nPrecision Recall f1 table \n",classification_report(y_test, y_pred)) | 39.315789 | 90 | 0.75502 |
acf5b6100a5708971d1a4ae04604895ae33cbbd2 | 805 | py | Python | test/test_add_contact.py | mikhaylov2a/python_training | 18ee7a5dd5189506d037e5d65facd1a2ec26e19f | ["Apache-2.0"] | null | null | null | test/test_add_contact.py | mikhaylov2a/python_training | 18ee7a5dd5189506d037e5d65facd1a2ec26e19f | ["Apache-2.0"] | null | null | null | test/test_add_contact.py | mikhaylov2a/python_training | 18ee7a5dd5189506d037e5d65facd1a2ec26e19f | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
app.session.login("admin", "secret")
app.contact.create_contact(Contact(firstname="john", middlename="j", lastname="smith", nickname="neo",
title="title", companyname="RealCo", firstaddress="1st Street",
homephone="666666", mobilephone="+79657766664", workphone="666666 6",
faxphone="666666 66", firstemail="jphnsmith@email.com",
secondemail="johnsmith2@email.com", thirdemail="johnsmith3@email.com",
homepage="https://smith.net", secondaddress="2nd Street", phone2="3rd NY",
notes="Just New Guy"))
app.session.logout()
| 53.666667 | 106 | 0.561491 |
acf5b808b8a4a5211a83ec60e999e76b3dbe5a8c | 4,098 | py | Python | server/openapi_server/controllers/theory_guided_model_controller.py | mintproject/MINT-ModelCatalogIngestionAPI | 026d3495483a3e48ea3c1364d0dda09beeea69e4 | ["Apache-2.0"] | 2 | 2019-05-30T21:33:43.000Z | 2019-09-27T21:04:38.000Z | server/openapi_server/controllers/theory_guided_model_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | ["Apache-2.0"] | 82 | 2019-10-08T16:35:34.000Z | 2022-03-15T18:25:27.000Z | server/openapi_server/controllers/theory_guided_model_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | ["Apache-2.0"] | null | null | null |
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import THEORYGUIDEDMODEL_TYPE_NAME, THEORYGUIDEDMODEL_TYPE_URI
from openapi_server.models.theory_guided_model import TheoryGuidedModel # noqa: E501
from openapi_server import util
def theory_guidedmodels_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of Theory-GuidedModel
Gets a list of all instances of Theory-GuidedModel (more information in https://w3id.org/okn/o/sdm#Theory-GuidedModel) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[TheoryGuidedModel]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=THEORYGUIDEDMODEL_TYPE_URI,
rdf_type_name=THEORYGUIDEDMODEL_TYPE_NAME,
kls=TheoryGuidedModel)
def theory_guidedmodels_id_delete(id, user=None): # noqa: E501
"""Delete an existing Theory-GuidedModel
Delete an existing Theory-GuidedModel (more information in https://w3id.org/okn/o/sdm#Theory-GuidedModel) # noqa: E501
:param id: The ID of the Theory-GuidedModel to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=THEORYGUIDEDMODEL_TYPE_URI,
rdf_type_name=THEORYGUIDEDMODEL_TYPE_NAME,
kls=TheoryGuidedModel)
def theory_guidedmodels_id_get(id, username=None): # noqa: E501
"""Get a single Theory-GuidedModel by its id
Gets the details of a given Theory-GuidedModel (more information in https://w3id.org/okn/o/sdm#Theory-GuidedModel) # noqa: E501
:param id: The ID of the Theory-GuidedModel to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: TheoryGuidedModel
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=THEORYGUIDEDMODEL_TYPE_URI,
rdf_type_name=THEORYGUIDEDMODEL_TYPE_NAME,
kls=TheoryGuidedModel)
def theory_guidedmodels_id_put(id, user=None, theory_guided_model=None): # noqa: E501
"""Update an existing Theory-GuidedModel
Updates an existing Theory-GuidedModel (more information in https://w3id.org/okn/o/sdm#Theory-GuidedModel) # noqa: E501
:param id: The ID of the Theory-GuidedModel to be retrieved
:type id: str
:param user: Username
:type user: str
:param theory_guided_model: An old Theory-GuidedModelto be updated
:type theory_guided_model: dict | bytes
:rtype: TheoryGuidedModel
"""
if connexion.request.is_json:
theory_guided_model = TheoryGuidedModel.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=theory_guided_model,
rdf_type_uri=THEORYGUIDEDMODEL_TYPE_URI,
rdf_type_name=THEORYGUIDEDMODEL_TYPE_NAME,
kls=TheoryGuidedModel)
def theory_guidedmodels_post(user=None, theory_guided_model=None): # noqa: E501
"""Create one Theory-GuidedModel
Create a new instance of Theory-GuidedModel (more information in https://w3id.org/okn/o/sdm#Theory-GuidedModel) # noqa: E501
:param user: Username
:type user: str
:param theory_guided_model: Information about the Theory-GuidedModelto be created
:type theory_guided_model: dict | bytes
:rtype: TheoryGuidedModel
"""
if connexion.request.is_json:
theory_guided_model = TheoryGuidedModel.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=theory_guided_model,
rdf_type_uri=THEORYGUIDEDMODEL_TYPE_URI,
rdf_type_name=THEORYGUIDEDMODEL_TYPE_NAME,
kls=TheoryGuidedModel)
| 33.317073 | 135 | 0.7245 |
acf5b8115e7acace7c8b8863bbdd212ca95c67cb | 3,037 | py | Python | scripts/generate_maze2d_datasets.py | michaelrzhang/d4rl | e2466c0cfa4571c4c69197cba9859247f8299c41 | [
"Apache-2.0"
] | null | null | null | scripts/generate_maze2d_datasets.py | michaelrzhang/d4rl | e2466c0cfa4571c4c69197cba9859247f8299c41 | [
"Apache-2.0"
] | null | null | null | scripts/generate_maze2d_datasets.py | michaelrzhang/d4rl | e2466c0cfa4571c4c69197cba9859247f8299c41 | [
"Apache-2.0"
] | null | null | null | import logging
from offline_rl.pointmaze import waypoint_controller
from offline_rl.pointmaze import maze_model
import numpy as np
import pickle
import gzip
import h5py
import argparse
def reset_data():
return {'observations': [],
'actions': [],
'terminals': [],
'rewards': [],
'infos/goal': [],
'infos/qpos': [],
'infos/qvel': [],
}
def append_data(data, s, a, tgt, done, env_data):
data['observations'].append(s)
data['actions'].append(a)
data['rewards'].append(0.0)
data['terminals'].append(done)
data['infos/goal'].append(tgt)
data['infos/qpos'].append(env_data.qpos.ravel().copy())
data['infos/qvel'].append(env_data.qvel.ravel().copy())
def npify(data):
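    """Pack each buffered list into a NumPy array: bool for 'terminals', float32 otherwise."""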
for k in data:
if k == 'terminals':
dtype = np.bool_
else:
dtype = np.float32
data[k] = np.array(data[k], dtype=dtype)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--render', action='store_true', help='Render trajectories')
parser.add_argument('--noisy', action='store_true', help='Noisy actions')
parser.add_argument('--maze', type=str, default='umaze', help='Maze type. small or default')
parser.add_argument('--num_samples', type=int, default=int(1e6), help='Num samples to collect')
args = parser.parse_args()
if args.maze == 'umaze':
maze = maze_model.U_MAZE
max_episode_steps = 150
elif args.maze == 'open':
maze = maze_model.OPEN
max_episode_steps = 150
elif args.maze == 'medium':
maze = maze_model.MEDIUM_MAZE
max_episode_steps = 250
else:
maze = maze_model.LARGE_MAZE
max_episode_steps = 600
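    # The waypoint controller plans a route through the maze and outputs
    # actions that steer the point mass toward successive waypoints,
    # producing goal-directed demonstration trajectories.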
controller = waypoint_controller.WaypointController(maze)
env = maze_model.MazeEnv(maze)
env.set_target()
s = env.reset()
act = env.action_space.sample()
done = False
data = reset_data()
ts = 0
for _ in range(args.num_samples):
position = s[0:2]
velocity = s[2:4]
act, done = controller.get_action(position, velocity, env._target)
if args.noisy:
act = act + np.random.randn(*act.shape)*0.5
act = np.clip(act, -1.0, 1.0)
if ts >= max_episode_steps:
done = True
append_data(data, s, act, env._target, done, env.sim.data)
ns, _, _, _ = env.step(act)
if len(data['observations']) % 10000 == 0:
print(len(data['observations']))
ts += 1
if done:
env.set_target()
done = False
ts = 0
else:
s = ns
if args.render:
env.render()
if args.noisy:
fname = 'maze2d-%s-noisy.hdf5' % args.maze
else:
fname = 'maze2d-%s.hdf5' % args.maze
dataset = h5py.File(fname, 'w')
npify(data)
for k in data:
dataset.create_dataset(k, data=data[k], compression='gzip')
if __name__ == "__main__":
main()
| 27.36036 | 99 | 0.580836 |
acf5b85c50db8195722fdb010724283632876964 | 2,719 | py | Python | xyz2neu.py | purpleskyfall/XYZ2BLH | 9fb51988d6b089603e7045611a7234718988aae9 | [
"MIT"
] | 5 | 2019-11-03T07:25:03.000Z | 2021-11-18T09:13:45.000Z | xyz2neu.py | purpleskyfall/XYZ2BLH | 9fb51988d6b089603e7045611a7234718988aae9 | [
"MIT"
] | null | null | null | xyz2neu.py | purpleskyfall/XYZ2BLH | 9fb51988d6b089603e7045611a7234718988aae9 | [
"MIT"
] | 7 | 2019-03-23T11:42:37.000Z | 2022-03-26T06:18:27.000Z | #!/usr/bin/env python3
#coding=utf-8
"""Convert cartesian coordinate system to site-center system.
:author: Jon Jiang
:date: 2018-01-15
"""
from __future__ import division, print_function
import argparse
import math
from xyz2blh import xyz2blh
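# WGS84 ellipsoid constants: A is the semi-major axis in metres, E2 the
# first eccentricity squared and E4 the second eccentricity squared.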
A = 6378137.0
E2 = 0.00669438
E4 = 0.0067394968
B = 0.1
def xyz2neu(x0, y0, z0, x, y, z):
"""Convert cartesian coordinate system to site-center system.
    Input parameters:
    - x0, y0, z0: coordinate of the center site,
- x, y, z: coordinate to be converted.
Example: Use coordinate of BJFS IGS site
>>> north, east, up = xyz2neu(-2148747.998, 4426652.444, 4044675.151,
... -2148745.727, 4426649.545, 4044668.469)
>>> round(north, 2), round(east, 2), round(up, 2)
(-2.85, -0.78, -7.03)
"""
# calculate the lat, lon and height of center site
lat, lon, _ = xyz2blh(x0, y0, z0)
# convert angle unit to radians
lat, lon = math.radians(lat), math.radians(lon)
# calculate NEU
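    # Rotate the ECEF offset (x - x0, y - y0, z - z0) into the local frame;
    # the rotation depends only on the geodetic latitude and longitude of
    # the center site.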
north = (-math.sin(lat) * math.cos(lon) * (x - x0) -
math.sin(lat) * math.sin(lon) * (y - y0) +
math.cos(lat) * (z - z0))
east = -math.sin(lon) * (x - x0) + math.cos(lon) * (y - y0)
    up = (math.cos(lat) * math.cos(lon) * (x - x0) +
math.cos(lat) * math.sin(lon) * (y - y0) +
math.sin(lat) * (z - z0))
return north, east, up
def main():
"""Main function."""
args = init_args()
north, east, up = xyz2neu(args.x0, args.y0, args.z0, args.x, args.y, args.z)
message = ('Center: {0:.2f}, {1:.2f}, {2:.2f}\n'
'North: {3:.2f}, East: {4:.2f}, Up: {5:.2f}')
print(message.format(args.x0, args.y0, args.z0, north, east, up))
return 0
def init_args():
"""Initialze user input."""
parser = argparse.ArgumentParser(
description='Convert cartesian coordinate system to site-center NEU.')
# add paraments
parser.add_argument('-x0', metavar='<x0>', dest='x0', type=float,
help='topocentric X coordinate.')
parser.add_argument('-y0', metavar='<y0>', dest='y0', type=float,
help='topocentric Y coordinate.')
parser.add_argument('-z0', metavar='<z0>', dest='z0', type=float,
help='topocentricZ coordinate.')
parser.add_argument('-x', metavar='<x>', dest='x', type=float,
help='X coordinate will convert.')
parser.add_argument('-y', metavar='<y>', dest='y', type=float,
help='Y coordinate will convert.')
parser.add_argument('-z', metavar='<z>', dest='z', type=float,
help='Z coordinate will convert.')
return parser.parse_args()
if __name__ == '__main__':
main()
| 32.759036 | 80 | 0.580728 |
acf5b88c7dae65f17ae7ad7a4067388092e00dc1 | 16,444 | py | Python | src/sage/quadratic_forms/quadratic_form__local_normal_form.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | null | null | null | src/sage/quadratic_forms/quadratic_form__local_normal_form.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | null | null | null | src/sage/quadratic_forms/quadratic_form__local_normal_form.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | 1 | 2020-07-24T12:20:37.000Z | 2020-07-24T12:20:37.000Z | """
Local Normal Form
"""
#*****************************************************************************
# Copyright (C) 2007 William Stein and Jonathan Hanke
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import copy
from sage.rings.infinity import Infinity
from sage.rings.integer_ring import IntegerRing, ZZ
from sage.rings.rational_field import QQ
from sage.rings.arith import GCD, valuation, is_prime
def find_entry_with_minimal_scale_at_prime(self, p):
"""
Finds the entry of the quadratic form with minimal scale at the
prime p, preferring diagonal entries in case of a tie. (I.e. If
we write the quadratic form as a symmetric matrix M, then this
entry M[i,j] has the minimal valuation at the prime p.)
Note: This answer is independent of the kind of matrix (Gram or
Hessian) associated to the form.
INPUT:
`p` -- a prime number > 0
OUTPUT:
a pair of integers >= 0
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [6, 2, 20]); Q
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 6 2 ]
[ * 20 ]
sage: Q.find_entry_with_minimal_scale_at_prime(2)
(0, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(3)
(1, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(5)
(0, 0)
"""
n = self.dim()
min_val = Infinity
ij_index = None
val_2 = valuation(2, p)
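    ## Off-diagonal coefficients of the form are twice the corresponding
    ## Gram-matrix entries, so val_2 (= 1 if p == 2, else 0) normalizes
    ## both cases to the valuation of the Gram matrix.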
for d in range(n): ## d = difference j-i
for e in range(n - d): ## e is the length of the diagonal with value d.
## Compute the valuation of the entry
if d == 0:
tmp_val = valuation(self[e, e+d], p)
else:
tmp_val = valuation(self[e, e+d], p) - val_2
## Check if it's any smaller than what we have
if tmp_val < min_val:
ij_index = (e,e+d)
min_val = tmp_val
## Return the result
return ij_index
def local_normal_form(self, p):
"""
    Returns a locally integrally equivalent quadratic form over
the p-adic integers Z_p which gives the Jordan decomposition. The
Jordan components are written as sums of blocks of size <= 2 and
are arranged by increasing scale, and then by increasing norm.
(This is equivalent to saying that we put the 1x1 blocks before
the 2x2 blocks in each Jordan component.)
INPUT:
`p` -- a positive prime number.
OUTPUT:
a quadratic form over ZZ
WARNING: Currently this only works for quadratic forms defined over ZZ.
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [10,4,1])
sage: Q.local_normal_form(5)
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 1 0 ]
[ * 6 ]
::
sage: Q.local_normal_form(3)
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 10 0 ]
[ * 15 ]
sage: Q.local_normal_form(2)
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 1 0 ]
[ * 6 ]
"""
## Sanity Checks
if (self.base_ring() != IntegerRing()):
raise NotImplementedError("Oops! This currently only works for quadratic forms defined over IntegerRing(). =(")
if not ((p>=2) and is_prime(p)):
raise TypeError("Oops! p is not a positive prime number. =(")
## Some useful local variables
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), self.dim(), self.coefficients())
## Prepare the final form to return
Q_Jordan = copy.deepcopy(self)
Q_Jordan.__init__(self.base_ring(), 0)
while Q.dim() > 0:
n = Q.dim()
## Step 1: Find the minimally p-divisible matrix entry, preferring diagonals
## -------------------------------------------------------------------------
(min_i, min_j) = Q.find_entry_with_minimal_scale_at_prime(p)
if min_i == min_j:
min_val = valuation(2 * Q[min_i, min_j], p)
else:
min_val = valuation(Q[min_i, min_j], p)
## Error if we still haven't seen non-zero coefficients!
if (min_val == Infinity):
raise RuntimeError("Oops! The original matrix is degenerate. =(")
## Step 2: Arrange for the upper leftmost entry to have minimal valuation
## ----------------------------------------------------------------------
if (min_i == min_j):
block_size = 1
Q.swap_variables(0, min_i, in_place = True)
else:
## Work in the upper-left 2x2 block, and replace it by its 2-adic equivalent form
Q.swap_variables(0, min_i, in_place = True)
Q.swap_variables(1, min_j, in_place = True)
## 1x1 => make upper left the smallest
if (p != 2):
block_size = 1;
Q.add_symmetric(1, 0, 1, in_place = True)
## 2x2 => replace it with the appropriate 2x2 matrix
else:
block_size = 2
## DIAGNOSTIC
#print "\n Finished Step 2 \n";
#print "\n Q is: \n" + str(Q) + "\n";
#print " p is: " + str(p)
#print " min_val is: " + str( min_val)
#print " block_size is: " + str(block_size)
#print "\n Starting Step 3 \n"
## Step 3: Clear out the remaining entries
## ---------------------------------------
min_scale = p ** min_val ## This is the minimal valuation of the Hessian matrix entries.
##DIAGNOSTIC
#print "Starting Step 3:"
#print "----------------"
#print " min_scale is: " + str(min_scale)
## Perform cancellation over Z by ensuring divisibility
if (block_size == 1):
a = 2 * Q[0,0]
for j in range(block_size, n):
b = Q[0, j]
g = GCD(a, b)
## DIAGNSOTIC
#print "Cancelling from a 1x1 block:"
#print "----------------------------"
#print " Cancelling entry with index (" + str(upper_left) + ", " + str(j) + ")"
#print " entry = " + str(b)
#print " gcd = " + str(g)
#print " a = " + str(a)
#print " b = " + str(b)
#print " a/g = " + str(a/g) + " (used for stretching)"
#print " -b/g = " + str(-b/g) + " (used for cancelling)"
## Sanity Check: a/g is a p-unit
                if valuation(g, p) != valuation(a, p):
raise RuntimeError("Oops! We have a problem with our rescaling not preserving p-integrality!")
Q.multiply_variable(ZZ(a/g), j, in_place = True) ## Ensures that the new b entry is divisible by a
Q.add_symmetric(ZZ(-b/g), j, 0, in_place = True) ## Performs the cancellation
elif (block_size == 2):
a1 = 2 * Q[0,0]
a2 = Q[0, 1]
b1 = Q[1, 0] ## This is the same as a2
b2 = 2 * Q[1, 1]
big_det = (a1*b2 - a2*b1)
small_det = big_det / (min_scale * min_scale)
## Cancels out the rows/columns of the 2x2 block
for j in range(block_size, n):
a = Q[0, j]
b = Q[1, j]
## Ensures an integral result (scale jth row/column by big_det)
Q.multiply_variable(big_det, j, in_place = True)
## Performs the cancellation (by producing -big_det * jth row/column)
Q.add_symmetric(ZZ(-(a*b2 - b*a2)), j, 0, in_place = True)
Q.add_symmetric(ZZ(-(-a*b1 + b*a1)), j, 1, in_place = True)
## Now remove the extra factor (non p-unit factor) in big_det we introduced above
Q.divide_variable(ZZ(min_scale * min_scale), j, in_place = True)
## DIAGNOSTIC
#print "Cancelling out a 2x2 block:"
#print "---------------------------"
#print " a1 = " + str(a1)
#print " a2 = " + str(a2)
#print " b1 = " + str(b1)
#print " b2 = " + str(b2)
#print " big_det = " + str(big_det)
#print " min_scale = " + str(min_scale)
#print " small_det = " + str(small_det)
#print " Q = \n", Q
## Uses Cassels's proof to replace the remaining 2 x 2 block
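            ## (Over Z_2 any such unimodular 2x2 block is equivalent either to
            ## the hyperbolic plane, Gram matrix [0 1; 1 0] with determinant
            ## -1 mod 8, or to the block with Gram matrix [2 1; 1 2] and
            ## determinant 3 mod 8.)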
if (((1 + small_det) % 8) == 0):
Q[0, 0] = 0
Q[1, 1] = 0
Q[0, 1] = min_scale
elif (((5 + small_det) % 8) == 0):
Q[0, 0] = min_scale
Q[1, 1] = min_scale
Q[0, 1] = min_scale
else:
raise RuntimeError("Error in LocalNormal: Impossible behavior for a 2x2 block! \n")
## Check that the cancellation worked, extract the upper-left block, and trim Q to handle the next block.
for i in range(block_size):
for j in range(block_size, n):
if Q[i,j] != 0:
raise RuntimeError("Oops! The cancellation didn't work properly at entry (" + str(i) + ", " + str(j) + ").")
Q_Jordan = Q_Jordan + Q.extract_variables(range(block_size))
Q = Q.extract_variables(range(block_size, n))
return Q_Jordan
def jordan_blocks_by_scale_and_unimodular(self, p, safe_flag=True):
"""
Returns a list of pairs `(s_i, L_i)` where `L_i` is a maximal
`p^{s_i}`-unimodular Jordan component which is further decomposed into
block diagonals of block size `\le 2`. For each `L_i` the 2x2 blocks are
listed after the 1x1 blocks (which follows from the convention of the
:meth:`local_normal_form` method).
    .. note::
The decomposition of each `L_i` into smaller block is not unique!
The ``safe_flag`` argument allows us to select whether we want a copy of
the output, or the original output. By default ``safe_flag = True``, so we
return a copy of the cached information. If this is set to ``False``, then
the routine is much faster but the return values are vulnerable to being
corrupted by the user.
INPUT:
- `p` -- a prime number > 0.
OUTPUT:
A list of pairs `(s_i, L_i)` where:
- `s_i` is an integer,
- `L_i` is a block-diagonal unimodular quadratic form over `\ZZ_p`.
.. note::
These forms `L_i` are defined over the `p`-adic integers, but by a
matrix over `\ZZ` (or `\QQ`?).
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ, [1,9,5,7])
sage: Q.jordan_blocks_by_scale_and_unimodular(3)
[(0, Quadratic form in 3 variables over Integer Ring with coefficients:
[ 1 0 0 ]
[ * 5 0 ]
[ * * 7 ]), (2, Quadratic form in 1 variables over Integer Ring with coefficients:
[ 1 ])]
::
sage: Q2 = QuadraticForm(ZZ, 2, [1,1,1])
sage: Q2.jordan_blocks_by_scale_and_unimodular(2)
[(-1, Quadratic form in 2 variables over Integer Ring with coefficients:
[ 2 2 ]
[ * 2 ])]
sage: Q = Q2 + Q2.scale_by_factor(2)
sage: Q.jordan_blocks_by_scale_and_unimodular(2)
[(-1, Quadratic form in 2 variables over Integer Ring with coefficients:
[ 2 2 ]
[ * 2 ]), (0, Quadratic form in 2 variables over Integer Ring with coefficients:
[ 2 2 ]
[ * 2 ])]
"""
## Try to use the cached result
try:
if safe_flag:
return copy.deepcopy(self.__jordan_blocks_by_scale_and_unimodular_dict[p])
else:
return self.__jordan_blocks_by_scale_and_unimodular_dict[p]
except Exception:
## Initialize the global dictionary if it doesn't exist
if not hasattr(self, '__jordan_blocks_by_scale_and_unimodular_dict'):
self.__jordan_blocks_by_scale_and_unimodular_dict = {}
## Deal with zero dim'l forms
if self.dim() == 0:
return []
## Find the Local Normal form of Q at p
Q1 = self.local_normal_form(p)
## Parse this into Jordan Blocks
n = Q1.dim()
tmp_Jordan_list = []
i = 0
start_ind = 0
if (n >= 2) and (Q1[0,1] != 0):
start_scale = valuation(Q1[0,1], p) - 1
else:
start_scale = valuation(Q1[0,0], p)
while (i < n):
## Determine the size of the current block
if (i == n-1) or (Q1[i,i+1] == 0):
block_size = 1
else:
block_size = 2
## Determine the valuation of the current block
if block_size == 1:
block_scale = valuation(Q1[i,i], p)
else:
block_scale = valuation(Q1[i,i+1], p) - 1
## Process the previous block if the valuation increased
if block_scale > start_scale:
tmp_Jordan_list += [(start_scale, Q1.extract_variables(range(start_ind, i)).scale_by_factor(ZZ(1) / (QQ(p)**(start_scale))))]
start_ind = i
start_scale = block_scale
## Increment the index
i += block_size
## Add the last block
tmp_Jordan_list += [(start_scale, Q1.extract_variables(range(start_ind, n)).scale_by_factor(ZZ(1) / QQ(p)**(start_scale)))]
## Cache the result
self.__jordan_blocks_by_scale_and_unimodular_dict[p] = tmp_Jordan_list
## Return the result
return tmp_Jordan_list
def jordan_blocks_in_unimodular_list_by_scale_power(self, p):
"""
Returns a list of Jordan components, whose component at index i
should be scaled by the factor p^i.
This is only defined for integer-valued quadratic forms
(i.e. forms with base_ring ZZ), and the indexing only works
correctly for p=2 when the form has an integer Gram matrix.
INPUT:
self -- a quadratic form over ZZ, which has integer Gram matrix if p == 2
`p` -- a prime number > 0
OUTPUT:
a list of p-unimodular quadratic forms
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, [2, -2, 0, 3, -5, 4])
sage: Q.jordan_blocks_in_unimodular_list_by_scale_power(2)
Traceback (most recent call last):
...
TypeError: Oops! The given quadratic form has a Jordan component with a negative scale exponent!
This routine requires an integer-matrix quadratic form for the output indexing to work properly!
sage: Q.scale_by_factor(2).jordan_blocks_in_unimodular_list_by_scale_power(2)
[Quadratic form in 2 variables over Integer Ring with coefficients:
[ 0 2 ]
[ * 0 ], Quadratic form in 0 variables over Integer Ring with coefficients:
, Quadratic form in 1 variables over Integer Ring with coefficients:
[ 345 ]]
sage: Q.jordan_blocks_in_unimodular_list_by_scale_power(3)
[Quadratic form in 2 variables over Integer Ring with coefficients:
[ 2 0 ]
[ * 10 ], Quadratic form in 1 variables over Integer Ring with coefficients:
[ 2 ]]
"""
## Sanity Check
if self.base_ring() != ZZ:
raise TypeError("Oops! This method only makes sense for integer-valued quadratic forms (i.e. defined over ZZ).")
## Deal with zero dim'l forms
if self.dim() == 0:
return []
## Find the Jordan Decomposition
list_of_jordan_pairs = self.jordan_blocks_by_scale_and_unimodular(p)
scale_list = [P[0] for P in list_of_jordan_pairs]
s_max = max(scale_list)
if min(scale_list) < 0:
raise TypeError("Oops! The given quadratic form has a Jordan component with a negative scale exponent!\n" \
+ "This routine requires an integer-matrix quadratic form for the output indexing to work properly!")
## Make the new list of unimodular Jordan components
zero_form = copy.deepcopy(self)
zero_form.__init__(ZZ, 0)
list_by_scale = [zero_form for _ in range(s_max+1)]
for P in list_of_jordan_pairs:
list_by_scale[P[0]] = P[1]
## Return the new list
return list_by_scale
| 35.439655 | 137 | 0.570847 |
acf5b8f81a5f78f29826417172bb29195d42f6b1 | 36,435 | py | Python | lib-python/2.7/test/test_array.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | ["CNRI-Jython"] | 70 | 2015-08-04T09:39:43.000Z | 2021-09-04T23:28:39.000Z | lib-python/2.7/test/test_array.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | ["CNRI-Jython"] | 8 | 2016-04-28T16:05:09.000Z | 2019-04-02T07:21:23.000Z | lib-python/2.7/test/test_array.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | ["CNRI-Jython"] | 299 | 2015-01-23T10:06:24.000Z | 2022-02-02T06:34:51.000Z |
#! /usr/bin/env python
"""Test the arraymodule.
Roger E. Masse
"""
import unittest
import warnings
from test import test_support
from weakref import proxy
import array, cStringIO
from cPickle import loads, dumps, HIGHEST_PROTOCOL
class ArraySubclass(array.array):
pass
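# Regression helper for SF bug #1486663: constructing an array subclass with
# keyword arguments must not raise a TypeError (see test_subclass_with_kwargs).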
class ArraySubclassWithKwargs(array.array):
def __init__(self, typecode, newarg=None):
array.array.__init__(self, typecode)
tests = [] # list to accumulate all tests
typecodes = "cubBhHiIlLfd"
class BadConstructorTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, array.array)
self.assertRaises(TypeError, array.array, spam=42)
self.assertRaises(TypeError, array.array, 'xx')
self.assertRaises(ValueError, array.array, 'x')
tests.append(BadConstructorTest)
class BaseTest(unittest.TestCase):
# Required class attributes (provided by subclasses
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
def assertEntryEqual(self, entry1, entry2):
self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
self.assertTrue(a.itemsize>=self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
def test_buffer_info(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assertIsInstance(bi, tuple)
self.assertEqual(len(bi), 2)
self.assertIsInstance(bi[0], (int, long))
self.assertIsInstance(bi[1], int)
self.assertEqual(bi[1], len(a))
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
b.byteswap()
self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_pickle(self):
for protocol in range(HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode, self.example)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode, self.example)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_pickle_for_empty_array(self):
for protocol in range(HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_insert(self):
a = array.array(self.typecode, self.example)
a.insert(0, self.example[0])
self.assertEqual(len(a), 1+len(self.example))
self.assertEqual(a[0], a[1])
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
self.assertRaises(TypeError, a.insert, 0, None)
a = array.array(self.typecode, self.example)
a.insert(-1, self.example[0])
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:-1] + self.example[:1] + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a.insert(-1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a.insert(1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[:1])
)
def test_tofromfile(self):
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.tofile)
self.assertRaises(TypeError, a.tofile, cStringIO.StringIO())
test_support.unlink(test_support.TESTFN)
f = open(test_support.TESTFN, 'wb')
try:
a.tofile(f)
f.close()
b = array.array(self.typecode)
f = open(test_support.TESTFN, 'rb')
self.assertRaises(TypeError, b.fromfile)
self.assertRaises(
TypeError,
b.fromfile,
cStringIO.StringIO(), len(self.example)
)
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
self.assertRaises(EOFError, b.fromfile, f, 1)
f.close()
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_fromfile_ioerror(self):
# Issue #5395: Check if fromfile raises a proper IOError
# instead of EOFError.
a = array.array(self.typecode)
f = open(test_support.TESTFN, 'wb')
try:
self.assertRaises(IOError, a.fromfile, f, len(self.example))
finally:
f.close()
test_support.unlink(test_support.TESTFN)
def test_filewrite(self):
a = array.array(self.typecode, 2*self.example)
f = open(test_support.TESTFN, 'wb')
try:
f.write(a)
f.close()
b = array.array(self.typecode)
f = open(test_support.TESTFN, 'rb')
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_tofromlist(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tolist, 42)
self.assertRaises(TypeError, b.fromlist)
self.assertRaises(TypeError, b.fromlist, 42)
self.assertRaises(TypeError, b.fromlist, [None])
b.fromlist(a.tolist())
self.assertEqual(a, b)
def test_tofromstring(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tostring, 42)
self.assertRaises(TypeError, b.fromstring)
self.assertRaises(TypeError, b.fromstring, 42)
b.fromstring(a.tostring())
self.assertEqual(a, b)
if a.itemsize>1:
self.assertRaises(ValueError, b.fromstring, "x")
def test_repr(self):
a = array.array(self.typecode, 2*self.example)
self.assertEqual(a, eval(repr(a), {"array": array.array}))
a = array.array(self.typecode)
self.assertEqual(repr(a), "array('%s')" % self.typecode)
def test_str(self):
a = array.array(self.typecode, 2*self.example)
str(a)
def test_cmp(self):
a = array.array(self.typecode, self.example)
self.assertTrue((a == 42) is False)
self.assertTrue((a != 42) is True)
self.assertTrue((a == a) is True)
self.assertTrue((a != a) is False)
self.assertTrue((a < a) is False)
self.assertTrue((a <= a) is True)
self.assertTrue((a > a) is False)
self.assertTrue((a >= a) is True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
self.assertTrue((a == 2*a) is False)
self.assertTrue((a != 2*a) is True)
self.assertTrue((a < 2*a) is True)
self.assertTrue((a <= 2*a) is True)
self.assertTrue((a > 2*a) is False)
self.assertTrue((a >= 2*a) is False)
self.assertTrue((a == al) is False)
self.assertTrue((a != al) is True)
self.assertTrue((a < al) is False)
self.assertTrue((a <= al) is False)
self.assertTrue((a > al) is True)
self.assertTrue((a >= al) is True)
self.assertTrue((a == ab) is False)
self.assertTrue((a != ab) is True)
self.assertTrue((a < ab) is True)
self.assertTrue((a <= ab) is True)
self.assertTrue((a > ab) is False)
self.assertTrue((a >= ab) is False)
def test_add(self):
a = array.array(self.typecode, self.example) \
+ array.array(self.typecode, self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[::-1])
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__add__, "bad")
def test_iadd(self):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
self.assertTrue(a is b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
)
a = array.array(self.typecode, self.example)
a += a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__iadd__, "bad")
def test_mul(self):
a = 5*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a = array.array(self.typecode, self.example)*5
self.assertEqual(
a,
array.array(self.typecode, self.example*5)
)
a = 0*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = (-1)*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
self.assertRaises(TypeError, a.__mul__, "bad")
def test_imul(self):
a = array.array(self.typecode, self.example)
b = a
a *= 5
self.assertTrue(a is b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
self.assertTrue(a is b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
self.assertTrue(a is b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
self.assertTrue(a is b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
a *= -1
self.assertEqual(a, array.array(self.typecode))
self.assertRaises(TypeError, a.__imul__, "bad")
def test_getitem(self):
a = array.array(self.typecode, self.example)
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[0L], self.example[0])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[-1L], self.example[-1])
self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
self.assertEntryEqual(a[-len(self.example)], self.example[0])
self.assertRaises(TypeError, a.__getitem__)
self.assertRaises(IndexError, a.__getitem__, len(self.example))
self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
def test_setitem(self):
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[0L] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1L] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[len(self.example)-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-len(self.example)] = a[-1]
self.assertEntryEqual(a[0], a[-1])
self.assertRaises(TypeError, a.__setitem__)
self.assertRaises(TypeError, a.__setitem__, None)
self.assertRaises(TypeError, a.__setitem__, 0, None)
self.assertRaises(
IndexError,
a.__setitem__,
len(self.example), self.example[0]
)
self.assertRaises(
IndexError,
a.__setitem__,
-len(self.example)-1, self.example[0]
)
def test_delitem(self):
a = array.array(self.typecode, self.example)
del a[0]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
a = array.array(self.typecode, self.example)
del a[-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[len(self.example)-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[-len(self.example)]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
self.assertRaises(TypeError, a.__delitem__)
self.assertRaises(TypeError, a.__delitem__, None)
self.assertRaises(IndexError, a.__delitem__, len(self.example))
self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
def test_getslice(self):
a = array.array(self.typecode, self.example)
self.assertEqual(a[:], a)
self.assertEqual(
a[1:],
array.array(self.typecode, self.example[1:])
)
self.assertEqual(
a[:1],
array.array(self.typecode, self.example[:1])
)
self.assertEqual(
a[:-1],
array.array(self.typecode, self.example[:-1])
)
self.assertEqual(
a[-1:],
array.array(self.typecode, self.example[-1:])
)
self.assertEqual(
a[-1:-1],
array.array(self.typecode)
)
self.assertEqual(
a[2:1],
array.array(self.typecode)
)
self.assertEqual(
a[1000:],
array.array(self.typecode)
)
self.assertEqual(a[-1000:], a)
self.assertEqual(a[:1000], a)
self.assertEqual(
a[:-1000],
array.array(self.typecode)
)
self.assertEqual(a[-1000:1000], a)
self.assertEqual(
a[2000:1000],
array.array(self.typecode)
)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing
# (Assumes list conversion works correctly, too)
a = array.array(self.typecode, self.example)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
self.assertEqual(list(a[start:stop:step]),
list(a)[start:stop:step])
def test_setslice(self):
a = array.array(self.typecode, self.example)
a[:1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[:-1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[-1:])
)
a = array.array(self.typecode, self.example)
a[-1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:-1] = a
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:1] + self.example + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a[1000:] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[-1000:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:1000] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:-1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[1:0] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[2000:1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
a = array.array(self.typecode, self.example)
L = list(a)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
a[start:stop:step] = array.array(self.typecode, data)
self.assertEqual(a, array.array(self.typecode, L))
del L[start:stop:step]
del a[start:stop:step]
self.assertEqual(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
def test_remove(self):
for x in self.example:
example = 2*self.example
a = array.array(self.typecode, example)
pos = example.index(x)
example2 = example[:pos] + example[pos+1:]
a.remove(x)
self.assertEqual(a, array.array(self.typecode, example2))
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.remove, self.outside)
self.assertRaises(ValueError, a.remove, None)
def test_pop(self):
a = array.array(self.typecode)
self.assertRaises(IndexError, a.pop)
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.pop, 42, 42)
self.assertRaises(TypeError, a.pop, None)
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
self.assertEntryEqual(a.pop(0), self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:]+self.example)
)
self.assertEntryEqual(a.pop(1), self.example[2])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
)
self.assertEntryEqual(a.pop(0), self.example[1])
self.assertEntryEqual(a.pop(), self.example[-1])
self.assertEqual(
a,
array.array(self.typecode, self.example[3:]+self.example[:-1])
)
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
def test_extend(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.extend)
a.extend(array.array(self.typecode, self.example[::-1]))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
a = array.array(self.typecode, self.example)
a.extend(a)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.extend, b)
a = array.array(self.typecode, self.example)
a.extend(self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
def test_constructor_with_iterable_argument(self):
a = array.array(self.typecode, iter(self.example))
b = array.array(self.typecode, self.example)
self.assertEqual(a, b)
# non-iterable argument
self.assertRaises(TypeError, array.array, self.typecode, 10)
# pass through errors raised in __iter__
class A:
def __iter__(self):
raise UnicodeError
self.assertRaises(UnicodeError, array.array, self.typecode, A())
# pass through errors raised in next()
def B():
raise UnicodeError
yield None
self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
try:
import gc
except ImportError:
return
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
gc.collect()
def test_buffer(self):
a = array.array(self.typecode, self.example)
with test_support.check_py3k_warnings():
b = buffer(a)
self.assertEqual(b[0], a.tostring()[0])
def test_weakref(self):
s = array.array(self.typecode, self.example)
p = proxy(s)
self.assertEqual(p.tostring(), s.tostring())
s = None
self.assertRaises(ReferenceError, len, p)
def test_bug_782369(self):
import sys
if hasattr(sys, "getrefcount"):
for i in range(10):
b = array.array('B', range(64))
rc = sys.getrefcount(10)
for i in range(10):
b = array.array('B', range(64))
self.assertEqual(rc, sys.getrefcount(10))
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
with warnings.catch_warnings():
warnings.filterwarnings("ignore", '', DeprecationWarning)
ArraySubclassWithKwargs('b', newarg=1)
class StringTest(BaseTest):
def test_setitem(self):
super(StringTest, self).test_setitem()
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, 0, self.example[:2])
class CharacterTest(StringTest):
typecode = 'c'
example = '\x01azAZ\x00\xfe'
smallerexample = '\x01azAY\x00\xfe'
biggerexample = '\x01azAZ\x00\xff'
outside = '\x33'
minitemsize = 1
    def test_subclassing(self):
class EditableString(array.array):
def __new__(cls, s, *args, **kwargs):
return array.array.__new__(cls, 'c', s)
def __init__(self, s, color='blue'):
self.color = color
def strip(self):
self[:] = array.array('c', self.tostring().strip())
def __repr__(self):
return 'EditableString(%r)' % self.tostring()
s = EditableString("\ttest\r\n")
s.strip()
self.assertEqual(s.tostring(), "test")
self.assertEqual(s.color, "blue")
s.color = "red"
self.assertEqual(s.color, "red")
self.assertEqual(s.__dict__.keys(), ["color"])
def test_nounicode(self):
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.fromunicode, unicode(''))
self.assertRaises(ValueError, a.tounicode)
tests.append(CharacterTest)
if test_support.have_unicode:
class UnicodeTest(StringTest):
typecode = 'u'
example = unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')
smallerexample = unicode(r'\x01\u263a\x00\ufefe', 'unicode-escape')
biggerexample = unicode(r'\x01\u263a\x01\ufeff', 'unicode-escape')
outside = unicode('\x33')
minitemsize = 2
def test_unicode(self):
self.assertRaises(TypeError, array.array, 'b', unicode('foo', 'ascii'))
a = array.array('u', unicode(r'\xa0\xc2\u1234', 'unicode-escape'))
a.fromunicode(unicode(' ', 'ascii'))
a.fromunicode(unicode('', 'ascii'))
a.fromunicode(unicode('', 'ascii'))
a.fromunicode(unicode(r'\x11abc\xff\u1234', 'unicode-escape'))
s = a.tounicode()
self.assertEqual(
s,
unicode(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
)
s = unicode(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
a = array.array('u', s)
self.assertEqual(
repr(a),
r"""array('u', u'\x00="\'a\\b\x80\xff\x00\x01\u1234')"""
)
self.assertRaises(TypeError, a.fromunicode)
tests.append(UnicodeTest)
class NumberTest(BaseTest):
def test_extslice(self):
a = array.array(self.typecode, range(5))
self.assertEqual(a[::], a)
self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
def test_delslice(self):
a = array.array(self.typecode, range(5))
del a[::2]
self.assertEqual(a, array.array(self.typecode, [1,3]))
a = array.array(self.typecode, range(5))
del a[1::2]
self.assertEqual(a, array.array(self.typecode, [0,2,4]))
a = array.array(self.typecode, range(5))
del a[1::-2]
self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
a = array.array(self.typecode, range(10))
del a[::1000]
self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
# test issue7788
a = array.array(self.typecode, range(10))
del a[9::1<<333]
def test_assignment(self):
a = array.array(self.typecode, range(10))
a[::2] = array.array(self.typecode, [42]*5)
self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
a = array.array(self.typecode, range(10))
a[::-4] = array.array(self.typecode, [10]*3)
self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = array.array(self.typecode, range(4))
a[::-1] = a
self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
a = array.array(self.typecode, range(10))
b = a[:]
c = a[:]
ins = array.array(self.typecode, range(2))
a[2:3] = ins
b[slice(2,3)] = ins
c[2:3:] = ins
def test_iterationcontains(self):
a = array.array(self.typecode, range(10))
self.assertEqual(list(a), range(10))
b = array.array(self.typecode, [20])
self.assertEqual(a[-1] in a, True)
self.assertEqual(b[0] not in a, True)
def check_overflow(self, lower, upper):
# method to be used by subclasses
# should not overflow assigning lower limit
a = array.array(self.typecode, [lower])
a[0] = lower
# should overflow assigning less than lower limit
self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
# should not overflow assigning upper limit
a = array.array(self.typecode, [upper])
a[0] = upper
# should overflow assigning more than upper limit
self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
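    # Worked example of these limits (a sketch, not an extra test): for the signed
    # byte typecode 'b' the itemsize is 1, so lower = -2**7 = -128 and
    # upper = 2**7 - 1 = 127, and assigning -129 or 128 must raise OverflowError.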
def test_subclassing(self):
typecode = self.typecode
class ExaggeratingArray(array.array):
__slots__ = ['offset']
def __new__(cls, typecode, data, offset):
return array.array.__new__(cls, typecode, data)
def __init__(self, typecode, data, offset):
self.offset = offset
def __getitem__(self, i):
return array.array.__getitem__(self, i) + self.offset
a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
self.assertEntryEqual(a[0], 7)
self.assertRaises(AttributeError, setattr, a, "color", "blue")
class SignedNumberTest(NumberTest):
example = [-1, 0, 1, 42, 0x7f]
smallerexample = [-1, 0, 1, 42, 0x7e]
biggerexample = [-1, 0, 1, 43, 0x7f]
outside = 23
def test_overflow(self):
a = array.array(self.typecode)
lower = -1 * long(pow(2, a.itemsize * 8 - 1))
upper = long(pow(2, a.itemsize * 8 - 1)) - 1L
self.check_overflow(lower, upper)
class UnsignedNumberTest(NumberTest):
example = [0, 1, 17, 23, 42, 0xff]
smallerexample = [0, 1, 17, 23, 42, 0xfe]
biggerexample = [0, 1, 17, 23, 43, 0xff]
outside = 0xaa
def test_overflow(self):
a = array.array(self.typecode)
lower = 0
upper = long(pow(2, a.itemsize * 8)) - 1L
self.check_overflow(lower, upper)
class ByteTest(SignedNumberTest):
typecode = 'b'
minitemsize = 1
tests.append(ByteTest)
class UnsignedByteTest(UnsignedNumberTest):
typecode = 'B'
minitemsize = 1
tests.append(UnsignedByteTest)
class ShortTest(SignedNumberTest):
typecode = 'h'
minitemsize = 2
tests.append(ShortTest)
class UnsignedShortTest(UnsignedNumberTest):
typecode = 'H'
minitemsize = 2
tests.append(UnsignedShortTest)
class IntTest(SignedNumberTest):
typecode = 'i'
minitemsize = 2
tests.append(IntTest)
class UnsignedIntTest(UnsignedNumberTest):
typecode = 'I'
minitemsize = 2
tests.append(UnsignedIntTest)
class LongTest(SignedNumberTest):
typecode = 'l'
minitemsize = 4
tests.append(LongTest)
class UnsignedLongTest(UnsignedNumberTest):
typecode = 'L'
minitemsize = 4
tests.append(UnsignedLongTest)
class FPTest(NumberTest):
example = [-42.0, 0, 42, 1e5, -1e10]
smallerexample = [-42.0, 0, 42, 1e5, -2e10]
biggerexample = [-42.0, 0, 42, 1e5, 1e10]
outside = 23
def assertEntryEqual(self, entry1, entry2):
self.assertAlmostEqual(entry1, entry2)
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
                # On alphas, treating the byte-swapped bit patterns as
# floats/doubles results in floating point exceptions
# => compare the 8bit string values instead
self.assertNotEqual(a.tostring(), b.tostring())
b.byteswap()
self.assertEqual(a, b)
class FloatTest(FPTest):
typecode = 'f'
minitemsize = 4
tests.append(FloatTest)
class DoubleTest(FPTest):
typecode = 'd'
minitemsize = 8
def test_alloc_overflow(self):
from sys import maxsize
a = array.array('d', [-1]*65536)
try:
a *= maxsize//65536 + 1
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
b = array.array('d', [ 2.71828183, 3.14159265, -1])
try:
b * (maxsize//3 + 1)
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
tests.append(DoubleTest)
def test_main(verbose=None):
import sys
test_support.run_unittest(*tests)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*tests)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
| 33.152866 | 92 | 0.574393 |
acf5b92416a45889b5c3627026125ed39a253cf0 | 3,250 | py | Python | app.py | kenzedoo/dice-game | fe44d868888115aad15b88aefe4e2d9978eab7c6 | [
"Apache-2.0"
] | null | null | null | app.py | kenzedoo/dice-game | fe44d868888115aad15b88aefe4e2d9978eab7c6 | [
"Apache-2.0"
] | null | null | null | app.py | kenzedoo/dice-game | fe44d868888115aad15b88aefe4e2d9978eab7c6 | [
"Apache-2.0"
] | null | null | null | import random
authed_players = ["Tom", "Mac", "Seth", "Kyle"] #Array
winner_name = "" #String
players = {} #Map
num_of_games = 5 #Int
total_scores = {} #Map
# function to sign in user
def sign_in():
username = input("What is your name player: ").capitalize()
if username in authed_players:
players[username] = []
print("%s logged in" % username)
else:
print("invalid sign in")
sign_in()
# function to initialise game for current user
def play_game(current_player):
print("Do you want to roll {}? (Y/n)".format(current_player))
player_input = input()
if player_input.lower() == "y":
players[current_player].append(roll_dice(current_player))
else:
print("Too bad sucker")
players[current_player].append(roll_dice(current_player))
# function to roll dice for current user
def roll_dice(current_player):
score = 0
dice_roll1 = random.randint(1, 6)
dice_roll2 = random.randint(1, 6)
dice_roll3 = random.randint(1, 6)
dice_score = dice_roll1 + dice_roll2
print("Roll1 {} Roll2 {}".format(dice_roll1, dice_roll2))
if dice_score % 2 == 0:
score += (dice_score + 10)
print("+10 points")
if dice_score % 2 != 0:
score += (dice_score - 5)
print("-5 points")
    if score < 0:
score = 0
print("Score is negative value, setting to 0")
if dice_roll1 == dice_roll2:
score += dice_roll3
print("Double trouble adding {} to score".format(dice_roll3))
print("{} has scored: [{}]".format(current_player, score))
return score
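# Worked example of the scoring rules above (illustrative rolls only):
#   roll1=3, roll2=5 -> dice_score=8 (even) -> score = 8 + 10 = 18
#   roll1=2, roll2=1 -> dice_score=3 (odd)  -> score = 3 - 5  = -2 -> clamped to 0
#   roll1=4, roll2=4 -> a double (the sum is always even), so dice_roll3 is added on top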
# keep looping sign_in function while keys in players map is less than 2
while len(players.keys()) < 2:
sign_in()
# keep looping games for num_of_games for each player
for game in range(num_of_games):
print("Starting game: {}".format(game+1))
for player in players.keys():
play_game(player)
# build map of total scores for each user
for player in players.keys():
total_scores[player] = sum(players[player])
# get winning scores
winners = [key for m in [max(total_scores.values())] for key, val in total_scores.items() if val == m]
# if winners are greater than 1 then game was a draw else print winner
if len(winners) > 1:
print("Its time to d d d d d d d duel")
duel_scores = 0
current_player = []
for player in players.keys():
current_player.append(player)
roll = random.randint(1, 6)
print("{} has rolled {}".format(player, roll))
if duel_scores != 0:
if duel_scores == roll:
print("The game is a draw!")
elif duel_scores < roll:
print("The winner is {} with a score of {}".format(player, total_scores[player]))
else:
print("The winner is {} with a score of {}".format(current_player[0], total_scores[current_player[0]]))
duel_scores = roll
else:
with open("scores.txt", "a") as file:
print("The winner is {} with a score of {}".format(winners[0], total_scores[winners[0]]))
file.write("{},{}\n".format(winners[0], total_scores[winners[0]]))
| 30.952381 | 119 | 0.630462 |
acf5b9a235f63ae4c7a3471295588cfd87a1a45e | 2,284 | py | Python | web/formatting.py | JoshKarpel/chtc-bot | 84e2e8ab01b8f162253b5d90a8b54a7c8de5e424 | [
"Apache-2.0"
] | null | null | null | web/formatting.py | JoshKarpel/chtc-bot | 84e2e8ab01b8f162253b5d90a8b54a7c8de5e424 | [
"Apache-2.0"
] | 48 | 2020-04-15T16:22:47.000Z | 2020-08-28T18:02:37.000Z | web/formatting.py | JoshKarpel/chtc-bot | 84e2e8ab01b8f162253b5d90a8b54a7c8de5e424 | [
"Apache-2.0"
] | null | null | null | def plural(collection):
"""Return an 's' if the collection has any number of elements but 1."""
return "" if len(collection) == 1 else "s"
def bold(text):
"""Wrap text in markdown bold text markers."""
return f"*{text}*"
def italic(text):
"""Wrap text in markdown italic text markers."""
return f"_{text}_"
def fixed(text):
"""Wrap text in markdown inline fixed text markers."""
return f"`{text}`"
def fixed_block(text):
"""Wrap text in markdown block fixed text markers."""
return f"```\n{text}\n```"
def link(url, text=None):
"""Construct a Slack-encoded link. If text is None, the raw URL is returned."""
if text is not None:
return f"<{url}|{text}>"
else:
return url
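# Minimal usage sketch for the helpers above (example values only):
#   bold("jobs")                         -> "*jobs*"
#   fixed("condor_q")                    -> "`condor_q`"
#   link("https://example.org", "docs")  -> "<https://example.org|docs>"
#   link("https://example.org")          -> "https://example.org"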
def compress_whitespace(text):
"""Convert all whitespaces into a single space between words."""
return " ".join(text.split()).replace("<br />", "\n")
def inplace_convert_em_to_underscores(soup, selector="em"):
for em in soup.find_all(selector):
em.string = italic(em.string)
em.unwrap()
def inplace_convert_inline_code_to_backticks(
soup, selector="code.docutils.literal.notranslate > span.pre"
):
for span in soup.select(selector):
span.string = fixed(span.string)
span.parent.unwrap()
span.unwrap()
def inplace_convert_strong_to_stars(soup, selector="strong"):
for em in soup.find_all(selector):
em.string = bold(em.string)
em.unwrap()
def inplace_convert_internal_links_to_links(soup, base_url, inner_span_classes):
for span in soup.select(f"a.reference.internal > span.{inner_span_classes}"):
href = span.parent.get("href")
url = f"{base_url.rstrip('/')}{'/' if not href.startswith('#') else ''}{href}"
span.string = link(url, span.string)
span.parent.unwrap()
span.unwrap()
def inplace_convert_code_block_to_code_block(
soup, selector="div.highlight-default.notranslate > div.highlight > pre"
):
codes = soup.select(selector)
for code in codes:
code.replace_with(f"```{code.text}```".replace("\n", "<br />"))
def inplace_convert_external_links_to_links(soup):
for href in soup.select("a"):
href.string = link(href.get("href"), href.text)
href.unwrap()
| 28.197531 | 86 | 0.648862 |
acf5b9eb9dbefd3081b97bb9c73ea42b2920fb87 | 4,417 | py | Python | mysmartusb.py | mbuesch/avrmakelib | 6a3bbe97f0fb98272a5b3e33b066e1e4325b00b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | mysmartusb.py | mbuesch/avrmakelib | 6a3bbe97f0fb98272a5b3e33b066e1e4325b00b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | mysmartusb.py | mbuesch/avrmakelib | 6a3bbe97f0fb98272a5b3e33b066e1e4325b00b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
# Copyright (C) 2012-2021 Michael Buesch <m@bues.ch>
#
# Licensed under the Apache License version 2.0
# or the MIT license, at your option.
# SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import sys
import getopt
import time
try:
from serial import *
except ImportError:
print("ERROR: pyserial module not available.")
print("On Debian Linux please do: apt-get install python3-serial")
sys.exit(1)
def str2bool(string):
if string.lower() in ("true", "on", "yes"):
return True
if string.lower() in ("false", "off", "no"):
return False
try:
return bool(int(string))
except ValueError as e:
pass
return False
def hexdump(data):
ret = []
for c in data:
ret.append("%02X" % c)
return "".join(ret)
class MySmartUsbError(Exception): pass
class MySmartUsb(object):
PREFIX = b"\xE6\xB5\xBA\xB9\xB2\xB3\xA9"
MODE_PROG = b'p'
MODE_DATA = b'd'
MODE_QUIET = b'q'
def __init__(self, ttyDev, debug=False):
self.debug = debug
self.serial = Serial(ttyDev, 19200, 8, PARITY_NONE, 1)
self.serial.flushInput()
self.serial.flushOutput()
def resetBoard(self):
self.__sendCmd(b'r')
def resetProg(self):
self.__sendCmd(b'R')
def power(self, on):
self.__sendCmd(b'+' if on else b'-')
def setMode(self, mode):
if self.getMode() == mode:
return
self.__sendCmd(mode)
time.sleep(0.5)
if self.getMode() == mode:
return
raise MySmartUsbError("Failed to set mode")
def getMode(self):
return self.__sendCmd(b'i')
def getVersion(self):
b = self.__sendCmd(b'v')
try:
return b.decode('ASCII')
except UnicodeError:
raise MySmartUsbError("Failed to read version string")
def close(self):
self.serial.flush()
self.serial.close()
def __sendCmd(self, cmd):
data = self.PREFIX + cmd
if self.debug:
print("Sending command: " + hexdump(data))
self.serial.write(data)
self.serial.flush()
if cmd == b'R':
return
if cmd == b'v':
time.sleep(0.25)
count = self.serial.inWaiting()
ret = self.serial.read(count)
ret = ret[ret.find(b'\xF7')+1:]
ret = ret[:ret.find(b'\xF7')]
return ret
ret = self.serial.read(5)
while ret[0:2] != b"\xF7\xB1" or ret[3:5] != b"\x0D\x0A":
ret = ret[1:] + self.serial.read(1)
if self.debug:
print("Command returned: " + hexdump(ret))
if cmd != b'i' and ret[2] != ord(cmd):
raise MySmartUsbError(
"Invalid command return: %02X" %\
(ret[2]))
return bytes( (ret[2], ) )
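# Minimal usage sketch for the class above (the device path is hypothetical):
#
#   msu = MySmartUsb("/dev/ttyUSB0")
#   msu.setMode(MySmartUsb.MODE_PROG)  # switch the programmer to prog mode
#   msu.resetBoard()                   # reset the attached target board
#   print(msu.getVersion())
#   msu.close()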
def usage():
print("mysmartusb [OPTIONS] /dev/ttyUSBx")
print("")
print("Options:")
print(" -r|--reset-board Reset the board")
print(" -R|--reset-prog Reset the programmer")
print(" -p|--power 1/0 Turn on board power on/off")
print(" -m|--mode p/d/q Enter progmode/datamode/quietmode")
print(" -V|--getversion Read and display the mysmartusb version")
print("")
print(" -D|--debug Enable debugging")
def main():
actions = []
debug = False
try:
(opts, args) = getopt.getopt(sys.argv[1:],
"hrRp:m:VD",
[ "help", "reset-board", "reset-prog", "prog=",
"mode=", "getversion", "debug", ])
except getopt.GetoptError:
usage()
return 1
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-r", "--reset-board"):
actions.append( ("reset-board",) )
if o in ("-R", "--reset-prog"):
actions.append( ("reset-prog",) )
if o in ("-p", "--power"):
actions.append( ("power", str2bool(v)) )
if o in ("-m", "--mode"):
if v.lower() == "p":
mode = MySmartUsb.MODE_PROG
elif v.lower() == "d":
mode = MySmartUsb.MODE_DATA
elif v.lower() == "q":
mode = MySmartUsb.MODE_QUIET
else:
print("Invalid mode: " + v)
return 1
actions.append( ("mode", mode) )
if o in ("-V", "--getversion"):
actions.append( ("getversion",) )
if o in ("-D", "--debug"):
debug = True
if len(args) != 1:
usage()
return 1
dev = args[0]
try:
msu = MySmartUsb(dev, debug)
for action in actions:
if action[0] == "reset-board":
msu.resetBoard()
elif action[0] == "reset-prog":
msu.resetProg()
elif action[0] == "power":
msu.power(action[1])
elif action[0] == "mode":
msu.setMode(action[1])
elif action[0] == "getversion":
print(msu.getVersion())
else:
assert(0)
msu.close()
except MySmartUsbError as e:
print("ERROR: " + str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| 23.620321 | 75 | 0.617161 |
acf5b9ffc61864a189310247e640d8fa1fd880f9 | 5,667 | py | Python | sdk/compute/azure-mgmt-imagebuilder/azure/mgmt/imagebuilder/operations/_operations.py | jalauzon-msft/azure-sdk-for-python | 15967f5c6d3376f2334a382486ba86339786e028 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/compute/azure-mgmt-imagebuilder/azure/mgmt/imagebuilder/operations/_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-imagebuilder/azure/mgmt/imagebuilder/operations/_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2022-02-14") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.VirtualMachineImages/operations")
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.imagebuilder.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.OperationListResult"]:
"""Lists available operations for the Microsoft.VirtualMachineImages provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.imagebuilder.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-14") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.VirtualMachineImages/operations"} # type: ignore
| 39.354167 | 133 | 0.653256 |
acf5bab9c973cf252d7f60dce07b15fd2680287e | 2,862 | py | Python | kemlglearn/cluster/PIC.py | gonzalorecio/kemlglearn | 146fa3e899a8ec661bfbcceaba289ee7d80ed96b | [
"MIT"
] | null | null | null | kemlglearn/cluster/PIC.py | gonzalorecio/kemlglearn | 146fa3e899a8ec661bfbcceaba289ee7d80ed96b | [
"MIT"
] | null | null | null | kemlglearn/cluster/PIC.py | gonzalorecio/kemlglearn | 146fa3e899a8ec661bfbcceaba289ee7d80ed96b | [
"MIT"
] | null | null | null | import numpy as np
from scipy.stats import mode
from sklearn.cluster import KMeans
from scipy.spatial import distance
from sklearn.neighbors import kneighbors_graph
import matplotlib.pyplot as plt
class PowerIterationClustering:
labels_ = None
cluster_centroids_ = None
embedding_ = None
def __init__(self, n_clusters, max_iter=500):
self.n_clusters = n_clusters
self.max_iter = max_iter
def fit(self, X, affinity='rbf', k_nn=10):
"""Compute Power Iteration Clustering (PIC).
Parameters
----------
X : array-like, shape=[n_samples, n_features]
        affinity: similarity metric for the affinity matrix ('rbf', 'n_neighbors',
            or any scipy distance metric, which is converted as 1 - distance)
        k_nn: number of nearest neighbours used when affinity == 'n_neighbors'
"""
if affinity == 'rbf':
A = np.exp(-distance.cdist(X, X, 'seuclidean')
** 2/(2*np.var(X))).astype(np.float64)
elif affinity == 'n_neighbors':
A = kneighbors_graph(X, k_nn, mode='distance',
include_self=True).toarray()
else:
A = 1.0 - distance.cdist(X, X, metric=affinity).astype(np.float64)
# D = np.zeros(A.shape)
# np.fill_diagonal(D, np.sum(A, axis=1))
# W = np.linalg.inv(D) @ A
arr = 1/np.sum(A, axis=1)
W = A*arr.reshape((len(arr), 1))
v0 = np.sum(A, axis=1) / np.sum(A)
# Run Power Iteration method
embedding = self.PI(v0, W)
self.embedding_ = embedding
# Copute K-Means on the embedding computed from PI
kmeans = KMeans(n_clusters=self.n_clusters).fit(
embedding.reshape(-1, 1))
self.labels_ = kmeans.labels_
self.cluster_centroids_ = kmeans.cluster_centers_
return self
def PI(self, v, W):
""" Power Iteration method
v - Initial iteration vector
W - Normalized affinity matrix
"""
tol = 1e-5
epsilon = 1e-5/len(W)
delta = float('inf')
d_prev = float('inf')
v_prev = v.copy()
for _ in range(self.max_iter):
aux = W@v
v = aux / np.linalg.norm(aux, ord=2)
delta = np.linalg.norm(v-v_prev, ord=1)
if abs(delta-d_prev) < epsilon:
break
v_prev = v.copy()
d_prev = delta.copy()
return v
def plot_embedding(self, y_true, ax=None):
assert self.embedding_ is not None
v = self.embedding_
if ax:
ax.scatter(range(len(v)), v, c=y_true)
ax.set_ylim(top=sorted(v)[-1], bottom=sorted(v)[0])
plt.tight_layout()
else:
plt.scatter(range(len(v)), v, c=y_true)
plt.ylim(top=sorted(v)[-1], bottom=sorted(v)[0])
plt.tight_layout()
plt.show()
    def fit_predict(self, X, affinity='rbf'):
        return self.fit(X, affinity).labels_
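if __name__ == "__main__":
    # Minimal usage sketch on synthetic data (make_blobs is assumed available,
    # which is plausible since scikit-learn is already a dependency above).
    from sklearn.datasets import make_blobs

    X_demo, _ = make_blobs(n_samples=150, centers=3, random_state=0)
    pic = PowerIterationClustering(n_clusters=3)
    demo_labels = pic.fit_predict(X_demo, affinity='rbf')
    print("cluster sizes:", np.bincount(demo_labels))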
| 32.896552 | 78 | 0.561495 |
acf5bd2c8e121293831eeaad0aa42d57d1df2738 | 1,927 | py | Python | preprocess.py | Fuad-Hellscream/PatternCNN | 917345c72d4cafa0b3dc824016ac1e38039beb86 | [
"Apache-2.0"
] | 64 | 2017-04-06T11:04:22.000Z | 2022-03-04T12:52:34.000Z | preprocess.py | Fuad-Hellscream/PatternCNN | 917345c72d4cafa0b3dc824016ac1e38039beb86 | [
"Apache-2.0"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | preprocess.py | Fuad-Hellscream/PatternCNN | 917345c72d4cafa0b3dc824016ac1e38039beb86 | [
"Apache-2.0"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | '''Dataset preprocessing.'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import concurrent.futures
import os
import numpy as np
import wavenet.utils as utils
BATCH = 10240
RATE = 8000
CHUNK = 1600
def split_into(data, n):
res = []
for i in range(n):
res.append(data[i::n])
return res
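# For example (a sketch): split_into([0, 1, 2, 3, 4], 2) -> [[0, 2, 4], [1, 3]],
# i.e. the items are dealt out round-robin so each worker gets a similar share.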
def process_files(files, id, output, rate, chunk_length, batch):
data = []
ofilename = os.path.join(output, 'vctk_{}'.format(id))
with open(ofilename, 'wb') as ofile:
for filename in files:
for chunk in utils._preprocess(filename, rate, chunk_length):
data.append(chunk)
if len(data) >= batch:
np.save(ofile, np.array(data))
data.clear()
np.save(ofile, np.array(data))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default=os.getcwd())
parser.add_argument('--output', type=str, default='')
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--rate', type=int, default=RATE)
parser.add_argument('--stacks_num', type=int, default=5)
parser.add_argument('--layers_num', type=int, default=10)
parser.add_argument('--target_length', type=int, default=CHUNK)
parser.add_argument('--flush_every', type=int, default=BATCH)
args = parser.parse_args()
files = list(utils.wav_files_in(args.data))
file_groups = split_into(files, args.workers)
size = utils.receptive_field_size(args.layers_num, args.stacks_num) + args.target_length
with concurrent.futures.ThreadPoolExecutor(max_workers=args.workers) as pool:
for i in range(args.workers):
pool.submit(process_files, file_groups[i], i, args.output, args.rate,
size, args.flush_every)
if __name__ == '__main__':
main()
| 30.109375 | 92 | 0.654385 |
acf5bd71450c2b7d168ba548bec981b18fdb6922 | 735 | py | Python | env_viewer.py | Healthcare-Robotics/assistive-vr-gym | 4b009843b3de36a8348c5447a054bcccb65b13fe | [
"MIT"
] | 9 | 2020-07-10T22:33:16.000Z | 2021-01-18T05:41:29.000Z | env_viewer.py | Healthcare-Robotics/assistive-vr-gym | 4b009843b3de36a8348c5447a054bcccb65b13fe | [
"MIT"
] | null | null | null | env_viewer.py | Healthcare-Robotics/assistive-vr-gym | 4b009843b3de36a8348c5447a054bcccb65b13fe | [
"MIT"
] | 3 | 2020-09-12T07:37:29.000Z | 2022-02-22T08:12:06.000Z | import gym, sys, argparse
import numpy as np
import assistive_gym
if sys.version_info < (3, 0):
print('Please use Python 3')
exit()
parser = argparse.ArgumentParser(description='Assistive Gym Environment Viewer')
parser.add_argument('--env', default='ScratchItchJaco-v0',
help='Environment to test (default: ScratchItchJaco-v0)')
args = parser.parse_args()
env = gym.make(args.env)
while True:
done = False
env.render()
observation = env.reset()
action = env.action_space.sample()
print('Observation size:', np.shape(observation), 'Action size:', np.shape(action))
while not done:
env.render()
observation, reward, done, info = env.step(env.action_space.sample()) | 30.625 | 87 | 0.681633 |
acf5bdf82277bfc81ae9d52b726ab41bc773a634 | 5,576 | py | Python | acquire/measurement/views_startstop.py | Youngcius/quagent | 1c452f21913d5f3eb972af6a2911f7944e72dca8 | [
"Apache-2.0"
] | 1 | 2022-03-21T02:04:07.000Z | 2022-03-21T02:04:07.000Z | acquire/measurement/views_startstop.py | Youngcius/quagent | 1c452f21913d5f3eb972af6a2911f7944e72dca8 | [
"Apache-2.0"
] | null | null | null | acquire/measurement/views_startstop.py | Youngcius/quagent | 1c452f21913d5f3eb972af6a2911f7944e72dca8 | [
"Apache-2.0"
] | null | null | null | """
Data acquisition in StartStop mode
"""
from django.shortcuts import render
from django.http import HttpRequest, HttpResponse, JsonResponse, FileResponse, Http404
from django.http import HttpResponseServerError # 5xx error
from django.views.decorators.csrf import csrf_exempt
import uuid
import datetime
from pyecharts import options as opts
from pyecharts import charts
import copy
from ..utils import *
from ..models import *
from ..globvar import tagger, usr_stsp_map
from jinja2 import Environment, FileSystemLoader
from pyecharts.globals import CurrentConfig
CurrentConfig.GLOBAL_ENV = Environment(loader=FileSystemLoader("./template/pyecharts"))
def startstop_page(request):
"""
    Data acquisition and visualization page for the StartStop measurement mode
"""
global tagger
if tagger is None:
try:
tagger = tt.createTimeTagger()
except RuntimeError:
return HttpResponseServerError('Sorry, The Time Tagger on server is not available now!')
# query the global routing table
avail_channels = get_avail_ch(request.user) # e.g. [1, 3], or [5,7,8]
if len(avail_channels) == 0:
return HttpResponseServerError(
            'There are no available detection channels for you.\nYou should book some of them first.')
else:
return render(request, 'measurement/startstop.html', {'channels': avail_channels})
@csrf_exempt
def update_config(request):
"""
Update measurement parameters
---
- click channel
- start channel
- binwidth
"""
username = request.user.username
binwidth = int(request.POST.get('binwidth'))
ch_click, ch_start = int(request.POST.get('ch_click')), int(request.POST.get('ch_start'))
    # create a user-specific StartStop measurement instance
user_start_stop = UserDetector(username, 'StartStop')
user_start_stop.set_measure_config(**{
'binwidth': binwidth,
'click_channel': ch_click,
'start_channel': ch_start,
})
user_start_stop.create_detector(tagger)
usr_stsp_map[username] = user_start_stop
return HttpResponse('update successfully')
def start(request):
"""
Start data acquisition and displaying
"""
username = request.user.username
if username in usr_stsp_map.keys():
usr_stsp_map[username].detector.start()
return HttpResponse('Has started the StartStop measurement instance')
else:
return HttpResponse('There is no StartStop measurement instance!')
def stop(request):
"""
Stop data acquisition and displaying
"""
username = request.user.username
if username in usr_stsp_map.keys():
usr_stsp_map[username].detector.stop()
return HttpResponse('Has stopped the StartStop measurement instance')
else:
        return HttpResponse('There is no StartStop measurement instance!')
def download(request):
"""
Download real-time data according to designated time interval
"""
username = request.user.username
if username in usr_stsp_map.keys():
stsp_config = usr_stsp_map[username].config
startstop = usr_stsp_map[username].detector
else:
        raise Http404('no StartStop instance running')
T = float(request.GET.get('T')) # unit: s
time.sleep(T)
data_with_config = copy.deepcopy(stsp_config)
data_with_config['binwidth'] /= 1e12 # ps --> s
data_with_config['username'] = username
data_with_config['measure-mode'] = 'StartStop'
    data_with_config['time'] = T
    # an array of tuples (shape Nx2): the time and count of each bin
    data_with_config['data'] = startstop.getData().tolist()
data_with_config['timestamp'] = str(datetime.datetime.now())
response = FileResponse(json.dumps(data_with_config)) # dict --> str
response['Content-Type'] = 'application/octet-stream' # header information, to tell Web browser this is a file
filename = '_'.join(['data', username, str(datetime.date.today()), str(uuid.uuid1()) + '.json'])
response['Content-Disposition'] = 'attachment;filename={}'.format(filename)
return response
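# Sketch of the JSON payload written above (all field values are illustrative):
#
#   {"binwidth": 1e-9, "click_channel": 1, "start_channel": 3,
#    "username": "alice", "measure-mode": "StartStop", "time": 2.0,
#    "data": [[t_0, n_0], [t_1, n_1], ...],
#    "timestamp": "2021-01-01 12:00:00.000000"}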
def startstop_fig(username: str, x_unit: str = 'ps') -> str:
"""
    Build a histogram figure from StartStop data: an array of tuples (shape Nx2) holding the time (in ps) and count of each bin.
:param username: to acquire user-specific measurement instance
:param x_unit: to set x-axis unit, e.g., ns
"""
hist = charts.Bar()
x_scale = 1e-12 / unit_to_num[x_unit]
ymax, ymin = None, None
if username in usr_stsp_map.keys():
startstop = usr_stsp_map[username].detector
if startstop.isRunning():
            data = startstop.getData()  # array of shape [N, 2]
            # ymax, ymin = cal_max_min_limits(data[:, 1])  # disabled: data may hold fewer than two bins
vals = np.abs(np.random.randn(len(data))) # TODO, delete this
hist.add_xaxis((data[:, 0] * x_scale).tolist())
hist.add_yaxis(
series_name='time difference',
# y_axis=data[:, 1].tolist(),
y_axis=vals.tolist(), # TODO, delete this
label_opts=opts.LabelOpts(is_show=False)
)
hist.set_global_opts(
title_opts=opts.TitleOpts(title='StartStop Counting'),
xaxis_opts=opts.AxisOpts(name='Time ({})'.format(x_unit)),
yaxis_opts=opts.AxisOpts(type_='value', name='Count', min_=ymin, max_=ymax)
)
fig_str = hist.dump_options_with_quotes()
return fig_str
def startstop_chart_view(request):
return JsonResponse(json.loads(startstop_fig(
request.user.username,
request.POST.get('unit_name')
)))
| 35.069182 | 115 | 0.679161 |
acf5be7253dfb2f29d5de2566b1d05d772c4fbf1 | 307 | py | Python | feedback_form/app_settings.py | joebos/django-feedback-form | 522dd907c06b231b2c663ee9abe2d2c003ea10e1 | [
"MIT"
] | 4 | 2017-04-25T23:03:36.000Z | 2021-07-27T06:55:13.000Z | feedback_form/app_settings.py | joebos/django-feedback-form | 522dd907c06b231b2c663ee9abe2d2c003ea10e1 | [
"MIT"
] | 1 | 2021-06-10T23:15:16.000Z | 2021-06-10T23:15:16.000Z | feedback_form/app_settings.py | joebos/django-feedback-form | 522dd907c06b231b2c663ee9abe2d2c003ea10e1 | [
"MIT"
] | 3 | 2015-02-09T09:32:42.000Z | 2016-09-30T14:43:16.000Z | """Settings of the ``feedback_form`` application."""
from django.conf import settings
FEEDBACK_FORM_COLOR = getattr(settings, 'FEEDBACK_FORM_COLOR', '#6caec9')
FEEDBACK_FORM_TEXTCOLOR = getattr(settings, 'FEEDBACK_FORM_TEXTCOLOR', '#fff')
FEEDBACK_FORM_TEXT = getattr(settings, 'FEEDBACK_FORM_TEXT', '')
| 38.375 | 78 | 0.778502 |
acf5bf34b9a8585fb2777fc08c8b23223f474b70 | 9,995 | py | Python | detectron2/data/transforms/transform.py | tjdah1123/BBox | 3b037d2e5571e1bf5d91f22092491ae38466a5cc | [
"Apache-2.0"
] | null | null | null | detectron2/data/transforms/transform.py | tjdah1123/BBox | 3b037d2e5571e1bf5d91f22092491ae38466a5cc | [
"Apache-2.0"
] | null | null | null | detectron2/data/transforms/transform.py | tjdah1123/BBox | 3b037d2e5571e1bf5d91f22092491ae38466a5cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File: transform.py
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = ["ExtentTransform", "ResizeTransform", "RotationTransform"]
class ExtentTransform(Transform):
"""
Extracts a subregion from the source image and scales it to the output size.
The fill color is used to map pixels from the source rect that fall outside
the source image.
See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
"""
def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
"""
Args:
src_rect (x0, y0, x1, y1): src coordinates
output_size (h, w): dst image size
interp: PIL interpolation methods
fill: Fill color used when src_rect extends outside image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img, interp=None):
h, w = self.output_size
ret = Image.fromarray(img).transform(
size=(w, h),
method=Image.EXTENT,
data=self.src_rect,
resample=interp if interp else self.interp,
fill=self.fill,
)
return np.asarray(ret)
def apply_coords(self, coords):
# Transform image center from source coordinates into output coordinates
# and then map the new origin to the corner of the output image.
h, w = self.output_size
x0, y0, x1, y1 = self.src_rect
new_coords = coords.astype(np.float32)
new_coords[:, 0] -= 0.5 * (x0 + x1)
new_coords[:, 1] -= 0.5 * (y0 + y1)
new_coords[:, 0] *= w / (x1 - x0)
new_coords[:, 1] *= h / (y1 - y0)
new_coords[:, 0] += 0.5 * w
new_coords[:, 1] += 0.5 * h
return new_coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
class ResizeTransform(Transform):
"""
Resize the image to a target size.
"""
def __init__(self, h, w, new_h, new_w, interp=None):
"""
Args:
h, w (int): original image size
new_h, new_w (int): new image size
interp: PIL interpolation methods, defaults to bilinear.
"""
# TODO decide on PIL vs opencv
super().__init__()
if interp is None:
interp = Image.BILINEAR
self._set_attributes(locals())
def apply_image(self, img, interp=None):
assert img.shape[:2] == (self.h, self.w)
assert len(img.shape) <= 4
if img.dtype == np.uint8:
pil_image = Image.fromarray(img)
interp_method = interp if interp is not None else self.interp
pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
ret = np.asarray(pil_image)
else:
# PIL only supports uint8
if not torch.is_tensor(img):
img = torch.from_numpy(img)
shape = list(img.shape)
shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
_PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
img = F.interpolate(img, (self.new_h, self.new_w), mode=mode, align_corners=False)
shape[:2] = (self.new_h, self.new_w)
ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
return ret
def apply_coords(self, coords):
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
def inverse(self):
return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
class RotationTransform(Transform):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around its center.
"""
def __init__(self, h, w, angle, expand=True, center=None, interp=None):
"""
Args:
h, w (int): original image size
angle (float): degrees for rotation
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (tuple (width, height)): coordinates of the rotation center
if left to None, the center will be fit to the center of each image
center has no effect if expand=True because it only affects shifting
interp: cv2 interpolation method, default cv2.INTER_LINEAR
"""
super().__init__()
image_center = np.array((w / 2, h / 2))
if center is None:
center = image_center
if interp is None:
interp = cv2.INTER_LINEAR
abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
if expand:
# find the new width and height bounds
bound_w, bound_h = np.rint(
[h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
).astype(int)
else:
bound_w, bound_h = w, h
self._set_attributes(locals())
self.rm_coords = self.create_rotation_matrix()
# Needed because of this problem https://github.com/opencv/opencv/issues/11784
self.rm_image = self.create_rotation_matrix(offset=-0.5)
def apply_image(self, img, interp=None):
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
if len(img) == 0 or self.angle % 360 == 0:
return img
assert img.shape[:2] == (self.h, self.w)
interp = interp if interp is not None else self.interp
return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
def apply_coords(self, coords):
"""
coords should be a N * 2 array-like, containing N couples of (x, y) points
"""
coords = np.asarray(coords, dtype=float)
if len(coords) == 0 or self.angle % 360 == 0:
return coords
return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
return segmentation
def create_rotation_matrix(self, offset=0):
center = (self.center[0] + offset, self.center[1] + offset)
rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
if self.expand:
# Find the coordinates of the center of rotation in the new image
# The only point for which we know the future coordinates is the center of the image
rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
# shift the rotation center to the new coordinates
rm[:, 2] += new_center
return rm
def inverse(self):
"""
The inverse is to rotate it back with expand, and crop to get the original shape.
"""
if not self.expand: # Not possible to inverse if a part of the image is lost
raise NotImplementedError()
rotation = RotationTransform(
self.bound_h, self.bound_w, -self.angle, True, None, self.interp
)
crop = CropTransform(
(rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
)
return TransformList([rotation, crop])
def HFlip_rotated_box(transform, rotated_boxes):
"""
Apply the horizontal flip transform on rotated boxes.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
# Transform x_center
rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
# Transform angle
rotated_boxes[:, 4] = -rotated_boxes[:, 4]
return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
"""
Apply the resizing transform on rotated boxes. For details of how these (approximation)
formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
scale_factor_x = transform.new_w * 1.0 / transform.w
scale_factor_y = transform.new_h * 1.0 / transform.h
rotated_boxes[:, 0] *= scale_factor_x
rotated_boxes[:, 1] *= scale_factor_y
theta = rotated_boxes[:, 4] * np.pi / 180.0
c = np.cos(theta)
s = np.sin(theta)
rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
return rotated_boxes
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
| 37.716981 | 100 | 0.617109 |
acf5bf8754b97e397f2d77638e19e4ca60fd1dbc | 4,433 | py | Python | auth0/v3/test/management/test_clients.py | ozooxo/auth0-python | 9862afa717565b8d7880a29feec3281957f1bdf4 | [
"MIT"
] | null | null | null | auth0/v3/test/management/test_clients.py | ozooxo/auth0-python | 9862afa717565b8d7880a29feec3281957f1bdf4 | [
"MIT"
] | null | null | null | auth0/v3/test/management/test_clients.py | ozooxo/auth0-python | 9862afa717565b8d7880a29feec3281957f1bdf4 | [
"MIT"
] | null | null | null | import unittest
import mock
from ...management.clients import Clients
class TestClients(unittest.TestCase):
def test_init_with_optionals(self):
t = Clients(domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get("Auth0-Client", None)
self.assertEqual(telemetry_header, None)
@mock.patch("auth0.v3.management.clients.RestClient")
def test_all(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
# Default parameters are requested
c.all()
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients", args[0])
self.assertEqual(
kwargs["params"],
{"fields": None, "include_fields": "true", "page": None, "per_page": None},
)
# Fields filter
c.all(fields=["a", "b"], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients", args[0])
self.assertEqual(
kwargs["params"],
{
"fields": "a,b",
"include_fields": "false",
"page": None,
"per_page": None,
},
)
# Specific pagination
c.all(page=7, per_page=25)
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients", args[0])
self.assertEqual(
kwargs["params"],
{"fields": None, "include_fields": "true", "page": 7, "per_page": 25},
)
# Extra parameters
c.all(extra_params={"some_key": "some_value"})
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients", args[0])
self.assertEqual(
kwargs["params"],
{
"fields": None,
"include_fields": "true",
"page": None,
"per_page": None,
"some_key": "some_value",
},
)
@mock.patch("auth0.v3.management.clients.RestClient")
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
c.create({"a": "b", "c": "d"})
mock_instance.post.assert_called_with(
"https://domain/api/v2/clients", data={"a": "b", "c": "d"}
)
@mock.patch("auth0.v3.management.clients.RestClient")
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
c.get("this-id")
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients/this-id", args[0])
self.assertEqual(kwargs["params"], {"fields": None, "include_fields": "true"})
c.get("this-id", fields=["a", "b"], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/clients/this-id", args[0])
self.assertEqual(kwargs["params"], {"fields": "a,b", "include_fields": "false"})
@mock.patch("auth0.v3.management.clients.RestClient")
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
c.delete("this-id")
mock_instance.delete.assert_called_with("https://domain/api/v2/clients/this-id")
@mock.patch("auth0.v3.management.clients.RestClient")
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
c.update("this-id", {"a": "b", "c": "d"})
args, kwargs = mock_instance.patch.call_args
self.assertEqual("https://domain/api/v2/clients/this-id", args[0])
self.assertEqual(kwargs["data"], {"a": "b", "c": "d"})
@mock.patch("auth0.v3.management.clients.RestClient")
def test_rotate_secret(self, mock_rc):
mock_instance = mock_rc.return_value
c = Clients(domain="domain", token="jwttoken")
c.rotate_secret("this-id")
mock_instance.post.assert_called_with(
"https://domain/api/v2/clients/this-id/rotate-secret",
data={"id": "this-id"},
)
| 32.123188 | 88 | 0.588766 |
acf5bfdd35ff40ade215d75e98285aab10321c59 | 15,021 | py | Python | Scripts/SI_Cluster_script.py | nienkebrinkman/SS_MTI | 2632214f7df9caaa53d33432193ba0602470d21a | [
"BSD-3-Clause"
] | null | null | null | Scripts/SI_Cluster_script.py | nienkebrinkman/SS_MTI | 2632214f7df9caaa53d33432193ba0602470d21a | [
"BSD-3-Clause"
] | null | null | null | Scripts/SI_Cluster_script.py | nienkebrinkman/SS_MTI | 2632214f7df9caaa53d33432193ba0602470d21a | [
"BSD-3-Clause"
] | null | null | null | """
Interactive example to determine focal mechanisms of marsquakes recorded at the InSight station.
"""
__author__ = "Nienke Brinkman"
from os.path import join as pjoin
from os.path import exists as exist
from os import listdir as lsdir
import instaseis
import numpy as np
import matplotlib.pyplot as plt
import argparse
import toml
import mpi4py.MPI
from os import makedirs
import SS_MTI
import EventInterface
from SS_MTI import PostProcessing as _PostProcessing
def define_arguments():
helptext = "Determine focal mechanisms of Marsquake"
parser = argparse.ArgumentParser(description=helptext)
helptext = "Input toml file"
parser.add_argument("input_file", help=helptext)
return parser.parse_args()
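# Sketch of one per-event block expected in the input toml (the keys match the
# v[...] lookups below; all values are illustrative):
#
#   [S0173a]
#   phases = ["P", "S"]
#   components = ["Z", "T"]
#   t_pre = [1.0, 1.0]
#   t_post = [30.0, 30.0]
#   phase_corrs = [0.0, 0.0]
#   tstars = [0.4, 0.4]
#   fmin = 0.1
#   fmax = 0.9
#   zerophase = false
#   weights = [[1.0], [1.0]]
#   start_weight_len = 7.0
#   dt = 0.05
#   amplitude_correction = ["P"]
#   ylims = [1e-9, 1e-9]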
if __name__ == "__main__":
Parallel = True
if Parallel:
print("Your inversion will be run in parallel")
# input_file = "/home/nienke/Documents/Research/SS_MTI/Input/TAYAK_BKE_tstar_update.toml"
args = define_arguments()
print(f"Inversion based on input file: {args.input_file}")
event_input = toml.load(args.input_file, _dict=dict)
    save_folder = pjoin(
        "/home/nienke/Data_2020/Test_2021/",
        args.input_file.split("/")[-1].replace(".toml", ""),
    )
if not exist(save_folder):
makedirs(save_folder)
# save_folder = "/home/nienke/Documents/Research/Data/MTI/Inversion/Result_2/Test/"
path = "/home/nienke/Data_2020/catalog"
# path = "/home/nienke/Documents/Research/Data/MTI/catalog"
path_to_inventory = pjoin(path, "inventory.xml")
path_to_catalog = pjoin(path, "catalog.xml")
""" Read the inventory and catalog file (the once that contain info about the marsquakes) """
inv = None # SS_MTI.DataGetter.read_inv(inv_path=path_to_inventory) # Inventory file
cat = SS_MTI.DataGetter.read_cat(cat_path=path_to_catalog) # Catalog file
""" Get the data into a list of obspy.Event objects """
events = SS_MTI.DataGetter.read_events_from_cat(
event_params=event_input,
cat=cat,
inv=inv,
local_folder=pjoin(save_folder, "event.mseed"),
host_name=None,
user_name=None,
remote_folder=None,
save_file_name=pjoin(save_folder, "event.mseed"),
)
""" Specify receiver """
lat_rec = 4.5 # 02384
lon_rec = 135.623447
rec = instaseis.Receiver(latitude=lat_rec, longitude=lon_rec)
""" """
depths = np.arange(5, 90, 3)
# depths = np.arange(29, 50, 3)
# depths = [59]
strikes = np.arange(0, 360, 15)
dips = np.arange(0, 91, 10)
rakes = np.arange(-180, 180, 15)
# strikes = [15.0116557194] # [132.395557582]
# dips = [59.551091053] # [51.9591191063]
# rakes = [-45.6275510954] # [-139.94976385]
# bazs = np.arange(0, 360, 20)
""" Define different velocity models"""
db_name_1 = "/opt/databases/TAYAK_15s_BKE"
npz_file_name_1 = "/home/nienke/Data_2020/npz_files/TAYAK_BKE.npz"
db_name_2 = "/opt/databases/TAYAK_shallow"
npz_file_name_2 = "/home/nienke/Data_2020/npz_files/TAYAK.npz"
# db_name_1 = "/mnt/marshost/instaseis2/databases/TAYAK_15s_BKE"
# npz_file_name_1 = "/home/nienke/Documents/Research/Data/npz_files/TAYAK_BKE.npz"
# db_name_2 = "/mnt/marshost/instaseis2/databases/TAYAK_shallow"
# npz_file_name_2 = "/home/nienke/Documents/Research/Data/npz_files/TAYAK.npz"
# db_name_3 = "/mnt/marshost/instaseis2/databases/TAYAK_1s_30km"
# npz_file_name_3 = "/home/nienke/Documents/Research/Data/npz_files/TAYAK_30km.npz"
db_names = [db_name_1] # , db_name_3, db_name_4, db_name_5]
npz_file_names = [npz_file_name_1]
""" Loop over events to invert for: """
event_nr = 0
for i, v in event_input.items():
event = events[event_nr]
print(event.name)
event_nr += 1
        assert event.name == i, "Dictionary and events do not iterate correctly"
# if event.name == "S0173a" or event.:
# pass
# else:
# continue
if event.name == "S0183a":
event.distance = 44.5
event.baz = 73
event.az = 253
event.latitude = 15.09
event.longitude = 179.59
elif event.name == "S0325a":
event.distance = 38.4
event.baz = 0.0
event.az = 180.0
from geographiclib.geodesic import Geodesic
radius = 3389.5
flattening = 0.0
            geo = Geodesic(a=radius, f=flattening).ArcDirect(
                lat1=rec.latitude,
                lon1=rec.longitude,
                azi1=event.baz,
                a12=event.distance,
                outmask=1929,
            )
            event.latitude = geo["lat2"]
            event.longitude = geo["lon2"]
""" Define forward modeler """
forward_method = "INSTASEIS"
# db_path = v["db_path"]
# npz_file = v["npz_file"]
db_nr = 0
for db_path, npz_file in zip(db_names, npz_file_names):
            db_nr += 1
# mnt_folder = "/mnt/marshost/"
# if not lsdir(mnt_folder):
# print(f"{mnt_folder} is still empty, mounting now...")
# SS_MTI.DataGetter.mnt_remote_folder(
# host_ip="marshost.ethz.ch",
# host_usr="sysop",
# remote_folder="/data/",
# mnt_folder=mnt_folder,
# )
db = instaseis.open_db(db_path)
# SS_MTI.DataGetter.unmnt_remote_folder(mnt_folder=mnt_folder)
""" Define misfit function """
misfit_method = "L2"
weights = v["weights"]
start_weight_len = v["start_weight_len"]
dt = v["dt"]
""" Define inversion method """
inv_method = "GS"
phases = v["phases"]
components = v["components"]
amplitude_correction = v["amplitude_correction"]
t_pre = v["t_pre"]
t_post = v["t_post"]
phase_corrs = v["phase_corrs"]
tstars = v["tstars"]
fmin = v["fmin"]
fmax = v["fmax"]
zerophase = v["zerophase"]
output_folder = save_folder
ylims = v["ylims"]
""" Extra phases to plot:"""
extra_phases = None # [
# "PP",
# "sP",
# "pP",
# ]
if forward_method == "INSTASEIS":
fwd = SS_MTI.Forward.Instaseis(
instaseis_db=db,
taup_model=npz_file,
or_time=event.origin_time,
dt=dt,
start_cut=100.0,
end_cut=800.0,
)
elif forward_method == "REFLECTIVITY":
fwd = SS_MTI.Forward.reflectivity()
else:
raise ValueError(
"forward_method can be either INSTASEIS or REFLECTIVITY in [FORWARD] of .toml file"
)
if misfit_method == "L2":
misfit = SS_MTI.Misfit.L2(
weights=weights, start_weight_len=start_weight_len, dt=dt
)
elif misfit_method == "CC":
misfit = SS_MTI.Misfit.CC(shift_samples=128)
elif misfit_method == "POL":
misfit = SS_MTI.Misfit.Pol(
components=components,
start_weight_len=start_weight_len,
weights=weights,
dt=dt,
)
else:
raise ValueError("misfit can be L2, CC or POL in [MISFIT] of .toml file")
""" Start inversion """
# for baz in bazs:
# event.baz = baz
SS_MTI.Inversion.Grid_Search_run(
fwd=fwd,
misfit=misfit,
event=event,
rec=rec,
phases=phases,
components=components,
t_pre=t_pre,
t_post=t_post,
depths=depths,
strikes=strikes,
dips=dips,
rakes=rakes,
phase_corrs=phase_corrs,
tstars=tstars,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
list_to_correct_M0=amplitude_correction,
output_folder=output_folder,
plot=True,
plot_extra_phases=extra_phases,
color_plot="blue",
Ylims=ylims,
Parallel=Parallel,
)
SS_MTI.Inversion.Direct(
fwd=fwd,
misfit=misfit,
event=event,
rec=rec,
phases=phases,
components=components,
phase_corrs=phase_corrs,
t_pre=t_pre,
t_post=t_post,
depths=depths,
tstars=tstars,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
output_folder=output_folder,
plot=True,
plot_extra_phases=extra_phases,
color_plot="red",
Ylims=ylims,
Parallel=Parallel,
)
""" Post-processing """
# if Parallel:
# mpi4py.MPI.COMM_WORLD.Barrier()
# rank = mpi4py.MPI.COMM_WORLD.Get_rank()
# if not rank == 0:
# print(
        #         f"rank {rank} will go to next simulation and does not do post processing"
# )
# continue
""" (waveform plotting post inversion from generated files)"""
# _PostProcessing.post_waveform_plotting(
# h5_file_folder=output_folder,
# method="GS",
# misfit_name=misfit.name,
# misfit_weight_len=misfit.start_weight_len,
# fwd=fwd,
# event=event,
# rec=rec,
# phases=phases,
# components=components,
# t_pre=t_pre,
# t_post=t_post,
# depths=depths,
# phase_corrs=phase_corrs,
# fmin=fmin,
# fmax=fmax,
# zerophase=zerophase,
# tstars=tstars,
# plot_extra_phases=extra_phases,
# Ylims=ylims,
# )
# _PostProcessing.post_waveform_plotting(
# h5_file_folder=output_folder,
# method="Direct",
# misfit_name=misfit.name,
# misfit_weight_len=misfit.start_weight_len,
# fwd=fwd,
# event=event,
# rec=rec,
# phases=phases,
# components=components,
# t_pre=t_pre,
# t_post=t_post,
# depths=depths,
# phase_corrs=phase_corrs,
# fmin=fmin,
# fmax=fmax,
# zerophase=zerophase,
# tstars=tstars,
# plot_extra_phases=extra_phases,
# Ylims=ylims,
# )
""" (misfit vs depth analysis)"""
DOF = sum([int((x + y) / v["dt"]) for x, y in zip(v["t_pre"], v["t_post"])])
Moho_d = 24
fig = _PostProcessing.plot_misfit_vs_depth(
save_paths=[output_folder],
event_name=event.name,
DOF=DOF,
depths=depths,
misfit_name=misfit.name,
veloc_model=fwd.veloc_name,
true_depth=None,
Moho=Moho_d,
fmin=fmin,
fmax=fmax,
amount_of_phases=len(v["phases"]),
)
plt.tight_layout()
plt.savefig(
pjoin(
save_folder,
f"Misfit_vs_Depth_{event.name}_{fmin}_{fmax}_{misfit.name}_{fwd.veloc_name}.svg",
),
dpi=600,
)
plt.close()
""" (best MT vs depth phase arrivals) """
# depths_phases = depths[1::2] # np.array([23, 26, 29]) #
# t_pre = [5, 5]
# t_post = [30, 30]
# phases = [phases[0], phases[1]]
# components = [components[0], components[1]]
# phase_corrs = [phase_corrs[0], phase_corrs[1]]
# tstars = [tstars[0], tstars[1]]
# # tstars = [tstar_P, tstar_S]
# start_depth_range = 29 # 53 #
# end_depth_range = 41 # 69 #
# fig = _PostProcessing.plot_phases_vs_depth(
# h5_file_folder=output_folder,
# method="GS",
# misfit_name=misfit.name,
# fwd=fwd,
# event=event,
# rec=rec,
# phases=phases,
# components=components,
# t_pre=t_pre,
# t_post=t_post,
# depths=depths_phases,
# phase_corrs=phase_corrs,
# fmin=fmin,
# fmax=fmax,
# zerophase=zerophase,
# tstars=tstars,
# color_plot="blue",
# pref_depth_start=start_depth_range,
# pref_depth_end=end_depth_range,
# )
# # plt.tight_layout()
# plt.savefig(
# pjoin(
# save_folder,
# f"PhaseTracking_{event.name}_{fmin}_{fmax}_{misfit.name}_{fwd.veloc_name}.svg",
# ),
# dpi=600,
# )
# plt.close()
""" Uncertainty estimates:"""
# fig_sdr = _PostProcessing.Source_Uncertainty(
# h5_file_folder=output_folder,
# event_name=event.name,
# method="GS",
# misfit_name=misfit.name,
# fwd=fwd,
# phases=phases,
# components=components,
# depths=np.arange(53, 68, 3),
# DOF=DOF,
# fmin=fmin,
# fmax=fmax,
# )
# fig.tight_layout()
# fig.savefig(
# pjoin(
# save_folder,
# f"Uncertainties_FULL_{event.name}_{fmin}_{fmax}_{misfit.name}_{fwd.veloc_name}.svg",
# ),
# dpi=600,
# )
# plt.close(fig)
# fig_sdr.tight_layout()
# fig_sdr.savefig(
# pjoin(
# save_folder,
# f"Uncertainties_SDR_{event.name}_{fmin}_{fmax}_{misfit.name}_{fwd.veloc_name}.svg",
# ),
# dpi=600,
# )
# plt.close(fig_sdr)
| 34.372998 | 106 | 0.501897 |
acf5c054d1b2ba2ce929bafba73adee777db7b1d | 74920 | py | Python | kubernetes/test/test_v1_pod_template_list.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null | kubernetes/test/test_v1_pod_template_list.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null | kubernetes/test/test_v1_pod_template_list.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1_pod_template_list import V1PodTemplateList # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1PodTemplateList(unittest.TestCase):
"""V1PodTemplateList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1PodTemplateList
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.v1_pod_template_list.V1PodTemplateList() # noqa: E501
if include_optional :
return V1PodTemplateList(
api_version = '0',
items = [
kubernetes.client.models.v1/pod_template.v1.PodTemplate(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
template = kubernetes.client.models.v1/pod_template_spec.v1.PodTemplateSpec(
spec = kubernetes.client.models.v1/pod_spec.v1.PodSpec(
active_deadline_seconds = 56,
affinity = kubernetes.client.models.v1/affinity.v1.Affinity(
node_affinity = kubernetes.client.models.v1/node_affinity.v1.NodeAffinity(
preferred_during_scheduling_ignored_during_execution = [
kubernetes.client.models.v1/preferred_scheduling_term.v1.PreferredSchedulingTerm(
preference = kubernetes.client.models.v1/node_selector_term.v1.NodeSelectorTerm(
match_expressions = [
kubernetes.client.models.v1/node_selector_requirement.v1.NodeSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_fields = [
kubernetes.client.models.v1/node_selector_requirement.v1.NodeSelectorRequirement(
key = '0',
operator = '0', )
], ),
weight = 56, )
],
required_during_scheduling_ignored_during_execution = kubernetes.client.models.v1/node_selector.v1.NodeSelector(
node_selector_terms = [
kubernetes.client.models.v1/node_selector_term.v1.NodeSelectorTerm()
], ), ),
pod_affinity = kubernetes.client.models.v1/pod_affinity.v1.PodAffinity(),
pod_anti_affinity = kubernetes.client.models.v1/pod_anti_affinity.v1.PodAntiAffinity(), ),
automount_service_account_token = True,
containers = [
kubernetes.client.models.v1/container.v1.Container(
args = [
'0'
],
command = [
'0'
],
env = [
kubernetes.client.models.v1/env_var.v1.EnvVar(
name = '0',
value = '0',
value_from = kubernetes.client.models.v1/env_var_source.v1.EnvVarSource(
config_map_key_ref = kubernetes.client.models.v1/config_map_key_selector.v1.ConfigMapKeySelector(
key = '0',
name = '0',
optional = True, ),
field_ref = kubernetes.client.models.v1/object_field_selector.v1.ObjectFieldSelector(
api_version = '0',
field_path = '0', ),
resource_field_ref = kubernetes.client.models.v1/resource_field_selector.v1.ResourceFieldSelector(
container_name = '0',
divisor = '0',
resource = '0', ),
secret_key_ref = kubernetes.client.models.v1/secret_key_selector.v1.SecretKeySelector(
key = '0',
name = '0',
optional = True, ), ), )
],
env_from = [
kubernetes.client.models.v1/env_from_source.v1.EnvFromSource(
config_map_ref = kubernetes.client.models.v1/config_map_env_source.v1.ConfigMapEnvSource(
name = '0',
optional = True, ),
prefix = '0',
secret_ref = kubernetes.client.models.v1/secret_env_source.v1.SecretEnvSource(
name = '0',
optional = True, ), )
],
image = '0',
image_pull_policy = '0',
lifecycle = kubernetes.client.models.v1/lifecycle.v1.Lifecycle(
post_start = kubernetes.client.models.v1/handler.v1.Handler(
exec = kubernetes.client.models.v1/exec_action.v1.ExecAction(),
http_get = kubernetes.client.models.v1/http_get_action.v1.HTTPGetAction(
host = '0',
http_headers = [
kubernetes.client.models.v1/http_header.v1.HTTPHeader(
name = '0',
value = '0', )
],
path = '0',
port = kubernetes.client.models.port.port(),
scheme = '0', ),
tcp_socket = kubernetes.client.models.v1/tcp_socket_action.v1.TCPSocketAction(
host = '0',
port = kubernetes.client.models.port.port(), ), ),
pre_stop = kubernetes.client.models.v1/handler.v1.Handler(), ),
liveness_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
name = '0',
ports = [
kubernetes.client.models.v1/container_port.v1.ContainerPort(
container_port = 56,
host_ip = '0',
host_port = 56,
name = '0',
protocol = '0', )
],
readiness_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
resources = kubernetes.client.models.v1/resource_requirements.v1.ResourceRequirements(
limits = {
'key' : '0'
},
requests = {
'key' : '0'
}, ),
security_context = kubernetes.client.models.v1/security_context.v1.SecurityContext(
allow_privilege_escalation = True,
capabilities = kubernetes.client.models.v1/capabilities.v1.Capabilities(
add = [
'0'
],
drop = [
'0'
], ),
privileged = True,
proc_mount = '0',
read_only_root_filesystem = True,
run_as_group = 56,
run_as_non_root = True,
run_as_user = 56,
se_linux_options = kubernetes.client.models.v1/se_linux_options.v1.SELinuxOptions(
level = '0',
role = '0',
type = '0',
user = '0', ),
seccomp_profile = kubernetes.client.models.v1/seccomp_profile.v1.SeccompProfile(
localhost_profile = '0',
type = '0', ),
windows_options = kubernetes.client.models.v1/windows_security_context_options.v1.WindowsSecurityContextOptions(
gmsa_credential_spec = '0',
gmsa_credential_spec_name = '0',
run_as_user_name = '0', ), ),
startup_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
stdin = True,
stdin_once = True,
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
volume_devices = [
kubernetes.client.models.v1/volume_device.v1.VolumeDevice(
device_path = '0',
name = '0', )
],
volume_mounts = [
kubernetes.client.models.v1/volume_mount.v1.VolumeMount(
mount_path = '0',
mount_propagation = '0',
name = '0',
read_only = True,
sub_path = '0',
sub_path_expr = '0', )
],
working_dir = '0', )
],
dns_config = kubernetes.client.models.v1/pod_dns_config.v1.PodDNSConfig(
nameservers = [
'0'
],
options = [
kubernetes.client.models.v1/pod_dns_config_option.v1.PodDNSConfigOption(
name = '0',
value = '0', )
],
searches = [
'0'
], ),
dns_policy = '0',
enable_service_links = True,
ephemeral_containers = [
kubernetes.client.models.v1/ephemeral_container.v1.EphemeralContainer(
image = '0',
image_pull_policy = '0',
name = '0',
stdin = True,
stdin_once = True,
target_container_name = '0',
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
working_dir = '0', )
],
host_aliases = [
kubernetes.client.models.v1/host_alias.v1.HostAlias(
hostnames = [
'0'
],
ip = '0', )
],
host_ipc = True,
host_network = True,
host_pid = True,
hostname = '0',
image_pull_secrets = [
kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', )
],
init_containers = [
kubernetes.client.models.v1/container.v1.Container(
image = '0',
image_pull_policy = '0',
name = '0',
stdin = True,
stdin_once = True,
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
working_dir = '0', )
],
node_name = '0',
node_selector = {
'key' : '0'
},
overhead = {
'key' : '0'
},
preemption_policy = '0',
priority = 56,
priority_class_name = '0',
readiness_gates = [
kubernetes.client.models.v1/pod_readiness_gate.v1.PodReadinessGate(
condition_type = '0', )
],
restart_policy = '0',
runtime_class_name = '0',
scheduler_name = '0',
security_context = kubernetes.client.models.v1/pod_security_context.v1.PodSecurityContext(
fs_group = 56,
fs_group_change_policy = '0',
run_as_group = 56,
run_as_non_root = True,
run_as_user = 56,
supplemental_groups = [
56
],
sysctls = [
kubernetes.client.models.v1/sysctl.v1.Sysctl(
name = '0',
value = '0', )
], ),
service_account = '0',
service_account_name = '0',
set_hostname_as_fqdn = True,
share_process_namespace = True,
subdomain = '0',
termination_grace_period_seconds = 56,
tolerations = [
kubernetes.client.models.v1/toleration.v1.Toleration(
effect = '0',
key = '0',
operator = '0',
toleration_seconds = 56,
value = '0', )
],
topology_spread_constraints = [
kubernetes.client.models.v1/topology_spread_constraint.v1.TopologySpreadConstraint(
label_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_labels = {
'key' : '0'
}, ),
max_skew = 56,
topology_key = '0',
when_unsatisfiable = '0', )
],
volumes = [
kubernetes.client.models.v1/volume.v1.Volume(
aws_elastic_block_store = kubernetes.client.models.v1/aws_elastic_block_store_volume_source.v1.AWSElasticBlockStoreVolumeSource(
fs_type = '0',
partition = 56,
read_only = True,
volume_id = '0', ),
azure_disk = kubernetes.client.models.v1/azure_disk_volume_source.v1.AzureDiskVolumeSource(
caching_mode = '0',
disk_name = '0',
disk_uri = '0',
fs_type = '0',
kind = '0',
read_only = True, ),
azure_file = kubernetes.client.models.v1/azure_file_volume_source.v1.AzureFileVolumeSource(
read_only = True,
secret_name = '0',
share_name = '0', ),
cephfs = kubernetes.client.models.v1/ceph_fs_volume_source.v1.CephFSVolumeSource(
monitors = [
'0'
],
path = '0',
read_only = True,
secret_file = '0',
user = '0', ),
cinder = kubernetes.client.models.v1/cinder_volume_source.v1.CinderVolumeSource(
fs_type = '0',
read_only = True,
volume_id = '0', ),
config_map = kubernetes.client.models.v1/config_map_volume_source.v1.ConfigMapVolumeSource(
default_mode = 56,
items = [
kubernetes.client.models.v1/key_to_path.v1.KeyToPath(
key = '0',
mode = 56,
path = '0', )
],
name = '0',
optional = True, ),
csi = kubernetes.client.models.v1/csi_volume_source.v1.CSIVolumeSource(
driver = '0',
fs_type = '0',
node_publish_secret_ref = kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', ),
read_only = True,
volume_attributes = {
'key' : '0'
}, ),
downward_api = kubernetes.client.models.v1/downward_api_volume_source.v1.DownwardAPIVolumeSource(
default_mode = 56, ),
empty_dir = kubernetes.client.models.v1/empty_dir_volume_source.v1.EmptyDirVolumeSource(
medium = '0',
size_limit = '0', ),
ephemeral = kubernetes.client.models.v1/ephemeral_volume_source.v1.EphemeralVolumeSource(
read_only = True,
volume_claim_template = kubernetes.client.models.v1/persistent_volume_claim_template.v1.PersistentVolumeClaimTemplate(
spec = kubernetes.client.models.v1/persistent_volume_claim_spec.v1.PersistentVolumeClaimSpec(
access_modes = [
'0'
],
data_source = kubernetes.client.models.v1/typed_local_object_reference.v1.TypedLocalObjectReference(
api_group = '0',
kind = '0',
name = '0', ),
selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(),
storage_class_name = '0',
volume_mode = '0',
volume_name = '0', ), ), ),
fc = kubernetes.client.models.v1/fc_volume_source.v1.FCVolumeSource(
fs_type = '0',
lun = 56,
read_only = True,
target_ww_ns = [
'0'
],
wwids = [
'0'
], ),
flex_volume = kubernetes.client.models.v1/flex_volume_source.v1.FlexVolumeSource(
driver = '0',
fs_type = '0',
read_only = True, ),
flocker = kubernetes.client.models.v1/flocker_volume_source.v1.FlockerVolumeSource(
dataset_name = '0',
dataset_uuid = '0', ),
gce_persistent_disk = kubernetes.client.models.v1/gce_persistent_disk_volume_source.v1.GCEPersistentDiskVolumeSource(
fs_type = '0',
partition = 56,
pd_name = '0',
read_only = True, ),
git_repo = kubernetes.client.models.v1/git_repo_volume_source.v1.GitRepoVolumeSource(
directory = '0',
repository = '0',
revision = '0', ),
glusterfs = kubernetes.client.models.v1/glusterfs_volume_source.v1.GlusterfsVolumeSource(
endpoints = '0',
path = '0',
read_only = True, ),
host_path = kubernetes.client.models.v1/host_path_volume_source.v1.HostPathVolumeSource(
path = '0',
type = '0', ),
iscsi = kubernetes.client.models.v1/iscsi_volume_source.v1.ISCSIVolumeSource(
chap_auth_discovery = True,
chap_auth_session = True,
fs_type = '0',
initiator_name = '0',
iqn = '0',
iscsi_interface = '0',
lun = 56,
portals = [
'0'
],
read_only = True,
target_portal = '0', ),
name = '0',
nfs = kubernetes.client.models.v1/nfs_volume_source.v1.NFSVolumeSource(
path = '0',
read_only = True,
server = '0', ),
persistent_volume_claim = kubernetes.client.models.v1/persistent_volume_claim_volume_source.v1.PersistentVolumeClaimVolumeSource(
claim_name = '0',
read_only = True, ),
photon_persistent_disk = kubernetes.client.models.v1/photon_persistent_disk_volume_source.v1.PhotonPersistentDiskVolumeSource(
fs_type = '0',
pd_id = '0', ),
portworx_volume = kubernetes.client.models.v1/portworx_volume_source.v1.PortworxVolumeSource(
fs_type = '0',
read_only = True,
volume_id = '0', ),
projected = kubernetes.client.models.v1/projected_volume_source.v1.ProjectedVolumeSource(
default_mode = 56,
sources = [
kubernetes.client.models.v1/volume_projection.v1.VolumeProjection(
secret = kubernetes.client.models.v1/secret_projection.v1.SecretProjection(
name = '0',
optional = True, ),
service_account_token = kubernetes.client.models.v1/service_account_token_projection.v1.ServiceAccountTokenProjection(
audience = '0',
expiration_seconds = 56,
path = '0', ), )
], ),
quobyte = kubernetes.client.models.v1/quobyte_volume_source.v1.QuobyteVolumeSource(
group = '0',
read_only = True,
registry = '0',
tenant = '0',
user = '0',
volume = '0', ),
rbd = kubernetes.client.models.v1/rbd_volume_source.v1.RBDVolumeSource(
fs_type = '0',
image = '0',
keyring = '0',
monitors = [
'0'
],
pool = '0',
read_only = True,
user = '0', ),
scale_io = kubernetes.client.models.v1/scale_io_volume_source.v1.ScaleIOVolumeSource(
fs_type = '0',
gateway = '0',
protection_domain = '0',
read_only = True,
secret_ref = kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', ),
ssl_enabled = True,
storage_mode = '0',
storage_pool = '0',
system = '0',
volume_name = '0', ),
secret = kubernetes.client.models.v1/secret_volume_source.v1.SecretVolumeSource(
default_mode = 56,
optional = True,
secret_name = '0', ),
storageos = kubernetes.client.models.v1/storage_os_volume_source.v1.StorageOSVolumeSource(
fs_type = '0',
read_only = True,
volume_name = '0',
volume_namespace = '0', ),
vsphere_volume = kubernetes.client.models.v1/vsphere_virtual_disk_volume_source.v1.VsphereVirtualDiskVolumeSource(
fs_type = '0',
storage_policy_id = '0',
storage_policy_name = '0',
volume_path = '0', ), )
], ), ), )
],
kind = '0',
metadata = kubernetes.client.models.v1/list_meta.v1.ListMeta(
continue = '0',
remaining_item_count = 56,
resource_version = '0',
self_link = '0', )
)
else :
return V1PodTemplateList(
items = [
kubernetes.client.models.v1/pod_template.v1.PodTemplate(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
template = kubernetes.client.models.v1/pod_template_spec.v1.PodTemplateSpec(
spec = kubernetes.client.models.v1/pod_spec.v1.PodSpec(
active_deadline_seconds = 56,
affinity = kubernetes.client.models.v1/affinity.v1.Affinity(
node_affinity = kubernetes.client.models.v1/node_affinity.v1.NodeAffinity(
preferred_during_scheduling_ignored_during_execution = [
kubernetes.client.models.v1/preferred_scheduling_term.v1.PreferredSchedulingTerm(
preference = kubernetes.client.models.v1/node_selector_term.v1.NodeSelectorTerm(
match_expressions = [
kubernetes.client.models.v1/node_selector_requirement.v1.NodeSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_fields = [
kubernetes.client.models.v1/node_selector_requirement.v1.NodeSelectorRequirement(
key = '0',
operator = '0', )
], ),
weight = 56, )
],
required_during_scheduling_ignored_during_execution = kubernetes.client.models.v1/node_selector.v1.NodeSelector(
node_selector_terms = [
kubernetes.client.models.v1/node_selector_term.v1.NodeSelectorTerm()
], ), ),
pod_affinity = kubernetes.client.models.v1/pod_affinity.v1.PodAffinity(),
pod_anti_affinity = kubernetes.client.models.v1/pod_anti_affinity.v1.PodAntiAffinity(), ),
automount_service_account_token = True,
containers = [
kubernetes.client.models.v1/container.v1.Container(
args = [
'0'
],
command = [
'0'
],
env = [
kubernetes.client.models.v1/env_var.v1.EnvVar(
name = '0',
value = '0',
value_from = kubernetes.client.models.v1/env_var_source.v1.EnvVarSource(
config_map_key_ref = kubernetes.client.models.v1/config_map_key_selector.v1.ConfigMapKeySelector(
key = '0',
name = '0',
optional = True, ),
field_ref = kubernetes.client.models.v1/object_field_selector.v1.ObjectFieldSelector(
api_version = '0',
field_path = '0', ),
resource_field_ref = kubernetes.client.models.v1/resource_field_selector.v1.ResourceFieldSelector(
container_name = '0',
divisor = '0',
resource = '0', ),
secret_key_ref = kubernetes.client.models.v1/secret_key_selector.v1.SecretKeySelector(
key = '0',
name = '0',
optional = True, ), ), )
],
env_from = [
kubernetes.client.models.v1/env_from_source.v1.EnvFromSource(
config_map_ref = kubernetes.client.models.v1/config_map_env_source.v1.ConfigMapEnvSource(
name = '0',
optional = True, ),
prefix = '0',
secret_ref = kubernetes.client.models.v1/secret_env_source.v1.SecretEnvSource(
name = '0',
optional = True, ), )
],
image = '0',
image_pull_policy = '0',
lifecycle = kubernetes.client.models.v1/lifecycle.v1.Lifecycle(
post_start = kubernetes.client.models.v1/handler.v1.Handler(
exec = kubernetes.client.models.v1/exec_action.v1.ExecAction(),
http_get = kubernetes.client.models.v1/http_get_action.v1.HTTPGetAction(
host = '0',
http_headers = [
kubernetes.client.models.v1/http_header.v1.HTTPHeader(
name = '0',
value = '0', )
],
path = '0',
port = kubernetes.client.models.port.port(),
scheme = '0', ),
tcp_socket = kubernetes.client.models.v1/tcp_socket_action.v1.TCPSocketAction(
host = '0',
port = kubernetes.client.models.port.port(), ), ),
pre_stop = kubernetes.client.models.v1/handler.v1.Handler(), ),
liveness_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
name = '0',
ports = [
kubernetes.client.models.v1/container_port.v1.ContainerPort(
container_port = 56,
host_ip = '0',
host_port = 56,
name = '0',
protocol = '0', )
],
readiness_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
resources = kubernetes.client.models.v1/resource_requirements.v1.ResourceRequirements(
limits = {
'key' : '0'
},
requests = {
'key' : '0'
}, ),
security_context = kubernetes.client.models.v1/security_context.v1.SecurityContext(
allow_privilege_escalation = True,
capabilities = kubernetes.client.models.v1/capabilities.v1.Capabilities(
add = [
'0'
],
drop = [
'0'
], ),
privileged = True,
proc_mount = '0',
read_only_root_filesystem = True,
run_as_group = 56,
run_as_non_root = True,
run_as_user = 56,
se_linux_options = kubernetes.client.models.v1/se_linux_options.v1.SELinuxOptions(
level = '0',
role = '0',
type = '0',
user = '0', ),
seccomp_profile = kubernetes.client.models.v1/seccomp_profile.v1.SeccompProfile(
localhost_profile = '0',
type = '0', ),
windows_options = kubernetes.client.models.v1/windows_security_context_options.v1.WindowsSecurityContextOptions(
gmsa_credential_spec = '0',
gmsa_credential_spec_name = '0',
run_as_user_name = '0', ), ),
startup_probe = kubernetes.client.models.v1/probe.v1.Probe(
failure_threshold = 56,
initial_delay_seconds = 56,
period_seconds = 56,
success_threshold = 56,
timeout_seconds = 56, ),
stdin = True,
stdin_once = True,
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
volume_devices = [
kubernetes.client.models.v1/volume_device.v1.VolumeDevice(
device_path = '0',
name = '0', )
],
volume_mounts = [
kubernetes.client.models.v1/volume_mount.v1.VolumeMount(
mount_path = '0',
mount_propagation = '0',
name = '0',
read_only = True,
sub_path = '0',
sub_path_expr = '0', )
],
working_dir = '0', )
],
dns_config = kubernetes.client.models.v1/pod_dns_config.v1.PodDNSConfig(
nameservers = [
'0'
],
options = [
kubernetes.client.models.v1/pod_dns_config_option.v1.PodDNSConfigOption(
name = '0',
value = '0', )
],
searches = [
'0'
], ),
dns_policy = '0',
enable_service_links = True,
ephemeral_containers = [
kubernetes.client.models.v1/ephemeral_container.v1.EphemeralContainer(
image = '0',
image_pull_policy = '0',
name = '0',
stdin = True,
stdin_once = True,
target_container_name = '0',
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
working_dir = '0', )
],
host_aliases = [
kubernetes.client.models.v1/host_alias.v1.HostAlias(
hostnames = [
'0'
],
ip = '0', )
],
host_ipc = True,
host_network = True,
host_pid = True,
hostname = '0',
image_pull_secrets = [
kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', )
],
init_containers = [
kubernetes.client.models.v1/container.v1.Container(
image = '0',
image_pull_policy = '0',
name = '0',
stdin = True,
stdin_once = True,
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
working_dir = '0', )
],
node_name = '0',
node_selector = {
'key' : '0'
},
overhead = {
'key' : '0'
},
preemption_policy = '0',
priority = 56,
priority_class_name = '0',
readiness_gates = [
kubernetes.client.models.v1/pod_readiness_gate.v1.PodReadinessGate(
condition_type = '0', )
],
restart_policy = '0',
runtime_class_name = '0',
scheduler_name = '0',
security_context = kubernetes.client.models.v1/pod_security_context.v1.PodSecurityContext(
fs_group = 56,
fs_group_change_policy = '0',
run_as_group = 56,
run_as_non_root = True,
run_as_user = 56,
supplemental_groups = [
56
],
sysctls = [
kubernetes.client.models.v1/sysctl.v1.Sysctl(
name = '0',
value = '0', )
], ),
service_account = '0',
service_account_name = '0',
set_hostname_as_fqdn = True,
share_process_namespace = True,
subdomain = '0',
termination_grace_period_seconds = 56,
tolerations = [
kubernetes.client.models.v1/toleration.v1.Toleration(
effect = '0',
key = '0',
operator = '0',
toleration_seconds = 56,
value = '0', )
],
topology_spread_constraints = [
kubernetes.client.models.v1/topology_spread_constraint.v1.TopologySpreadConstraint(
label_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_labels = {
'key' : '0'
}, ),
max_skew = 56,
topology_key = '0',
when_unsatisfiable = '0', )
],
volumes = [
kubernetes.client.models.v1/volume.v1.Volume(
aws_elastic_block_store = kubernetes.client.models.v1/aws_elastic_block_store_volume_source.v1.AWSElasticBlockStoreVolumeSource(
fs_type = '0',
partition = 56,
read_only = True,
volume_id = '0', ),
azure_disk = kubernetes.client.models.v1/azure_disk_volume_source.v1.AzureDiskVolumeSource(
caching_mode = '0',
disk_name = '0',
disk_uri = '0',
fs_type = '0',
kind = '0',
read_only = True, ),
azure_file = kubernetes.client.models.v1/azure_file_volume_source.v1.AzureFileVolumeSource(
read_only = True,
secret_name = '0',
share_name = '0', ),
cephfs = kubernetes.client.models.v1/ceph_fs_volume_source.v1.CephFSVolumeSource(
monitors = [
'0'
],
path = '0',
read_only = True,
secret_file = '0',
user = '0', ),
cinder = kubernetes.client.models.v1/cinder_volume_source.v1.CinderVolumeSource(
fs_type = '0',
read_only = True,
volume_id = '0', ),
config_map = kubernetes.client.models.v1/config_map_volume_source.v1.ConfigMapVolumeSource(
default_mode = 56,
items = [
kubernetes.client.models.v1/key_to_path.v1.KeyToPath(
key = '0',
mode = 56,
path = '0', )
],
name = '0',
optional = True, ),
csi = kubernetes.client.models.v1/csi_volume_source.v1.CSIVolumeSource(
driver = '0',
fs_type = '0',
node_publish_secret_ref = kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', ),
read_only = True,
volume_attributes = {
'key' : '0'
}, ),
downward_api = kubernetes.client.models.v1/downward_api_volume_source.v1.DownwardAPIVolumeSource(
default_mode = 56, ),
empty_dir = kubernetes.client.models.v1/empty_dir_volume_source.v1.EmptyDirVolumeSource(
medium = '0',
size_limit = '0', ),
ephemeral = kubernetes.client.models.v1/ephemeral_volume_source.v1.EphemeralVolumeSource(
read_only = True,
volume_claim_template = kubernetes.client.models.v1/persistent_volume_claim_template.v1.PersistentVolumeClaimTemplate(
spec = kubernetes.client.models.v1/persistent_volume_claim_spec.v1.PersistentVolumeClaimSpec(
access_modes = [
'0'
],
data_source = kubernetes.client.models.v1/typed_local_object_reference.v1.TypedLocalObjectReference(
api_group = '0',
kind = '0',
name = '0', ),
selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(),
storage_class_name = '0',
volume_mode = '0',
volume_name = '0', ), ), ),
fc = kubernetes.client.models.v1/fc_volume_source.v1.FCVolumeSource(
fs_type = '0',
lun = 56,
read_only = True,
target_ww_ns = [
'0'
],
wwids = [
'0'
], ),
flex_volume = kubernetes.client.models.v1/flex_volume_source.v1.FlexVolumeSource(
driver = '0',
fs_type = '0',
read_only = True, ),
flocker = kubernetes.client.models.v1/flocker_volume_source.v1.FlockerVolumeSource(
dataset_name = '0',
dataset_uuid = '0', ),
gce_persistent_disk = kubernetes.client.models.v1/gce_persistent_disk_volume_source.v1.GCEPersistentDiskVolumeSource(
fs_type = '0',
partition = 56,
pd_name = '0',
read_only = True, ),
git_repo = kubernetes.client.models.v1/git_repo_volume_source.v1.GitRepoVolumeSource(
directory = '0',
repository = '0',
revision = '0', ),
glusterfs = kubernetes.client.models.v1/glusterfs_volume_source.v1.GlusterfsVolumeSource(
endpoints = '0',
path = '0',
read_only = True, ),
host_path = kubernetes.client.models.v1/host_path_volume_source.v1.HostPathVolumeSource(
path = '0',
type = '0', ),
iscsi = kubernetes.client.models.v1/iscsi_volume_source.v1.ISCSIVolumeSource(
chap_auth_discovery = True,
chap_auth_session = True,
fs_type = '0',
initiator_name = '0',
iqn = '0',
iscsi_interface = '0',
lun = 56,
portals = [
'0'
],
read_only = True,
target_portal = '0', ),
name = '0',
nfs = kubernetes.client.models.v1/nfs_volume_source.v1.NFSVolumeSource(
path = '0',
read_only = True,
server = '0', ),
persistent_volume_claim = kubernetes.client.models.v1/persistent_volume_claim_volume_source.v1.PersistentVolumeClaimVolumeSource(
claim_name = '0',
read_only = True, ),
photon_persistent_disk = kubernetes.client.models.v1/photon_persistent_disk_volume_source.v1.PhotonPersistentDiskVolumeSource(
fs_type = '0',
pd_id = '0', ),
portworx_volume = kubernetes.client.models.v1/portworx_volume_source.v1.PortworxVolumeSource(
fs_type = '0',
read_only = True,
volume_id = '0', ),
projected = kubernetes.client.models.v1/projected_volume_source.v1.ProjectedVolumeSource(
default_mode = 56,
sources = [
kubernetes.client.models.v1/volume_projection.v1.VolumeProjection(
secret = kubernetes.client.models.v1/secret_projection.v1.SecretProjection(
name = '0',
optional = True, ),
service_account_token = kubernetes.client.models.v1/service_account_token_projection.v1.ServiceAccountTokenProjection(
audience = '0',
expiration_seconds = 56,
path = '0', ), )
], ),
quobyte = kubernetes.client.models.v1/quobyte_volume_source.v1.QuobyteVolumeSource(
group = '0',
read_only = True,
registry = '0',
tenant = '0',
user = '0',
volume = '0', ),
rbd = kubernetes.client.models.v1/rbd_volume_source.v1.RBDVolumeSource(
fs_type = '0',
image = '0',
keyring = '0',
monitors = [
'0'
],
pool = '0',
read_only = True,
user = '0', ),
scale_io = kubernetes.client.models.v1/scale_io_volume_source.v1.ScaleIOVolumeSource(
fs_type = '0',
gateway = '0',
protection_domain = '0',
read_only = True,
secret_ref = kubernetes.client.models.v1/local_object_reference.v1.LocalObjectReference(
name = '0', ),
ssl_enabled = True,
storage_mode = '0',
storage_pool = '0',
system = '0',
volume_name = '0', ),
secret = kubernetes.client.models.v1/secret_volume_source.v1.SecretVolumeSource(
default_mode = 56,
optional = True,
secret_name = '0', ),
storageos = kubernetes.client.models.v1/storage_os_volume_source.v1.StorageOSVolumeSource(
fs_type = '0',
read_only = True,
volume_name = '0',
volume_namespace = '0', ),
vsphere_volume = kubernetes.client.models.v1/vsphere_virtual_disk_volume_source.v1.VsphereVirtualDiskVolumeSource(
fs_type = '0',
storage_policy_id = '0',
storage_policy_name = '0',
volume_path = '0', ), )
], ), ), )
],
)
def testV1PodTemplateList(self):
"""Test V1PodTemplateList"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 69.563603 | 170 | 0.307061 |
acf5c072147dd19b52c5b235bd3573237c756ef3 | 1481 | py | Python | dataStructures/set.py | evanxg852000/rockstartdev | cdcff119d35891cf69593f1e924153d29833c03f | ["Apache-2.0"] | 1 | 2020-08-27T22:24:47.000Z | 2020-08-27T22:24:47.000Z | dataStructures/set.py | evanxg852000/rockstartdev | cdcff119d35891cf69593f1e924153d29833c03f | ["Apache-2.0"] | null | null | null | dataStructures/set.py | evanxg852000/rockstartdev | cdcff119d35891cf69593f1e924153d29833c03f | ["Apache-2.0"] | null | null | null |
from linkedlist import LinkedList
class Set(object):
def __init__(self):
self._store = LinkedList()
    def has(self, data):
        return self._store.find(data) is not None
def items(self):
for i in self._store.items():
yield i.data
def add(self, data):
        if self.has(data):
return False
self._store.add_back(data)
return True
    def remove(self, data):
        target = self._store.find(data)
        if target is None:
            return False
        self._store.delete(target)
        return True
def count(self):
return self._store.count()
def union(self, otherSet):
union = Set()
for i in self.items():
union.add(i)
for i in otherSet.items():
union.add(i)
return union
def intersect(self, otherSet):
intersect = Set()
for i in self.items():
if (otherSet.has(i)):
intersect.add(i)
return intersect
def diff(self, otherSet):
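        # Note: this returns the symmetric difference (items in exactly one set).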
diff = Set()
for i in self.items():
if (not otherSet.has(i)):
diff.add(i)
for i in otherSet.items():
if (not self.has(i)):
diff.add(i)
return diff
def is_subset(self, otherSet):
if self.count() > otherSet.count():
return False
for i in self.items():
if not otherSet.has(i):
return False
return True
| 23.887097 | 47 | 0.516543 |
acf5c0d7368b472d502ac27f95094c5c172e3f25 | 3,440 | py | Python | deploy-agent/deployd/types/deploy_goal.py | aagxxi/teletraan | 93af2abfd72e99258e80f978a80343656de2172f | [
"Apache-2.0"
] | null | null | null | deploy-agent/deployd/types/deploy_goal.py | aagxxi/teletraan | 93af2abfd72e99258e80f978a80343656de2172f | [
"Apache-2.0"
] | null | null | null | deploy-agent/deployd/types/deploy_goal.py | aagxxi/teletraan | 93af2abfd72e99258e80f978a80343656de2172f | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deployd.types.build import Build
from deployd.types.deploy_stage import DeployStage
class DeployGoal(object):
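    """One unit of deploy work for the agent: which build to deploy, for which
    env/stage, and which deploy stage to execute."""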
def __init__(self, jsonValue=None):
self.deployId = None
self.envId = None
self.envName = None
self.stageName = None
self.deployStage = None
self.build = None
self.deployAlias = None
self.config = None
self.scriptVariables = None
self.firstDeploy = None
self.isDocker = None
if jsonValue:
self.deployId = jsonValue.get('deployId')
self.envId = jsonValue.get('envId')
self.envName = jsonValue.get('envName')
self.stageName = jsonValue.get('stageName')
# TODO: Only used for migration, should remove later
if isinstance(jsonValue.get('deployStage'), int):
self.deployStage = DeployStage._VALUES_TO_NAMES[jsonValue.get('deployStage')]
else:
self.deployStage = jsonValue.get('deployStage')
if jsonValue.get('build'):
self.build = Build(jsonValue=jsonValue.get('build'))
self.deployAlias = jsonValue.get('deployAlias')
self.config = jsonValue.get('agentConfigs')
self.scriptVariables = jsonValue.get('scriptVariables')
self.firstDeploy = jsonValue.get('firstDeploy')
self.isDocker = jsonValue.get('isDocker')
def __key(self):
return (self.deployId,
self.envId,
self.envName,
self.stageName,
self.deployStage,
self.build,
self.deployAlias,
self.config,
self.scriptVariables,
self.firstDeploy,
self.isDocker)
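    # Note: hashing requires every field in __key to be hashable; if config or
    # scriptVariables hold dicts, hash() will raise TypeError.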
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
""" compare DeployGoals """
return isinstance(other, DeployGoal) \
and self.__key() == other.__key()
def __ne__(self, other):
""" compare DeployGoals """
return not (isinstance(other, DeployGoal)
and self.__key() == other.__key())
def __str__(self):
return "DeployGoal(deployId={}, envId={}, envName={}, stageName={}, " \
"deployStage={}, build={}, deployAlias={}, agentConfig={}," \
"scriptVariables={}, firstDeploy={}, isDocker={})".format(self.deployId, self.envId, self.envName,
self.stageName, self.deployStage,
self.build, self.deployAlias,
self.config, self.scriptVariables,
self.firstDeploy, self.isDocker)
| 40 | 113 | 0.571802 |
acf5c268298d28cf3e7bda6752a5c789b04aadc1 | 130 | py | Python | nso_jsonrpc_requester/__init__.py | btr1975/nso_jsonrpc_requester | 63362045b998e1b7a2235804c55da151b781c0bd | ["MIT"] | null | null | null | nso_jsonrpc_requester/__init__.py | btr1975/nso_jsonrpc_requester | 63362045b998e1b7a2235804c55da151b781c0bd | ["MIT"] | null | null | null | nso_jsonrpc_requester/__init__.py | btr1975/nso_jsonrpc_requester | 63362045b998e1b7a2235804c55da151b781c0bd | ["MIT"] | null | null | null |
"""
This exports the main classes used for NSO JSON-RPC.
"""
from .config import NsoJsonRpcConfig
from .comet import NsoJsonRpcComet
| 21.666667 | 49 | 0.784615 |
acf5c28cb8c8910c15f5b7ed8f00fdf2c9664ec6 | 2631 | py | Python | pypy/module/pypyjit/test_pypy_c/test_min_max.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | ["Apache-2.0", "OpenSSL"] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | pypy/module/pypyjit/test_pypy_c/test_min_max.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | ["Apache-2.0", "OpenSSL"] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | pypy/module/pypyjit/test_pypy_c/test_min_max.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | ["Apache-2.0", "OpenSSL"] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z |
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class TestMinMax(BaseTestPyPyC):
def test_min_max(self):
def main():
i=0
sa=0
while i < 300:
sa+=min(max(i, 3000), 4000)
i+=1
return sa
log = self.run(main, [])
assert log.result == 300*3000
loop, = log.loops_by_filename(self.filepath)
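        # For i < 300, min(max(i, 3000), 4000) folds to the constant 3000, so
        # the residual loop body is a single overflow-checked add.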
assert loop.match("""
i7 = int_lt(i4, 300)
guard_true(i7, descr=...)
guard_not_invalidated(descr=...)
i9 = int_add_ovf(i5, 3000)
guard_no_overflow(descr=...)
i11 = int_add(i4, 1)
--TICK--
jump(..., descr=...)
""")
def test_silly_max(self):
def main():
i = 13
sa = 0
while i < 30000:
lst = range(i % 1000 + 2)
sa += max(*lst) # ID: max
i += 1
return sa
log = self.run(main, [])
assert log.result == main()
loop, = log.loops_by_filename(self.filepath)
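        # max(*lst) with a variable-length argument list is not inlined: the
        # trace ends in a residual call_assembler into a separate scanning loop.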
assert loop.match("""
...
p76 = call_assembler(_, _, _, _, descr=...)
...
""")
loop2 = log.loops[0]
loop2.match('''
...
label(..., descr=...)
...
label(..., descr=...)
guard_not_invalidated?
i17 = int_ge(i11, i7)
guard_false(i17, descr=...)
p18 = getarrayitem_gc(p5, i11, descr=...)
i19 = int_add(i11, 1)
setfield_gc(p2, i19, descr=...)
guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...)
i20 = getfield_gc_pure(p18, descr=...)
i21 = int_gt(i20, i14)
guard_true(i21, descr=...)
jump(..., descr=...)
''')
# XXX could be "guard_class(p18)" instead; we lost somewhere
# the information that it cannot be null.
def test_iter_max(self):
def main():
i = 2
sa = 0
while i < 300:
lst = range(i)
sa += max(lst) # ID: max
i += 1
return sa
log = self.run(main, [])
assert log.result == main()
loop, = log.loops_by_filename(self.filepath)
# We dont want too many guards, but a residual call to min_max_loop
guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')]
assert len(guards) < 20
assert loop.match("""
...
p76 = call_assembler(_, _, _, _, descr=...)
...
""")
| 30.952941 | 89 | 0.466363 |
acf5c2f7ab97bb684c8ca9d75555668d4fef4713 | 7916 | py | Python | physionet-django/search/views.py | T-CAIREM/physionet-build | e3d1f2f0e3d998f5b64baebe3a396b08c6f7ca85 | ["BSD-3-Clause"] | 36 | 2019-02-14T18:10:39.000Z | 2022-01-21T12:48:52.000Z | physionet-django/search/views.py | T-CAIREM/physionet-build | e3d1f2f0e3d998f5b64baebe3a396b08c6f7ca85 | ["BSD-3-Clause"] | 1051 | 2019-01-31T18:03:14.000Z | 2022-03-31T20:53:04.000Z | physionet-django/search/views.py | T-CAIREM/physionet-build | e3d1f2f0e3d998f5b64baebe3a396b08c6f7ca85 | ["BSD-3-Clause"] | 13 | 2019-03-26T11:02:32.000Z | 2022-03-17T11:39:49.000Z |
import re
import operator
from functools import reduce
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.shortcuts import render, redirect, reverse
from django.db.models import Q, Count, Case, When, Value, IntegerField, Sum
from django.conf import settings
from django.http import Http404
from search import forms
from project.models import PublishedProject, PublishedTopic
from physionet.utility import paginate
def topic_search(request):
"""
Search published projects by topic keyword
Search with form submission or direct url
"""
topic, valid_search, projects = '', False, None
# If we get a form submission, redirect to generate the querystring
# in the url
if 'topic' in request.GET:
form = forms.TopicSearchForm(request.GET)
if form.is_valid():
topic = form.cleaned_data['topic']
valid_search = True
projects = PublishedProject.objects.filter(topics__description=topic)
else:
form = forms.TopicSearchForm()
return render(request, 'search/topic_search.html',
{'topic': topic,
'projects': projects,
'form': form,
'valid_search': valid_search})
def all_topics(request):
"""
Show all topics
"""
topics = PublishedTopic.objects.all().order_by('-project_count')
return render(request, 'search/all_topics.html',
{'topics': topics})
def get_content(resource_type, orderby, direction, search_term):
"""
Helper function to get content shown on a resource listing page
"""
# Word boundary for different database engines
wb = r'\b'
if 'postgresql' in settings.DATABASES['default']['ENGINE']:
wb = r'\y'
# Build query for resource type and keyword filtering
if len(search_term) == 0:
query = Q(resource_type__in=resource_type)
else:
search_term = re.split(r'\s*[\;\,\s]\s*', re.escape(search_term))
query = reduce(operator.or_, (Q(topics__description__iregex=r'{0}{1}{0}'.format(wb,
item)) for item in search_term))
query = query | reduce(operator.or_, (Q(abstract__iregex=r'{0}{1}{0}'.format(wb,
item)) for item in search_term))
query = query | reduce(operator.or_, (Q(title__iregex=r'{0}{1}{0}'.format(wb,
item)) for item in search_term))
query = query & Q(resource_type__in=resource_type)
published_projects = (PublishedProject.objects
.filter(query, is_latest_version=True)
.annotate(relevance=Count('core_project_id'))
.annotate(has_keys=Value(0, IntegerField()))
)
# Relevance
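    # Score each search term per field: a topic match counts 3, a title match 2,
    # an abstract match 1; per-term scores are summed into has_keys for ordering.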
for t in search_term:
published_projects = (published_projects.annotate(has_keys=Case(
When(topics__description__iregex=r'{0}{1}{0}'.format(wb, t),
then=Value(3)),
When(title__iregex=r'{0}{1}{0}'.format(wb, t),
then=Value(2)),
When(abstract__iregex=r'{0}{1}{0}'.format(wb, t),
then=Value(1)),
default=Value(0),
output_field=IntegerField()
)).annotate(has_keys=Sum('has_keys'))
)
# Sorting
direction = '-' if direction == 'desc' else ''
order_string = '{}{}'.format(direction, orderby)
if orderby == 'relevance':
published_projects = published_projects.order_by(direction+'has_keys',
order_string, '-publish_datetime')
else:
published_projects = published_projects.order_by(order_string)
return published_projects
def content_index(request, resource_type=None):
"""
List of all published resources
"""
LABELS = {0: ['Database', 'databases'],
1: ['Software', 'softwares'],
2: ['Challenge', 'challenges'],
3: ['Model', 'models'],
}
# PROJECT TYPE FILTER
form_type = forms.ProjectTypeForm()
if 'types' in request.GET:
form_type = forms.ProjectTypeForm(request.GET)
if form_type.is_valid():
resource_type = [int(t) for t in form_type.cleaned_data['types']]
else:
resource_type = list(LABELS.keys())
elif resource_type is None:
resource_type = list(LABELS.keys())
form_type = forms.ProjectTypeForm({'types': resource_type})
else:
resource_type = [resource_type]
form_type = forms.ProjectTypeForm({'types': resource_type})
# SORT PROJECTS
orderby, direction = 'relevance', 'desc'
form_order = forms.ProjectOrderForm()
if 'orderby' in request.GET:
form_order = forms.ProjectOrderForm(request.GET)
if form_order.is_valid():
orderby, direction = form_order.cleaned_data['orderby'].split('-')
# TOPIC SEARCH
topic = ''
if 'topic' in request.GET:
form_topic = forms.TopicSearchForm(request.GET)
if form_topic.is_valid():
topic = form_topic.cleaned_data['topic']
else:
form_topic = forms.TopicSearchForm()
# BUILD
published_projects = get_content(resource_type=resource_type,
orderby=orderby,
direction=direction,
search_term=topic)
# PAGINATION
projects = paginate(request, published_projects, 10)
params = request.GET.copy()
# Remove the page argument from the querystring if it exists
try:
params.pop('page')
except KeyError:
pass
querystring = params.urlencode()
return render(request, 'search/content_index.html',
{'form_order': form_order,
'projects': projects,
'form_type': form_type,
'form_topic': form_topic,
'querystring': querystring})
def database_index(request):
"""
List of published databases
"""
return content_index(request, resource_type=0)
def software_index(request):
"""
List of published software
"""
return content_index(request, resource_type=1)
def challenge_index(request):
"""
List of published challenges
"""
return content_index(request, resource_type=2)
def model_index(request):
"""
List of published models
"""
return content_index(request, resource_type=3)
def charts(request):
"""
Chart statistics about published projects
"""
resource_type = None
if ('resource_type' in request.GET and
request.GET['resource_type'] in ['0', '1', '2', '3']):
resource_type = int(request.GET['resource_type'])
LABELS = {None: ['Content', 'Projects'],
0: ['Database', 'Databases'],
1: ['Software', 'Software Projects'],
2: ['Challenge', 'Challenges'],
3: ['Model', 'Models']}
main_label, plural_label = LABELS[resource_type]
return render(request, 'search/charts.html', {
'resource_type': resource_type,
'main_label': main_label,
'plural_label': plural_label})
def physiobank(request):
"""Redirect"""
return redirect('database_index')
def physiotools(request):
"""Redirect"""
return redirect('software_index')
def wfdbcal(request):
return redirect(static('wfdbcal'))
def redirect_latest_if_project_exists(project_slug):
project = PublishedProject.objects.filter(slug=project_slug)
if project:
return redirect('published_project_latest', project_slug=project_slug)
else:
raise Http404()
def redirect_project(request, project_slug):
return redirect_latest_if_project_exists(project_slug)
def redirect_challenge_project(request, year):
return redirect_latest_if_project_exists('challenge-{}'.format(year))
| 31.288538 | 91 | 0.621779 |
acf5c37b699d2235b9a2d1a4b35839c380f06368 | 34319 | py | Python | syncplay/messages_en.py | ccxcz/syncplay | 6f840e4b4c5f9e5f515d4be45a76eb61dfede508 | ["Apache-2.0"] | null | null | null | syncplay/messages_en.py | ccxcz/syncplay | 6f840e4b4c5f9e5f515d4be45a76eb61dfede508 | ["Apache-2.0"] | null | null | null | syncplay/messages_en.py | ccxcz/syncplay | 6f840e4b4c5f9e5f515d4be45a76eb61dfede508 | ["Apache-2.0"] | null | null | null |
# coding:utf8
"""English dictionary"""
en = {
"LANGUAGE": "English",
# Client notifications
"config-cleared-notification": "Settings cleared. Changes will be saved when you store a valid configuration.",
"relative-config-notification": "Loaded relative configuration file(s): {}",
"connection-attempt-notification": "Attempting to connect to {}:{}", # Port, IP
"reconnection-attempt-notification": "Connection with server lost, attempting to reconnect",
"disconnection-notification": "Disconnected from server",
"connection-failed-notification": "Connection with server failed",
"connected-successful-notification": "Successfully connected to server",
"retrying-notification": "%s, Retrying in %d seconds...", # Seconds
"rewind-notification": "Rewinded due to time difference with {}", # User
"fastforward-notification": "Fast-forwarded due to time difference with {}", # User
"slowdown-notification": "Slowing down due to time difference with {}", # User
"revert-notification": "Reverting speed back to normal",
"pause-notification": "{} paused", # User
"unpause-notification": "{} unpaused", # User
"seek-notification": "{} jumped from {} to {}", # User, from time, to time
"current-offset-notification": "Current offset: {} seconds", # Offset
"media-directory-list-updated-notification": "Syncplay media directories have been updated.",
"room-join-notification": "{} has joined the room: '{}'", # User
"left-notification": "{} has left", # User
"left-paused-notification": "{} left, {} paused", # User who left, User who paused
"playing-notification": "{} is playing '{}' ({})", # User, file, duration
"playing-notification/room-addendum": " in room: '{}'", # Room
"not-all-ready": "Not ready: {}", # Usernames
"all-users-ready": "Everyone is ready ({} users)", # Number of ready users
"ready-to-unpause-notification": "You are now set as ready - unpause again to unpause",
"set-as-ready-notification": "You are now set as ready",
"set-as-not-ready-notification": "You are now set as not ready",
"autoplaying-notification": "Auto-playing in {}...", # Number of seconds until playback will start
"identifying-as-controller-notification": "Identifying as room operator with password '{}'...",
"failed-to-identify-as-controller-notification": "{} failed to identify as a room operator.",
"authenticated-as-controller-notification": "{} authenticated as a room operator",
"created-controlled-room-notification": "Created managed room '{}' with password '{}'. Please save this information for future reference!", # RoomName, operatorPassword
"file-different-notification": "File you are playing appears to be different from {}'s", # User
"file-differences-notification": "Your file differs in the following way(s): {}", # Differences
"room-file-differences": "File differences: {}", # File differences (filename, size, and/or duration)
"file-difference-filename": "name",
"file-difference-filesize": "size",
"file-difference-duration": "duration",
"alone-in-the-room": "You're alone in the room",
"different-filesize-notification": " (their file size is different from yours!)",
"userlist-playing-notification": "{} is playing:", # Username
"file-played-by-notification": "File: {} is being played by:", # File
"no-file-played-notification": "{} is not playing a file", # Username
"notplaying-notification": "People who are not playing any file:",
"userlist-room-notification": "In room '{}':", # Room
"userlist-file-notification": "File",
"controller-userlist-userflag": "Operator",
"ready-userlist-userflag": "Ready",
"update-check-failed-notification": "Could not automatically check whether Syncplay {} is up to date. Want to visit https://syncplay.pl/ to manually check for updates?", # Syncplay version
"syncplay-uptodate-notification": "Syncplay is up to date",
"syncplay-updateavailable-notification": "A new version of Syncplay is available. Do you want to visit the release page?",
"mplayer-file-required-notification": "Syncplay using mplayer requires you to provide file when starting",
"mplayer-file-required-notification/example": "Usage example: syncplay [options] [url|path/]filename",
"mplayer2-required": "Syncplay is incompatible with MPlayer 1.x, please use mplayer2 or mpv",
"unrecognized-command-notification": "Unrecognized command",
"commandlist-notification": "Available commands:",
"commandlist-notification/room": "\tr [name] - change room",
"commandlist-notification/list": "\tl - show user list",
"commandlist-notification/undo": "\tu - undo last seek",
"commandlist-notification/pause": "\tp - toggle pause",
"commandlist-notification/seek": "\t[s][+-]time - seek to the given value of time, if + or - is not specified it's absolute time in seconds or min:sec",
"commandlist-notification/help": "\th - this help",
"commandlist-notification/toggle": "\tt - toggles whether you are ready to watch or not",
"commandlist-notification/create": "\tc [name] - create managed room using name of current room",
"commandlist-notification/auth": "\ta [password] - authenticate as room operator with operator password",
"commandlist-notification/chat": "\tch [message] - send a chat message in a room",
"syncplay-version-notification": "Syncplay version: {}", # syncplay.version
"more-info-notification": "More info available at: {}", # projectURL
"gui-data-cleared-notification": "Syncplay has cleared the path and window state data used by the GUI.",
"language-changed-msgbox-label": "Language will be changed when you run Syncplay.",
"promptforupdate-label": "Is it okay for Syncplay to automatically check for updates from time to time?",
"vlc-interface-version-mismatch": "You are running version {} of the Syncplay interface module for VLC, but Syncplay is designed to run with version {} and above. Please refer to the Syncplay User Guide at https://syncplay.pl/guide/ for instructions on how to install syncplay.lua.", # VLC interface version, VLC interface min version
"vlc-interface-oldversion-warning": "Warning: Syncplay detected that an old version version of the Syncplay interface module for VLC was installed in the VLC directory. Please refer to the Syncplay User Guide at https://syncplay.pl/guide/ for instructions on how to install syncplay.lua.",
"vlc-interface-not-installed": "Warning: The Syncplay interface module for VLC was not found in the VLC directory. As such, if you are running VLC 2.0 then VLC will use the syncplay.lua module contained within the Syncplay directory, but this will mean that other custom interface scripts and extensions will not work. Please refer to the Syncplay User Guide at https://syncplay.pl/guide/ for instructions on how to install syncplay.lua.",
"media-player-latency-warning": "Warning: The media player took {} seconds to respond. If you experience syncing issues then close applications to free up system resources, and if that doesn't work then try a different media player.", # Seconds to respond
"mpv-unresponsive-error": "mpv has not responded for {} seconds so appears to have malfunctioned. Please restart Syncplay.", # Seconds to respond
# Client prompts
"enter-to-exit-prompt": "Press enter to exit\n",
# Client errors
"missing-arguments-error": "Some necessary arguments are missing, refer to --help",
"server-timeout-error": "Connection with server timed out",
"mpc-slave-error": "Unable to start MPC in slave mode!",
"mpc-version-insufficient-error": "MPC version not sufficient, please use `mpc-hc` >= `{}`",
"mpc-be-version-insufficient-error": "MPC version not sufficient, please use `mpc-be` >= `{}`",
"mpv-version-error": "Syncplay is not compatible with this version of mpv. Please use a different version of mpv (e.g. Git HEAD).",
"player-file-open-error": "Player failed opening file",
"player-path-error": "Player path is not set properly. Supported players are: mpv, VLC, MPC-HC, MPC-BE and mplayer2",
"hostname-empty-error": "Hostname can't be empty",
"empty-error": "{} can't be empty", # Configuration
"media-player-error": "Media player error: \"{}\"", # Error line
"unable-import-gui-error": "Could not import GUI libraries. If you do not have PySide installed then you will need to install it for the GUI to work.",
"unable-import-twisted-error": "Could not import Twisted. Please install Twisted v12.1.0 or later.",
"arguments-missing-error": "Some necessary arguments are missing, refer to --help",
"unable-to-start-client-error": "Unable to start client",
"player-path-config-error": "Player path is not set properly. Supported players are: mpv, VLC, MPC-HC, MPC-BE and mplayer2.",
"no-file-path-config-error": "File must be selected before starting your player",
"no-hostname-config-error": "Hostname can't be empty",
"invalid-port-config-error": "Port must be valid",
"empty-value-config-error": "{} can't be empty", # Config option
"not-json-error": "Not a json encoded string\n",
"hello-arguments-error": "Not enough Hello arguments\n", # DO NOT TRANSLATE
"version-mismatch-error": "Mismatch between versions of client and server\n",
"vlc-failed-connection": "Failed to connect to VLC. If you have not installed syncplay.lua and are using the latest verion of VLC then please refer to https://syncplay.pl/LUA/ for instructions.",
"vlc-failed-noscript": "VLC has reported that the syncplay.lua interface script has not been installed. Please refer to https://syncplay.pl/LUA/ for instructions.",
"vlc-failed-versioncheck": "This version of VLC is not supported by Syncplay.",
"feature-sharedPlaylists": "shared playlists", # used for not-supported-by-server-error
"feature-chat": "chat", # used for not-supported-by-server-error
"feature-readiness": "readiness", # used for not-supported-by-server-error
"feature-managedRooms": "managed rooms", # used for not-supported-by-server-error
"not-supported-by-server-error": "The {} feature is not supported by this server..", # feature
"shared-playlists-not-supported-by-server-error": "The shared playlists feature may not be supported by the server. To ensure that it works correctly requires a server running Syncplay {}+, but the server is running Syncplay {}.", # minVersion, serverVersion
"shared-playlists-disabled-by-server-error": "The shared playlist feature has been disabled in the server configuration. To use this feature you will need to connect to a different server.",
"invalid-seek-value": "Invalid seek value",
"invalid-offset-value": "Invalid offset value",
"switch-file-not-found-error": "Could not switch to file '{0}'. Syncplay looks in specified media directories.", # File not found
"folder-search-timeout-error": "The search for media in media directories was aborted as it took too long to search through '{}'. This will occur if you select a folder with too many sub-folders in your list of media folders to search through. For automatic file switching to work again please select File->Set Media Directories in the menu bar and remove this directory or replace it with an appropriate sub-folder. If the folder is actually fine then you can re-enable it by selecting File->Set Media Directories and pressing 'OK'.", # Folder
"folder-search-first-file-timeout-error": "The search for media in '{}' was aborted as it took too long to access the directory. This could happen if it is a network drive or if you configure your drive to spin down after a period of inactivity. For automatic file switching to work again please go to File->Set Media Directories and either remove the directory or resolve the issue (e.g. by changing power saving settings).", # Folder
"added-file-not-in-media-directory-error": "You loaded a file in '{}' which is not a known media directory. You can add this as a media directory by selecting File->Set Media Directories in the menu bar.", # Folder
"no-media-directories-error": "No media directories have been set. For shared playlist and file switching features to work properly please select File->Set Media Directories and specify where Syncplay should look to find media files.",
"cannot-find-directory-error": "Could not find media directory '{}'. To update your list of media directories please select File->Set Media Directories from the menu bar and specify where Syncplay should look to find media files.",
"failed-to-load-server-list-error": "Failed to load public server list. Please visit https://www.syncplay.pl/ in your browser.",
# Client arguments
"argument-description": 'Solution to synchronize playback of multiple media player instances over the network.',
"argument-epilog": 'If no options supplied _config values will be used',
"nogui-argument": 'show no GUI',
"host-argument": 'server\'s address',
"name-argument": 'desired username',
"debug-argument": 'debug mode',
"force-gui-prompt-argument": 'make configuration prompt appear',
"no-store-argument": 'don\'t store values in .syncplay',
"room-argument": 'default room',
"password-argument": 'server password',
"player-path-argument": 'path to your player executable',
"file-argument": 'file to play',
"args-argument": 'player options, if you need to pass options starting with - prepend them with single \'--\' argument',
"clear-gui-data-argument": 'resets path and window state GUI data stored as QSettings',
"language-argument": 'language for Syncplay messages (de/en/ru)',
"version-argument": 'prints your version',
"version-message": "You're using Syncplay version {} ({})",
# Client labels
"config-window-title": "Syncplay configuration",
"connection-group-title": "Connection settings",
"host-label": "Server address: ",
"name-label": "Username (optional):",
"password-label": "Server password (if any):",
"room-label": "Default room: ",
"media-setting-title": "Media player settings",
"executable-path-label": "Path to media player:",
"media-path-label": "Path to video (optional):",
"player-arguments-label": "Player arguments (if any):",
"browse-label": "Browse",
"update-server-list-label": "Update list",
"more-title": "Show more settings",
"never-rewind-value": "Never",
"seconds-suffix": " secs",
"privacy-sendraw-option": "Send raw",
"privacy-sendhashed-option": "Send hashed",
"privacy-dontsend-option": "Don't send",
"filename-privacy-label": "Filename information:",
"filesize-privacy-label": "File size information:",
"checkforupdatesautomatically-label": "Check for Syncplay updates automatically",
"slowondesync-label": "Slow down on minor desync (not supported on MPC-HC/BE)",
"rewindondesync-label": "Rewind on major desync (recommended)",
"fastforwardondesync-label": "Fast-forward if lagging behind (recommended)",
"dontslowdownwithme-label": "Never slow down or rewind others (experimental)",
"pausing-title": "Pausing",
"pauseonleave-label": "Pause when user leaves (e.g. if they are disconnected)",
"readiness-title": "Initial readiness state",
"readyatstart-label": "Set me as 'ready to watch' by default",
"forceguiprompt-label": "Don't always show the Syncplay configuration window", # (Inverted)
"showosd-label": "Enable OSD Messages",
"showosdwarnings-label": "Include warnings (e.g. when files are different, users not ready)",
"showsameroomosd-label": "Include events in your room",
"shownoncontrollerosd-label": "Include events from non-operators in managed rooms",
"showdifferentroomosd-label": "Include events in other rooms",
"showslowdownosd-label": "Include slowing down / reverting notifications",
"language-label": "Language:",
"automatic-language": "Default ({})", # Default language
"showdurationnotification-label": "Warn about media duration mismatches",
"basics-label": "Basics",
"readiness-label": "Play/Pause",
"misc-label": "Misc",
"core-behaviour-title": "Core room behaviour",
"syncplay-internals-title": "Syncplay internals",
"syncplay-mediasearchdirectories-title": "Directories to search for media",
"syncplay-mediasearchdirectories-label": "Directories to search for media (one path per line)",
"sync-label": "Sync",
"sync-otherslagging-title": "If others are lagging behind...",
"sync-youlaggging-title": "If you are lagging behind...",
"messages-label": "Messages",
"messages-osd-title": "On-screen Display settings",
"messages-other-title": "Other display settings",
"chat-label": "Chat",
"privacy-label": "Privacy", # Currently unused, but will be brought back if more space is needed in Misc tab
"privacy-title": "Privacy settings",
"unpause-title": "If you press play, set as ready and:",
"unpause-ifalreadyready-option": "Unpause if already set as ready",
"unpause-ifothersready-option": "Unpause if already ready or others in room are ready (default)",
"unpause-ifminusersready-option": "Unpause if already ready or if all others ready and min users ready",
"unpause-always": "Always unpause",
"syncplay-trusteddomains-title": "Trusted domains (for streaming services and hosted content)",
"chat-title": "Chat message input",
"chatinputenabled-label": "Enable chat input via mpv",
"chatdirectinput-label": "Allow instant chat input (bypass having to press enter key to chat)",
"chatinputfont-label": "Chat input font",
"chatfont-label": "Set font",
"chatcolour-label": "Set colour",
"chatinputposition-label": "Position of message input area in mpv",
"chat-top-option": "Top",
"chat-middle-option": "Middle",
"chat-bottom-option": "Bottom",
"chatoutputheader-label": "Chat message output",
"chatoutputfont-label": "Chat output font",
"chatoutputenabled-label": "Enable chat output in media player (mpv only for now)",
"chatoutputposition-label": "Output mode",
"chat-chatroom-option": "Chatroom style",
"chat-scrolling-option": "Scrolling style",
"mpv-key-tab-hint": "[TAB] to toggle access to alphabet row key shortcuts.",
"mpv-key-hint": "[ENTER] to send message. [ESC] to escape chat mode.",
"alphakey-mode-warning-first-line": "You can temporarily use old mpv bindings with a-z keys.",
"alphakey-mode-warning-second-line": "Press [TAB] to return to Syncplay chat mode.",
"help-label": "Help",
"reset-label": "Restore defaults",
"run-label": "Run Syncplay",
"storeandrun-label": "Store configuration and run Syncplay",
"contact-label": "Feel free to e-mail <a href=\"mailto:dev@syncplay.pl\"><nobr>dev@syncplay.pl</nobr></a>, chat via the <a href=\"https://webchat.freenode.net/?channels=#syncplay\"><nobr>#Syncplay IRC channel</nobr></a> on irc.freenode.net, <a href=\"https://github.com/Uriziel/syncplay/issues\"><nobr>raise an issue</nobr></a> via GitHub, <a href=\"https://www.facebook.com/SyncplaySoftware\"><nobr>like us on Facebook</nobr></a>, <a href=\"https://twitter.com/Syncplay/\"><nobr>follow us on Twitter</nobr></a>, or visit <a href=\"https://syncplay.pl/\"><nobr>https://syncplay.pl/</nobr></a>. NOTE: Chat messages are not encrypted so do not use Syncplay to send sensitive information.",
"joinroom-label": "Join room",
"joinroom-menu-label": "Join room {}",
"seektime-menu-label": "Seek to time",
"undoseek-menu-label": "Undo seek",
"play-menu-label": "Play",
"pause-menu-label": "Pause",
"playbackbuttons-menu-label": "Show playback buttons",
"autoplay-menu-label": "Show auto-play button",
"autoplay-guipushbuttonlabel": "Play when all ready",
"autoplay-minimum-label": "Min users:",
"sendmessage-label": "Send",
"ready-guipushbuttonlabel": "I'm ready to watch!",
"roomuser-heading-label": "Room / User",
"size-heading-label": "Size",
"duration-heading-label": "Length",
"filename-heading-label": "Filename",
"notifications-heading-label": "Notifications",
"userlist-heading-label": "List of who is playing what",
"browseformedia-label": "Browse for media files",
"file-menu-label": "&File", # & precedes shortcut key
"openmedia-menu-label": "&Open media file",
"openstreamurl-menu-label": "Open &media stream URL",
"setmediadirectories-menu-label": "Set media &directories",
"exit-menu-label": "E&xit",
"advanced-menu-label": "&Advanced",
"window-menu-label": "&Window",
"setoffset-menu-label": "Set &offset",
"createcontrolledroom-menu-label": "&Create managed room",
"identifyascontroller-menu-label": "&Identify as room operator",
"settrusteddomains-menu-label": "Set &trusted domains",
"addtrusteddomain-menu-label": "Add {} as trusted domain", # Domain
"playback-menu-label": "&Playback",
"help-menu-label": "&Help",
"userguide-menu-label": "Open user &guide",
"update-menu-label": "Check for &update",
# About dialog
"about-menu-label": "&About Syncplay",
"about-dialog-title": "About Syncplay",
"about-dialog-release": "Version {} release {}",
"about-dialog-license-text": "Licensed under the Apache License, Version 2.0",
"about-dialog-license-button": "License",
"about-dialog-dependencies": "Dependencies",
"setoffset-msgbox-label": "Set offset",
"offsetinfo-msgbox-label": "Offset (see https://syncplay.pl/guide/ for usage instructions):",
"promptforstreamurl-msgbox-label": "Open media stream URL",
"promptforstreamurlinfo-msgbox-label": "Stream URL",
"addfolder-label": "Add folder",
"adduris-msgbox-label": "Add URLs to playlist (one per line)",
"editplaylist-msgbox-label": "Set playlist (one per line)",
"trusteddomains-msgbox-label": "Domains it is okay to automatically switch to (one per line)",
"createcontrolledroom-msgbox-label": "Create managed room",
"controlledroominfo-msgbox-label": "Enter name of managed room\r\n(see https://syncplay.pl/guide/ for usage instructions):",
"identifyascontroller-msgbox-label": "Identify as room operator",
"identifyinfo-msgbox-label": "Enter operator password for this room\r\n(see https://syncplay.pl/guide/ for usage instructions):",
"public-server-msgbox-label": "Select the public server for this viewing session",
"megabyte-suffix": " MB",
# Tooltips
"host-tooltip": "Hostname or IP to connect to, optionally including port (e.g. syncplay.pl:8999). Only synchronised with people on same server/port.",
"name-tooltip": "Nickname you will be known by. No registration, so can easily change later. Random name generated if none specified.",
"password-tooltip": "Passwords are only needed for connecting to private servers.",
"room-tooltip": "Room to join upon connection can be almost anything, but you will only be synchronised with people in the same room.",
"executable-path-tooltip": "Location of your chosen supported media player (mpv, VLC, MPC-HC/BE or mplayer2).",
"media-path-tooltip": "Location of video or stream to be opened. Necessary for mplayer2.",
"player-arguments-tooltip": "Additional command line arguments / switches to pass on to this media player.",
"mediasearcdirectories-arguments-tooltip": "Directories where Syncplay will search for media files, e.g. when you are using the click to switch feature. Syncplay will look recursively through sub-folders.",
"more-tooltip": "Display less frequently used settings.",
"filename-privacy-tooltip": "Privacy mode for sending currently playing filename to server.",
"filesize-privacy-tooltip": "Privacy mode for sending size of currently playing file to server.",
"privacy-sendraw-tooltip": "Send this information without obfuscation. This is the default option with most functionality.",
"privacy-sendhashed-tooltip": "Send a hashed version of the information, making it less visible to other clients.",
"privacy-dontsend-tooltip": "Do not send this information to the server. This provides for maximum privacy.",
"checkforupdatesautomatically-tooltip": "Regularly check with the Syncplay website to see whether a new version of Syncplay is available.",
"slowondesync-tooltip": "Reduce playback rate temporarily when needed to bring you back in sync with other viewers. Not supported on MPC-HC/BE.",
"dontslowdownwithme-tooltip": "Means others do not get slowed down or rewinded if your playback is lagging. Useful for room operators.",
"pauseonleave-tooltip": "Pause playback if you get disconnected or someone leaves from your room.",
"readyatstart-tooltip": "Set yourself as 'ready' at start (otherwise you are set as 'not ready' until you change your readiness state)",
"forceguiprompt-tooltip": "Configuration dialogue is not shown when opening a file with Syncplay.", # (Inverted)
"nostore-tooltip": "Run Syncplay with the given configuration, but do not permanently store the changes.", # (Inverted)
"rewindondesync-tooltip": "Jump back when needed to get back in sync. Disabling this option can result in major desyncs!",
"fastforwardondesync-tooltip": "Jump forward when out of sync with room operator (or your pretend position if 'Never slow down or rewind others' enabled).",
"showosd-tooltip": "Sends Syncplay messages to media player OSD.",
"showosdwarnings-tooltip": "Show warnings if playing different file, alone in room, users not ready, etc.",
"showsameroomosd-tooltip": "Show OSD notifications for events relating to room user is in.",
"shownoncontrollerosd-tooltip": "Show OSD notifications for events relating to non-operators who are in managed rooms.",
"showdifferentroomosd-tooltip": "Show OSD notifications for events relating to room user is not in.",
"showslowdownosd-tooltip": "Show notifications of slowing down / reverting on time difference.",
"showdurationnotification-tooltip": "Useful for when a segment in a multi-part file is missing, but can result in false positives.",
"language-tooltip": "Language to be used by Syncplay.",
"unpause-always-tooltip": "If you press unpause it always sets you as ready and unpause, rather than just setting you as ready.",
"unpause-ifalreadyready-tooltip": "If you press unpause when not ready it will set you as ready - press unpause again to unpause.",
"unpause-ifothersready-tooltip": "If you press unpause when not ready, it will only upause if others are ready.",
"unpause-ifminusersready-tooltip": "If you press unpause when not ready, it will only unpause if others are ready and minimum users threshold is met.",
"trusteddomains-arguments-tooltip": "Domains that it is okay for Syncplay to automatically switch to when shared playlists is enabled.",
"chatinputenabled-tooltip": "Enable chat input in mpv (press enter to chat, enter to send, escape to cancel)",
"chatdirectinput-tooltip": "Skip having to press 'enter' to go into chat input mode in mpv. Press TAB in mpv to temporarily disable this feature.",
"font-label-tooltip": "Font used for when entering chat messages in mpv. Client-side only, so doesn't affect what other see.",
"set-input-font-tooltip": "Font family used for when entering chat messages in mpv. Client-side only, so doesn't affect what other see.",
"set-input-colour-tooltip": "Font colour used for when entering chat messages in mpv. Client-side only, so doesn't affect what other see.",
"chatinputposition-tooltip": "Location in mpv where chat input text will appear when you press enter and type.",
"chatinputposition-top-tooltip": "Place chat input at top of mpv window.",
"chatinputposition-middle-tooltip": "Place chat input in dead centre of mpv window.",
"chatinputposition-bottom-tooltip": "Place chat input at bottom of mpv window.",
"chatoutputenabled-tooltip": "Show chat messages in OSD (if supported by media player).",
"font-output-label-tooltip": "Chat output font.",
"set-output-font-tooltip": "Font used for when displaying chat messages.",
"chatoutputmode-tooltip": "How chat messages are displayed.",
"chatoutputmode-chatroom-tooltip": "Display new lines of chat directly below previous line.",
"chatoutputmode-scrolling-tooltip": "Scroll chat text from right to left.",
"help-tooltip": "Opens the Syncplay.pl user guide.",
"reset-tooltip": "Reset all settings to the default configuration.",
"update-server-list-tooltip": "Connect to syncplay.pl to update list of public servers.",
"joinroom-tooltip": "Leave current room and joins specified room.",
"seektime-msgbox-label": "Jump to specified time (in seconds / min:sec). Use +/- for relative seek.",
"ready-tooltip": "Indicates whether you are ready to watch.",
"autoplay-tooltip": "Auto-play when all users who have readiness indicator are ready and minimum user threshold met.",
"switch-to-file-tooltip": "Double click to switch to {}", # Filename
"sendmessage-tooltip": "Send message to room",
# In-userlist notes (GUI)
"differentsize-note": "Different size!",
"differentsizeandduration-note": "Different size and duration!",
"differentduration-note": "Different duration!",
"nofile-note": "(No file being played)",
# Server messages to client
"new-syncplay-available-motd-message": "<NOTICE> You are using Syncplay {} but a newer version is available from https://syncplay.pl </NOTICE>", # ClientVersion
# Server notifications
"welcome-server-notification": "Welcome to Syncplay server, ver. {0}", # version
"client-connected-room-server-notification": "{0}({2}) connected to room '{1}'", # username, host, room
"client-left-server-notification": "{0} left server", # name
"no-salt-notification": "PLEASE NOTE: To allow room operator passwords generated by this server instance to still work when the server is restarted, please add the following command line argument when running the Syncplay server in the future: --salt {}", # Salt
# Server arguments
"server-argument-description": 'Solution to synchronize playback of multiple MPlayer and MPC-HC/BE instances over the network. Server instance',
"server-argument-epilog": 'If no options supplied _config values will be used',
"server-port-argument": 'server TCP port',
"server-password-argument": 'server password',
"server-isolate-room-argument": 'should rooms be isolated?',
"server-salt-argument": "random string used to generate managed room passwords",
"server-disable-ready-argument": "disable readiness feature",
"server-motd-argument": "path to file from which motd will be fetched",
"server-chat-argument": "Should chat be disabled?",
"server-chat-maxchars-argument": "Maximum number of characters in a chat message (default is {})", # Default number of characters
"server-maxusernamelength-argument": "Maximum number of characters in a username (default is {})",
"server-stats-db-file-argument": "Enable server stats using the SQLite db file provided",
"server-messed-up-motd-unescaped-placeholders": "Message of the Day has unescaped placeholders. All $ signs should be doubled ($$).",
"server-messed-up-motd-too-long": "Message of the Day is too long - maximum of {} chars, {} given.",
# Server errors
"unknown-command-server-error": "Unknown command {}", # message
"not-json-server-error": "Not a json encoded string {}", # message
"not-known-server-error": "You must be known to server before sending this command",
"client-drop-server-error": "Client drop: {} -- {}", # host, error
"password-required-server-error": "Password required",
"wrong-password-server-error": "Wrong password supplied",
"hello-server-error": "Not enough Hello arguments", # DO NOT TRANSLATE
# Playlists
"playlist-selection-changed-notification": "{} changed the playlist selection", # Username
"playlist-contents-changed-notification": "{} updated the playlist", # Username
"cannot-find-file-for-playlist-switch-error": "Could not find file {} in media directories for playlist switch!", # Filename
"cannot-add-duplicate-error": "Could not add second entry for '{}' to the playlist as no duplicates are allowed.", # Filename
"cannot-add-unsafe-path-error": "Could not automatically load {} because it is not on a trusted domain. You can switch to the URL manually by double clicking it in the playlist, and add trusted domains via File->Advanced->Set Trusted Domains. If you right click on a URL then you can add its domain as a trusted domain via the context menu.", # Filename
"sharedplaylistenabled-label": "Enable shared playlists",
"removefromplaylist-menu-label": "Remove from playlist",
"shuffleremainingplaylist-menu-label": "Shuffle remaining playlist",
"shuffleentireplaylist-menu-label": "Shuffle entire playlist",
"undoplaylist-menu-label": "Undo last change to playlist",
"addfilestoplaylist-menu-label": "Add file(s) to bottom of playlist",
"addurlstoplaylist-menu-label": "Add URL(s) to bottom of playlist",
"editplaylist-menu-label": "Edit playlist",
"open-containing-folder": "Open folder containing this file",
"addusersfiletoplaylist-menu-label": "Add {} file to playlist", # item owner indicator
"addusersstreamstoplaylist-menu-label": "Add {} stream to playlist", # item owner indicator
"openusersstream-menu-label": "Open {} stream", # [username]'s
"openusersfile-menu-label": "Open {} file", # [username]'s
"item-is-yours-indicator": "your", # Goes with addusersfiletoplaylist/addusersstreamstoplaylist
"item-is-others-indicator": "{}'s", # username - goes with addusersfiletoplaylist/addusersstreamstoplaylist
"playlist-instruction-item-message": "Drag file here to add it to the shared playlist.",
"sharedplaylistenabled-tooltip": "Room operators can add files to a synced playlist to make it easy for everyone to watching the same thing. Configure media directories under 'Misc'.",
}
| 71.201245 | 691 | 0.715347 |
acf5c5886983ebcf716b5593b4e4fd73d86b4191 | 2,987 | py | Python | autoarray/plot/__init__.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | ["MIT"] | null | null | null | autoarray/plot/__init__.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | ["MIT"] | null | null | null | autoarray/plot/__init__.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | ["MIT"] | null | null | null | from autoarray.plot.mat_wrap.wrap.wrap_base import Units
from autoarray.plot.mat_wrap.wrap.wrap_base import Figure
from autoarray.plot.mat_wrap.wrap.wrap_base import Axis
from autoarray.plot.mat_wrap.wrap.wrap_base import Cmap
from autoarray.plot.mat_wrap.wrap.wrap_base import Colorbar
from autoarray.plot.mat_wrap.wrap.wrap_base import ColorbarTickParams
from autoarray.plot.mat_wrap.wrap.wrap_base import TickParams
from autoarray.plot.mat_wrap.wrap.wrap_base import YTicks
from autoarray.plot.mat_wrap.wrap.wrap_base import XTicks
from autoarray.plot.mat_wrap.wrap.wrap_base import Title
from autoarray.plot.mat_wrap.wrap.wrap_base import YLabel
from autoarray.plot.mat_wrap.wrap.wrap_base import XLabel
from autoarray.plot.mat_wrap.wrap.wrap_base import Legend
from autoarray.plot.mat_wrap.wrap.wrap_base import Output
from autoarray.plot.mat_wrap.wrap.wrap_1d import YXPlot
from autoarray.plot.mat_wrap.wrap.wrap_1d import AXVLine
from autoarray.plot.mat_wrap.wrap.wrap_2d import ArrayOverlay
from autoarray.plot.mat_wrap.wrap.wrap_2d import GridScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import GridPlot
from autoarray.plot.mat_wrap.wrap.wrap_2d import VectorFieldQuiver
from autoarray.plot.mat_wrap.wrap.wrap_2d import PatchOverlay
from autoarray.plot.mat_wrap.wrap.wrap_2d import VoronoiDrawer
from autoarray.plot.mat_wrap.wrap.wrap_2d import OriginScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import MaskScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import BorderScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import PositionsScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import IndexScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import PixelizationGridScatter
from autoarray.plot.mat_wrap.wrap.wrap_2d import ParallelOverscanPlot
from autoarray.plot.mat_wrap.wrap.wrap_2d import SerialPrescanPlot
from autoarray.plot.mat_wrap.wrap.wrap_2d import SerialOverscanPlot
from autoarray.plot.mat_wrap.mat_plot import MatPlot1D
from autoarray.plot.mat_wrap.include import Include1D
from autoarray.plot.mat_wrap.visuals import Visuals1D
from autoarray.plot.mat_wrap.mat_plot import MatPlot2D
from autoarray.plot.mat_wrap.include import Include2D
from autoarray.plot.mat_wrap.visuals import Visuals2D
from autoarray.plot.structure_plotters import Array2DPlotter
from autoarray.plot.structure_plotters import Grid2DPlotter
from autoarray.plot.structure_plotters import MapperPlotter
from autoarray.plot.structure_plotters import YX1DPlotter
from autoarray.plot.inversion_plotters import InversionPlotter
from autoarray.plot.imaging_plotters import ImagingPlotter
from autoarray.plot.interferometer_plotters import InterferometerPlotter
from autoarray.plot.fit_imaging_plotters import FitImagingPlotter
from autoarray.plot.fit_interferometer_plotters import FitInterferometerPlotter
from autoarray.plot.multi_plotters import MultiFigurePlotter
from autoarray.plot.multi_plotters import MultiYX1DPlotter
| 55.314815 | 80 | 0.861399 |
acf5c5ff00bea6db2b50cf5c749dfa55a5c0aef1 | 14,658 | py | Python | library_old/bigip_iapp_service.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | ["Apache-2.0"] | 12 | 2016-12-29T16:09:21.000Z | 2019-06-29T14:12:17.000Z | library_old/bigip_iapp_service.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | ["Apache-2.0"] | 24 | 2017-05-24T07:56:56.000Z | 2017-11-30T09:31:56.000Z | library_old/bigip_iapp_service.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | ["Apache-2.0"] | 26 | 2017-05-31T17:15:32.000Z | 2021-03-29T03:45:06.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_iapp_service
short_description: Manages TCL iApp services on a BIG-IP.
description:
- Manages TCL iApp services on a BIG-IP.
version_added: "2.4"
options:
name:
description:
- The name of the iApp service that you want to deploy.
required: True
template:
description:
- The iApp template from which to instantiate a new service. This
template must exist on your BIG-IP before you can successfully
create a service. This parameter is required if the C(state)
parameter is C(present).
required: False
default: None
parameters:
description:
- A hash of all the required template variables for the iApp template.
If your parameters are stored in a file (the more common scenario)
it is recommended you use either the `file` or `template` lookups
to supply the expected parameters.
required: False
default: None
force:
description:
- Forces the updating of an iApp service even if the parameters to the
service have not changed. This option is of particular importance if
the iApp template that underlies the service has been updated in-place.
This option is equivalent to re-configuring the iApp if that template
has changed.
required: False
default: False
state:
description:
- When C(present), ensures that the iApp service is created and running.
When C(absent), ensures that the iApp service has been removed.
required: False
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
requirements:
- f5-sdk
- deepdiff
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create HTTP iApp service from iApp template
bigip_iapp_service:
name: "foo-service"
template: "f5.http"
parameters: "{{ lookup('file', 'f5.http.parameters.json') }}"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Upgrade foo-service to v1.2.0rc4 of the f5.http template
bigip_iapp_service:
name: "foo-service"
template: "f5.http.v1.2.0rc4"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Configure a service using parameters in YAML
bigip_iapp_service:
name: "tests"
template: "web_frontends"
password: "admin"
server: "{{ inventory_hostname }}"
server_port: "{{ bigip_port }}"
validate_certs: "{{ validate_certs }}"
state: "present"
user: "admin"
parameters:
variables:
- name: "var__vs_address"
value: "1.1.1.1"
- name: "pm__apache_servers_for_http"
value: "2.2.2.1:80"
- name: "pm__apache_servers_for_https"
value: "2.2.2.2:80"
delegate_to: localhost
- name: Re-configure a service whose underlying iApp was updated in place
bigip_iapp_service:
name: "tests"
template: "web_frontends"
password: "admin"
force: yes
server: "{{ inventory_hostname }}"
server_port: "{{ bigip_port }}"
validate_certs: "{{ validate_certs }}"
state: "present"
user: "admin"
parameters:
variables:
- name: "var__vs_address"
value: "1.1.1.1"
- name: "pm__apache_servers_for_http"
value: "2.2.2.1:80"
- name: "pm__apache_servers_for_https"
value: "2.2.2.2:80"
delegate_to: localhost
'''
RETURN = '''
'''
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iteritems,
defaultdict,
iControlUnexpectedHTTPError
)
from deepdiff import DeepDiff
class Parameters(AnsibleF5Parameters):
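    # returnables: keys echoed back in the module's results;
    # api_attributes: keys sent to the iControl REST API;
    # updatables: keys compared to decide whether an update is needed.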
returnables = ['variables']
api_attributes = [
'tables', 'variables', 'template', 'lists'
]
updatables = ['tables', 'variables', 'lists']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def tables(self):
result = []
if not self._values['tables']:
return None
tables = self._values['tables']
for table in tables:
tmp = dict()
name = table.get('name', None)
if name is None:
raise F5ModuleError(
"One of the provided tables does not have a name"
)
tmp['name'] = str(name)
columns = table.get('columnNames', None)
if columns:
tmp['columnNames'] = [str(x) for x in columns]
# You cannot have rows without columns
rows = table.get('rows', None)
if rows:
tmp['rows'] = []
for row in rows:
tmp['rows'].append(dict(row=[str(x) for x in row['row']]))
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@tables.setter
def tables(self, value):
self._values['tables'] = value
@property
def variables(self):
result = []
if not self._values['variables']:
return None
variables = self._values['variables']
for variable in variables:
tmp = dict((str(k), str(v)) for k, v in iteritems(variable))
if 'encrypted' not in tmp:
                # BIG-IP will inject an 'encrypted' key if you don't provide
                # one, so supply the default of 'no' up front.
tmp['encrypted'] = 'no'
if 'value' not in tmp:
tmp['value'] = ''
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@variables.setter
def variables(self, value):
self._values['variables'] = value
@property
def lists(self):
result = []
if not self._values['lists']:
return None
lists = self._values['lists']
        for item in lists:
            tmp = dict((str(k), str(v)) for k, v in iteritems(item) if k != 'value')
            if 'encrypted' not in item:
                # BIG-IP will inject an 'encrypted' key if you don't provide
                # one, so supply the default of 'no' up front.
                tmp['encrypted'] = 'no'
            if 'value' in item:
                if len(item['value']) > 0:
                    # BIG-IP removes empty values entries, so mimic this behavior
                    # for user-supplied values.
                    tmp['value'] = [str(x) for x in item['value']]
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@lists.setter
def lists(self, value):
self._values['lists'] = value
@property
def parameters(self):
return dict(
tables=self.tables,
variables=self.variables,
lists=self.lists
)
@parameters.setter
def parameters(self, value):
if value is None:
return
if 'tables' in value:
self.tables = value['tables']
if 'variables' in value:
self.variables = value['variables']
if 'lists' in value:
self.lists = value['lists']
@property
def template(self):
if self._values['template'] is None:
return None
if self._values['template'].startswith("/"+self.partition):
return self._values['template']
else:
return '/{0}/{1}'.format(
self.partition, self._values['template']
)
@template.setter
def template(self, value):
self._values['template'] = value
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
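        # Diff each updatable attribute of the desired (want) state against
        # the current (have) state and record a DeepDiff summary of changes.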
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
                    changed[key] = str(DeepDiff(attr1, attr2))
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def exists(self):
result = self.client.api.tm.sys.application.services.service.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update() and not self.want.force:
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
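        # Setting 'execute-action' to 'definition' asks BIG-IP to re-apply
        # the iApp template definition, so in-place template updates take
        # effect on the service.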
params['execute-action'] = 'definition'
resource = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
)
resource.update(**params)
def read_current_from_device(self):
result = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
).to_dict()
result.pop('_meta_data', None)
return Parameters(result)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.sys.application.services.service.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp service")
return True
def remove_from_device(self):
resource = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
template=dict(
required=False,
default=None
),
parameters=dict(
required=False,
default=None,
type='dict'
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
),
force=dict(
required=False,
default=False,
choices=BOOLEANS,
type='bool'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| 30.410788 | 84 | 0.580502 |
acf5c653584a4406e9360df1bf0828e7ffea211d | 1,312 | py | Python | task_7.py | shubhamguptaorg/pyhton_basic_interview_task | 7c87fa76df77c00df1e4e35ad6ea52e876fb21e6 | ["MIT"] | null | null | null | task_7.py | shubhamguptaorg/pyhton_basic_interview_task | 7c87fa76df77c00df1e4e35ad6ea52e876fb21e6 | ["MIT"] | null | null | null | task_7.py | shubhamguptaorg/pyhton_basic_interview_task | 7c87fa76df77c00df1e4e35ad6ea52e876fb21e6 | ["MIT"] | null | null | null | # Task 7
# Write a password generator in Python. Be creative with how you generate passwords - strong passwords have a mix of lowercase letters, uppercase letters, numbers, and symbols.
# The passwords should be random, generating a new password every time the user asks for a new password. Include your run-time code in a main method.
# Extra:
# Ask the user how strong they want their password to be. For weak passwords, pick a word or two from a list.
import random
def passwordGenerator(strength = 12):
stringForPass = "ACONITEYDATAsWithEDucationtySolution0123450shuBham!@&*()#$%^?"
if strength == 'weak':
return "".join(random.sample(stringForPass[0:10], 10))
return "".join(random.sample(stringForPass, strength))
switcher = {
1: passwordGenerator(), #strong
2: passwordGenerator(15), #strongest
3: passwordGenerator("weak"), #weak
}
def main():
result = ''
try:
take_user_input = int(input("How strong password you want \n 1-Strong \n 2-Strongest \n 3-Weak \n"))
result = switcher.get(take_user_input, "Wrong Choice")
except ValueError:
print("*************Please Enter Number only*************")
take_user_input = None
main()
print(result)
if __name__ == "__main__":
main() | 38.588235 | 177 | 0.664634 |
acf5c6e6827b14e28f0f4397202f1b1e0fc5383b | 5,469 | py | Python | app.py | FairWell-dev/FairWell | 6c05b055d0e1b162402f1ad164ec8c81c8fc5fc8 | [
"MIT"
] | 1 | 2021-12-02T11:35:23.000Z | 2021-12-02T11:35:23.000Z | app.py | FairWell-dev/FairWell | 6c05b055d0e1b162402f1ad164ec8c81c8fc5fc8 | [
"MIT"
] | null | null | null | app.py | FairWell-dev/FairWell | 6c05b055d0e1b162402f1ad164ec8c81c8fc5fc8 | [
"MIT"
] | null | null | null | import json
import numpy as np
import os
import pandas as pd
import streamlit as st
import seaborn as sns
import torch
from pages import features_page, data_fairness_page, model_bias_detection_page
def convert_to_tensor(df, columns, type='Float'):
arr = np.array(df[[*columns]]).astype(int)
tensor = torch.Tensor(arr)
if type == 'Long':
return tensor.type(torch.LongTensor)
return tensor
def predict(model, inputs, threshold=0.5):
"""
:param model: Torchscript model
:param inputs: Torch tensor (or tuple of Torch tensors) to be fed to model
:param threshold: Classification threshold value, default 0.5
:return pred_proba: Numpy array, probability predictions for class 1
:return y_pred: Numpy array, predicted labels (0/1) based on threshold
"""
with torch.no_grad():
model.eval()
pred_proba = model(*inputs)
# convert from tensor to np array
pred_proba = pred_proba.detach().cpu().numpy()
y_pred = [1 if i >= threshold else 0 for i in pred_proba]
return pred_proba, y_pred
def read_csv_list(file_list, selected=None):
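    # Returns a dict of {basename-without-extension: DataFrame} plus the key
    # of the dataset chosen in the sidebar.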
# Check for user uploads
if not selected:
selected = st.sidebar.selectbox("Select one dataset for selection of features.",
options=[file.name for file in file_list],
index=0)
df_dict = {}
select_key = None
for file_path in file_list:
df = pd.read_csv(file_path)
if not isinstance(file_path, str):
file_path = file_path.name
key = os.path.basename(file_path)[:-4]
df_dict[key] = df
if file_path == selected:
select_key = key
return df_dict, select_key
def run_inference(file_list, df_dict, json_files):
    # Load each TorchScript model, run inference on its matching test data
    # and return a dict mapping model name to a DataFrame with predictions
pred_dict = {}
for file_path in file_list:
# Define key from file_path
key = os.path.basename(file_path.name)[:-3]
# Get corresponding feature_dict
json_file = [file for file in json_files
if file.name[:-5] == key][0]
feature_dict = json.load(json_file)
# Get corresponding test data
test_df = df_dict[key]
x1_ts = convert_to_tensor(test_df, feature_dict.get('x1'), type='Long')
x2_ts = convert_to_tensor(test_df, feature_dict.get('x2'))
# Load model and get predictions
model = torch.jit.load(file_path)
pred_proba, y_pred = predict(model, (x1_ts, x2_ts))
test_df[feature_dict['y'][0]+'_prediction'] = y_pred
test_df[feature_dict['y'][0]+'_probability'] = pred_proba
pred_dict[key] = test_df
return pred_dict
def sidebar_handler(label, type_list, eg_dict):
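    # Render the sidebar (example datasets plus user uploads) and return a
    # dict of DataFrames together with the key of the selected dataset.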
# Example Use Case
eg_labels = list(eg_dict.keys())
st.sidebar.title('Example: NYC Subway Traffic')
eg_df_dict, eg_key = read_csv_list(eg_dict.values(), eg_dict[eg_labels[0]])
eg_df_dict_rep_key = dict(zip(eg_labels, eg_df_dict.values()))
example = ''
for dataset in eg_labels:
example += '- **%s**\n' % dataset
st.sidebar.markdown(example)
# User Upload
st.sidebar.title('Upload')
file_list = st.sidebar.file_uploader('%s, (%s)' % (label, ', '.join([type.upper() for type in type_list])),
type = type_list,
accept_multiple_files = True)
# Load Files
if file_list:
csv_files = [file for file in file_list if file.type in ['text/csv', 'application/vnd.ms-excel']]
pt_files = [file for file in file_list if file.type in ['application/octet-stream']]
json_files = [file for file in file_list if file.type in ['application/json']]
df_dict, select_key = read_csv_list(csv_files)
if len(type_list) > 1:
try:
# Run Inference
pred_dict = run_inference(pt_files, df_dict, json_files)
selected = pred_dict[select_key]
return pred_dict, select_key
except:
st.warning("Please ensure you have uploaded the corresponding model, test dataset and features json files with the same name for each model")
return eg_df_dict_rep_key, eg_labels[0]
return df_dict, select_key
else:
return eg_df_dict_rep_key, eg_labels[0]
# Config
st.set_page_config(page_title='FairWell',
layout='wide',
initial_sidebar_state='expanded')
# Sidebar
st.sidebar.title('FairWell')
page = st.sidebar.radio('Navigate',
options=['Guide',
'Feature Explorer',
'Data Fairness Assessment',
'Model Bias Detection & Mitigation'],
index=0)
# Title
st.title('FairWell')
# Pages
if page.lower() == 'guide':
about = open('README.md', 'r', encoding='utf8')
about = about.read().replace('./images/','https://raw.githubusercontent.com/FairWell-dev/FairWell/main/images/')[12:]
st.markdown(about, unsafe_allow_html=True)
elif page.lower() == 'feature explorer':
features_page.render(sidebar_handler)
elif page.lower() == 'data fairness assessment':
data_fairness_page.render(sidebar_handler)
elif page.lower() == 'model bias detection & mitigation':
model_bias_detection_page.render(sidebar_handler)
else:
st.text('Page ' + page + ' is not implemented.')
| 36.704698 | 157 | 0.632474 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.