hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9af2e86d2d19ba95d7c121bcc98c92fbd274e537 | 804 | py | Python | util/typecheck.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | 22 | 2021-06-15T00:01:27.000Z | 2022-03-15T11:22:25.000Z | util/typecheck.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | null | null | null | util/typecheck.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | 2 | 2021-11-30T08:56:03.000Z | 2022-01-24T10:46:39.000Z | """Type checking at runtime."""
from collections.abc import Iterable
check = True  # Module-wide switch: set to False to disable all runtime type checks.


def checkinstance(*args):
    """Perform runtime type checking.

    The first argument is the name of the calling function (used only in
    error messages).  The remaining arguments come in pairs ``v, T``, and
    each ``v`` is checked to be an instance of ``T``.  When ``T`` is a
    one-element list ``[U]``, ``v`` must be iterable and every item of
    ``v`` is checked (recursively) against ``U``.

    :raises TypeError: if any value does not match its expected type.
    """
    if not check:
        return
    fname = args[0]

    def check_type(v, T):
        # A list [U] denotes "iterable of U": verify v is iterable, then
        # recursively check each item against U.
        # (Fix: use isinstance instead of `type(T) == list` so list
        # subclasses are accepted as type specifications too.)
        if isinstance(T, list):
            if not isinstance(v, Iterable):
                raise TypeError('%s expects %s but got %s' % (fname, 'list', str(type(v))))
            for item in v:
                check_type(item, T[0])
        elif not isinstance(v, T):
            raise TypeError('%s expects %s but got %s' % (fname, T, str(type(v))))

    # Arguments after fname come in (value, type) pairs: indices (1, 2), (3, 4), ...
    for i in range(len(args) // 2):
        check_type(args[2 * i + 1], args[2 * i + 2])
| 26.8 | 91 | 0.567164 |
1c0f5b1ac93f2add170ec633e481a245ce5284f2 | 1,770 | py | Python | chapter14/search_mindrecord.py | mindspore-ai/book | d25e0f3948e882054dee68403bebd052e097e976 | [
"Apache-2.0",
"CC-BY-4.0"
] | 165 | 2020-03-28T07:05:18.000Z | 2021-08-05T01:04:42.000Z | chapter14/search_mindrecord.py | mindspore-ai/book | d25e0f3948e882054dee68403bebd052e097e976 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | chapter14/search_mindrecord.py | mindspore-ai/book | d25e0f3948e882054dee68403bebd052e097e976 | [
"Apache-2.0",
"CC-BY-4.0"
] | 5 | 2020-06-05T02:52:00.000Z | 2020-10-26T06:29:00.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import uuid
from mindspore.mindrecord import MindPage, SUCCESS
from write_mindrecord import write_mindrecord_tutorial
# Path of the MindRecord file created by write_mindrecord_tutorial() and
# consumed (then deleted) by this demo script.
MINDRECORD_FILE_NAME = "./imagenet.mindrecord"
def search_mindrecord_tutorial():
    """Demonstrate category-based paged reads from a MindRecord file."""
    pager = MindPage(MINDRECORD_FILE_NAME)

    # The dataset was written with exactly these two candidate category fields.
    candidate_fields = pager.get_category_fields()
    assert candidate_fields == ['file_name', 'label'], \
        'failed on getting candidate category fields.'

    status = pager.set_category_field("label")
    assert status == SUCCESS, 'failed on setting category field.'

    category_info = pager.read_category_info()

    # One row from category id 0 (page 0, one row per page);
    # each row carries three fields.
    rows_by_id = pager.read_at_page_by_id(0, 0, 1)
    assert len(rows_by_id) == 1
    assert len(rows_by_id[0]) == 3

    # Two rows from the category whose name is "2".
    rows_by_name = pager.read_at_page_by_name("2", 0, 2)
    assert len(rows_by_name) == 2
    assert len(rows_by_name[0]) == 3
if __name__ == '__main__':
    # End-to-end demo: write the MindRecord file, exercise the paged
    # search API, then remove the generated artifacts.
    write_mindrecord_tutorial()
    search_mindrecord_tutorial()
    os.remove(MINDRECORD_FILE_NAME)
    # MindRecord also generates a companion ".db" index file alongside
    # the data file; remove it as well.
    os.remove(MINDRECORD_FILE_NAME + ".db")
| 32.777778 | 78 | 0.677401 |
3d16d98404d6355a61101c1c6e9c4345315b6e60 | 469 | py | Python | larcv/core/DataFormat/test/test_pca.py | kvtsang/larcv2 | b2804b3390ea4d9f8bdf2c3ab5c82216fa532adf | [
"MIT"
] | 14 | 2017-10-19T15:08:29.000Z | 2021-03-31T21:21:07.000Z | larcv/core/DataFormat/test/test_pca.py | kvtsang/larcv2 | b2804b3390ea4d9f8bdf2c3ab5c82216fa532adf | [
"MIT"
] | 32 | 2017-10-25T22:54:06.000Z | 2019-10-01T13:57:15.000Z | larcv/core/DataFormat/test/test_pca.py | kvtsang/larcv2 | b2804b3390ea4d9f8bdf2c3ab5c82216fa532adf | [
"MIT"
] | 16 | 2017-12-07T12:04:40.000Z | 2021-11-15T00:53:31.000Z | from larcv import larcv
import numpy as np
# Number of points placed along the main diagonal of the voxel grid.
npts = 10

# Build an (npts, 3) array of 3-D points (i, i, i) for i = 0..npts-1.
example = np.repeat(np.arange(npts, dtype=np.float32)[:, None], 3, axis=1)

#voxel_set = larcv.as_tensor3d(example)
voxel_set = larcv.VoxelSet()

# NOTE(review): meta.set presumably defines an npts^3 voxel grid spanning
# [0, npts] on each axis -- confirm against the larcv Voxel3DMeta API.
meta = larcv.Voxel3DMeta()
meta.set(0, 0, 0, npts, npts, npts, npts, npts, npts)

# Register one voxel (value 0.) at each diagonal point.
for pt in example:
    voxel_set.add(larcv.Voxel(meta.id(pt[0], pt[1], pt[2]), 0.))

tensor = larcv.SparseTensor3D()
tensor.set(voxel_set, meta)

# Run PCA on the sparse tensor and print the principal-axis components.
pca = tensor.pca()
print(pca.x, pca.y, pca.z)
| 29.3125 | 74 | 0.69936 |
9dc8cc2e59e31ddb6ee67f88350209f2fbe358ff | 26 | py | Python | aries_cloudagent/wallet/tests/__init__.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/wallet/tests/__init__.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/wallet/tests/__init__.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | """
Wallet test suite
"""
| 6.5 | 17 | 0.576923 |
71fa63d4c8574d43aa4e9c54b0314b1e977adb94 | 67 | py | Python | src/tess_sip/__init__.py | icweaver/TESS-SIP | 6ead09dbc00abd0ab19b8d4043683c2383afb5c9 | [
"MIT"
] | 11 | 2020-11-30T19:14:19.000Z | 2022-02-14T14:45:14.000Z | src/tess_sip/__init__.py | icweaver/TESS-SIP | 6ead09dbc00abd0ab19b8d4043683c2383afb5c9 | [
"MIT"
] | 7 | 2020-12-17T14:41:44.000Z | 2021-08-17T19:34:58.000Z | src/tess_sip/__init__.py | icweaver/TESS-SIP | 6ead09dbc00abd0ab19b8d4043683c2383afb5c9 | [
"MIT"
] | 6 | 2021-01-14T04:26:39.000Z | 2021-07-13T22:13:00.000Z | from .tess_sip import SIP
__version__ = "1.1.0"  # Package version string.
__all__ = ["SIP"]  # Public API re-exported from tess_sip.tess_sip.
| 13.4 | 25 | 0.671642 |
a6063bc403f05699efeeabd5e09a8f9835a3030c | 1,241 | py | Python | test/proj4/proj-regression-EPSG-3857-19.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 7 | 2019-03-19T09:32:41.000Z | 2022-02-07T13:20:33.000Z | test/proj4/proj-regression-EPSG-3857-19.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 2 | 2021-03-30T05:37:20.000Z | 2021-08-17T13:58:04.000Z | test/proj4/proj-regression-EPSG-3857-19.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 5 | 2019-03-19T10:43:46.000Z | 2021-09-09T14:28:39.000Z | from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
    """Render a shaded-coastline map of a corner-defined area in the given
    projection, saving it as a PNG named after this script.

    :param epsg: projection identifier, e.g. "EPSG:3857"
    :param llx: lower-left longitude
    :param lly: lower-left latitude
    :param urx: upper-right longitude
    :param ury: upper-right latitude
    """
    image_name = os.path.basename(__file__).split('.')[0]
    title_text = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(
        epsg, llx, lly, urx, ury)

    # PNG output driver.
    png = output(
        output_formats=['png'],
        output_name=image_name,
        output_name_first_page_number='off')

    # Geographical area, defined by its corner coordinates.
    area = mmap(
        subpage_lower_left_latitude=lly,
        subpage_lower_left_longitude=llx,
        subpage_map_projection=epsg,
        subpage_upper_right_latitude=ury,
        subpage_upper_right_longitude=urx,
        subpage_map_area_definition="corners")

    # Shaded-land coastlines as the background layer.
    background = mcoast(
        map_coastline_land_shade='on',
        map_coastline_resolution="medium",
        map_coastline_land_shade_colour='cream')

    # Title annotation for the page.
    title = mtext(
        text_lines=[title_text],
        text_justification='left',
        text_font_size=0.6,
        text_colour='charcoal')

    # Compose and render all layers.
    plot(png, area, background, title)
# Regression case: EPSG:3857 (Web Mercator) over a large Atlantic-centred extent.
plot_area("EPSG:3857", -53.47376639988446, -47.1895176004755, 44.83594753345853, 80.56952110902338 )
1ff41658de24405ccc9dcd593864696139f2b9c5 | 4,556 | py | Python | mysite/gui/nodes.py | GatlinCruz/Capstone | 447a4fea6b9dff6b89f2e4bd82ea1b7dd5175666 | [
"MIT"
] | 1 | 2021-05-13T22:17:41.000Z | 2021-05-13T22:17:41.000Z | mysite/gui/nodes.py | GatlinCruz/Capstone | 447a4fea6b9dff6b89f2e4bd82ea1b7dd5175666 | [
"MIT"
] | null | null | null | mysite/gui/nodes.py | GatlinCruz/Capstone | 447a4fea6b9dff6b89f2e4bd82ea1b7dd5175666 | [
"MIT"
] | 1 | 2021-05-14T04:33:24.000Z | 2021-05-14T04:33:24.000Z | """
This file contains classes for nodes in the network
__author__ Cade Tipton
__author__ Gatlin Cruz
__version__ 4/27/21
"""
class Host:
    """A host node in the network, identified by a name and an IP address."""

    def __init__(self, name, ip):
        """Create a new Host.

        :param name: the name of the host
        :param ip: the IP address of the host
        """
        self.name = name
        self.ip = ip

    def __str__(self):
        """Return the host formatted as ``"<name>: ip - <ip>"``."""
        return f"{self.name}: ip - {self.ip}"

    def __repr__(self):
        """Return the same formatted string as ``__str__``."""
        return f"{self.name}: ip - {self.ip}"

    def add_to_file(self):
        """Return the ``net.addHost`` line used when writing this host to a file."""
        return f"{self.name} = net.addHost( '{self.name}' )\n"

    def add_ip_to_file(self):
        """Return the ``setIP`` line that assigns this host's IP in a file."""
        return f"{self.name}.setIP( '{self.ip}' )\n"

    def get_ip(self):
        """Return the IP address of the host."""
        return self.ip
class Switch:
    """A switch node in the network, identified by its name."""

    def __init__(self, name):
        """Create a new Switch.

        :param name: the name of the switch
        """
        self.name = name

    def __str__(self):
        """Return the switch name as a string."""
        return str(self.name)

    def __repr__(self):
        """Return the same string as ``__str__``."""
        return str(self.name)

    def add_to_file(self):
        """Return the ``net.addSwitch`` line used when writing this switch to a file."""
        return f"{self.name} = net.addSwitch( '{self.name}' )\n"
class Controller:
    """A controller node in the network, identified by its name."""

    def __init__(self, name):
        """Create a new Controller.

        :param name: the name of the controller
        """
        self.name = name

    def __str__(self):
        """Return the controller name as a string."""
        return str(self.name)

    def __repr__(self):
        """Return the same string as ``__str__``."""
        return str(self.name)

    def add_to_file(self):
        """Return the ``net.addController`` line used when writing this controller to a file."""
        return f"{self.name} = net.addController( '{self.name}' )\n"
class Link:
    """A link (edge) between two nodes in the network.

    Endpoints may be node names (strings) or node objects; note that
    ``add_to_file`` assumes string endpoints (it concatenates them).
    """

    def __init__(self, first, second):
        """Create a Link.

        :param first: the first endpoint of the link
        :param second: the second endpoint of the link
        """
        self.first = first
        self.second = second

    def __str__(self):
        """Return the link formatted as ``"<first> <-> <second>"``.

        Bug fix: previously this concatenated the raw endpoints, which
        raised TypeError for non-string endpoints (e.g. the Host/Switch
        objects created in ``__main__``); it now converts each endpoint
        via ``str``, consistent with ``__repr__``.
        """
        return str(self.first) + " <-> " + str(self.second)

    def __repr__(self):
        """Return the same formatted string as ``__str__``."""
        return str(self.first) + " <-> " + str(self.second)

    def add_to_file(self):
        """Return the ``net.addLink`` line used when writing this link to a file.

        Assumes both endpoints are name strings.
        """
        return self.first + self.second + " = net.addLink( '" + self.first + "', " + "'" + self.second + "' )\n"

    def to_tuple(self):
        """Return the endpoints as a ``(first, second)`` tuple."""
        return self.first, self.second
# Global registry of all nodes and links in the network, grouped by kind.
# Values are lists of the corresponding Host/Switch/Controller/Link objects.
graph = {
    "hosts": [],
    "switches": [],
    "controllers": [],
    "links": []
}
if __name__ == '__main__':
    # Smoke test: build one node of each kind, link a host to a switch,
    # register everything in the global graph, and print it.
    h1 = Host("h1", "127.0.0.1")
    print(h1)
    s1 = Switch("s1")
    print(s1)
    c1 = Controller("c1")
    print(c1)
    l1 = Link(h1, s1)
    print("l1 first name: " + l1.first.name)
    graph['hosts'].append(h1)
    graph['switches'].append(s1)
    graph['controllers'].append(c1)
    graph['links'].append(l1)
    print(graph) # uses __repr__ for printing
    print(graph.get('hosts')[0]) # uses __str__ for printing
c86033c4a60b8854b7933b793acf0cd88e9a40b4 | 5,881 | py | Python | source/deepsecurity/models/port_list.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:09.000Z | 2021-10-30T16:40:09.000Z | source/deepsecurity/models/port_list.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:19:03.000Z | 2021-07-28T20:19:03.000Z | source/deepsecurity/models/port_list.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:02.000Z | 2021-10-30T16:40:02.000Z | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortList(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # NOTE: the triple-quoted block below is a plain (no-op) string
    # statement, not the class docstring -- it is emitted by the swagger
    # generator purely as inline reference documentation.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its Python type (used by the serializer).
    swagger_types = {
        'name': 'str',
        'description': 'str',
        'items': 'list[str]',
        'id': 'int'
    }

    # Maps each model attribute to its JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'description': 'description',
        'items': 'items',
        'id': 'ID'
    }

    def __init__(self, name=None, description=None, items=None, id=None):  # noqa: E501
        """PortList - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._description = None
        self._items = None
        self._id = None
        self.discriminator = None  # no polymorphic subtypes for this model
        # Only assign attributes that were explicitly provided; assignment
        # goes through the property setters below.
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if items is not None:
            self.items = items
        if id is not None:
            self.id = id

    @property
    def name(self):
        """Gets the name of this PortList.  # noqa: E501
        Name of the port list. Searchable as String.  # noqa: E501
        :return: The name of this PortList.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this PortList.
        Name of the port list. Searchable as String.  # noqa: E501
        :param name: The name of this PortList.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def description(self):
        """Gets the description of this PortList.  # noqa: E501
        Description of the port list. Searchable as String.  # noqa: E501
        :return: The description of this PortList.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this PortList.
        Description of the port list. Searchable as String.  # noqa: E501
        :param description: The description of this PortList.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def items(self):
        """Gets the items of this PortList.  # noqa: E501
        List of comma-delimited port numbers. Can contain single ports or port ranges (for example: \"20-21\").  # noqa: E501
        :return: The items of this PortList.  # noqa: E501
        :rtype: list[str]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this PortList.
        List of comma-delimited port numbers. Can contain single ports or port ranges (for example: \"20-21\").  # noqa: E501
        :param items: The items of this PortList.  # noqa: E501
        :type: list[str]
        """
        self._items = items

    @property
    def id(self):
        """Gets the id of this PortList.  # noqa: E501
        ID of the port list. Searchable as ID.  # noqa: E501
        :return: The id of this PortList.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this PortList.
        ID of the port list. Searchable as ID.  # noqa: E501
        :param id: The id of this PortList.  # noqa: E501
        :type: int
        """
        self._id = id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize any dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generator-emitted guard for models that subclass dict (not the
        # case here): merge the dict contents into the result as well.
        if issubclass(PortList, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.970443 | 311 | 0.543445 |
ef7985a0eebda08b6bbbcef54fd5aa8b84715adc | 22,308 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_bastion_hosts_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_bastion_hosts_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_bastion_hosts_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BastionHostsOperations:
"""BastionHostsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
bastion_host_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
bastion_host_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Bastion Host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param bastion_host_name: The name of the Bastion Host.
:type bastion_host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'} # type: ignore
async def get(
self,
resource_group_name: str,
bastion_host_name: str,
**kwargs
) -> "models.BastionHost":
"""Gets the specified Bastion Host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param bastion_host_name: The name of the Bastion Host.
:type bastion_host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BastionHost, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.BastionHost
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BastionHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BastionHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
bastion_host_name: str,
parameters: "models.BastionHost",
**kwargs
) -> "models.BastionHost":
cls = kwargs.pop('cls', None) # type: ClsType["models.BastionHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BastionHost')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BastionHost', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BastionHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
bastion_host_name: str,
parameters: "models.BastionHost",
**kwargs
) -> AsyncLROPoller["models.BastionHost"]:
"""Creates or updates the specified Bastion Host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param bastion_host_name: The name of the Bastion Host.
:type bastion_host_name: str
:param parameters: Parameters supplied to the create or update Bastion Host operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.BastionHost
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BastionHost or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.BastionHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.BastionHost"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BastionHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'} # type: ignore
def list(
    self,
    **kwargs
) -> AsyncIterable["models.BastionHostListResult"]:
    """Lists all Bastion Hosts in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either BastionHostListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionHostListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.BastionHostListResult"]
    # Map common HTTP failures to typed exceptions; callers may extend/override
    # the mapping via the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP GET request for either the first page (no next_link)
        # or a continuation page (next_link supplied by the service).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation URL is used verbatim; no extra query parameters are added.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation link or None, async iterable of items).
        deserialized = self._deserialize('BastionHostListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # Custom cls hook transforms the raw list of elements for this page.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; raise a typed error for any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bastionHosts'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["models.BastionHostListResult"]:
    """Lists all Bastion Hosts in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either BastionHostListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionHostListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.BastionHostListResult"]
    # Map common HTTP failures to typed exceptions; callers may extend/override
    # the mapping via the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP GET request for either the first page (no next_link)
        # or a continuation page (next_link supplied by the service).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation URL is used verbatim; no extra query parameters are added.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation link or None, async iterable of items).
        deserialized = self._deserialize('BastionHostListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # Custom cls hook transforms the raw list of elements for this page.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; raise a typed error for any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts'}  # type: ignore
| 48.181425 | 193 | 0.663125 |
98e6adcef40899caf6c02252b50b9a6a96cd12ec | 550 | py | Python | Jumpscale/tools/perftesttools/NodeHost.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | null | null | null | Jumpscale/tools/perftesttools/NodeHost.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | 162 | 2018-07-31T14:40:33.000Z | 2019-04-03T06:31:20.000Z | Jumpscale/tools/perftesttools/NodeHost.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | 2 | 2018-07-31T12:42:02.000Z | 2018-11-21T09:40:22.000Z | from jumpscale import j
# import sys
# import time
#
# import os
# import psutil
from .NodeBase import NodeBase
class NodeHost(NodeBase):
def __init__(self, ipaddr, sshport=22, name=""):
"""
is host running the hypervisor
"""
NodeBase.__init__(self, ipaddr=ipaddr, sshport=sshport, role="host", name=name)
self.startMonitor()
def authorizeKey(self, keypath="/home/despiegk/.ssh/perftest.pub"):
from IPython import embed
self.logger.debug("DEBUG NOW authorizeKey")
embed()
| 22 | 87 | 0.650909 |
1639c4134685f000997e372d849b0fd6487b06cd | 2,360 | py | Python | dataproc/list_clusters.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 3 | 2021-01-24T23:42:57.000Z | 2021-02-17T12:02:12.000Z | dataproc/list_clusters.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 320 | 2020-11-08T21:02:43.000Z | 2022-02-10T10:43:29.000Z | dataproc/list_clusters.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 2 | 2019-11-04T18:25:20.000Z | 2019-11-05T14:35:28.000Z | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample command-line program to list Cloud Dataproc clusters in a region.
Example usage:
python list_clusters.py --project_id=my-project-id --region=global
"""
import argparse
from google.cloud import dataproc_v1
from google.cloud.dataproc_v1.gapic.transports import (
cluster_controller_grpc_transport)
# [START dataproc_list_clusters]
def list_clusters(dataproc, project, region):
    """Print the name and state of every cluster in the given project/region."""
    for cluster in dataproc.list_clusters(project, region):
        state_name = cluster.status.State.Name(cluster.status.state)
        print('{} - {}'.format(cluster.cluster_name, state_name))
# [END dataproc_list_clusters]
def main(project_id, region):
    """Build the appropriate Cloud Dataproc client for *region* and list clusters."""
    if region != 'global':
        # Use a regional gRPC endpoint. See:
        # https://cloud.google.com/dataproc/docs/concepts/regional-endpoints
        transport = (
            cluster_controller_grpc_transport.ClusterControllerGrpcTransport(
                address='{}-dataproc.googleapis.com:443'.format(region)))
        client = dataproc_v1.ClusterControllerClient(transport)
    else:
        # Use the default gRPC global endpoints.
        client = dataproc_v1.ClusterControllerClient()

    list_clusters(client, project_id, region)
if __name__ == '__main__':
    # Command-line entry point: both flags are mandatory.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        '--project_id', help='Project ID to access.', required=True)
    arg_parser.add_argument(
        '--region', help='Region of clusters to list.', required=True)

    parsed = arg_parser.parse_args()
    main(parsed.project_id, parsed.region)
| 36.875 | 77 | 0.70678 |
02ca9db72bb196c11e874507afd2599e944904ff | 265 | py | Python | tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_/test_artificial_1024_Difference_Lag1Trend_7__0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_/test_artificial_1024_Difference_Lag1Trend_7__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_/test_artificial_1024_Difference_Lag1Trend_7__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0); | 37.857143 | 165 | 0.732075 |
69f891add9cc5faa9bcc3b0fb3fb67de87cd7426 | 34,817 | py | Python | pysnmp-with-texts/MICOM-RSI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/MICOM-RSI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/MICOM-RSI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module MICOM-RSI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MICOM-RSI-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:12:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
micom_oscar, = mibBuilder.importSymbols("MICOM-OSCAR-MIB", "micom-oscar")
mcmSysAsciiTimeOfDay, = mibBuilder.importSymbols("MICOM-SYS-MIB", "mcmSysAsciiTimeOfDay")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, MibIdentifier, Unsigned32, NotificationType, Integer32, Counter64, ObjectIdentity, NotificationType, iso, TimeTicks, Bits, Counter32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "MibIdentifier", "Unsigned32", "NotificationType", "Integer32", "Counter64", "ObjectIdentity", "NotificationType", "iso", "TimeTicks", "Bits", "Counter32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- MICOM RSI MIB subtree roots under enterprises.335.1.4.23 ---
# MIB names contain '-', which is not valid in Python identifiers; setLabel()
# records the original hyphenated label for each node.
micom_rsi = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23)).setLabel("micom-rsi")
rsi_configuration = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1)).setLabel("rsi-configuration")
rsi_control = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 2)).setLabel("rsi-control")
rsi_statistics = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3)).setLabel("rsi-statistics")
# Operational (read-only) RSI system configuration defaults group.
# setDescription() texts are only attached when mibBuilder.loadTexts is set.
mcmRSISysCfgDefGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 1))
mcmRSISysCfgDefVNCSInstance = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSISysCfgDefVNCSInstance.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSISysCfgDefVNCSInstance.setDescription('NAME = ; DESC = The value of this object provides a unique \\ VNCS identifier. (Operational); HELP = This allows multiple VNCS databases to \\ exist in the Passport.; CAPABILITIES = NET_CFG, VPN_DISP ;')
mcmRSISysCfgDefNumCacheEntries = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 100)).clone(30)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSISysCfgDefNumCacheEntries.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSISysCfgDefNumCacheEntries.setDescription("NAME = ; DESC = The value of this object specifies the \\ maximum number of Address Resolutions \\ to be cached in memory. (Operational); HELP = If this value is very high, the free \\ memory space on the unit will be reduced \\ which may affect the unit's performance. \\ A very low value may cause the unit to \\ frequently consult the server if diverse \\ numbers are dialed, defeating the purpose \\ of having a local cache. The optimum value \\ will depend on the network size and the \\ number of frequently dialed DNs. It is \\ recommended to start with the default value \\ (30) and tune it to the optimum value \\ by observing the performance.; CAPABILITIES = NET_CFG, VPN_DISP ;")
# Writable RSI system configuration settings group (retries, timeout, cache control).
mcmRSISysCfgSetGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 2))
mcmRSISysCfgSetAddrResRetries = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrResRetries.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrResRetries.setDescription('NAME = ; DESC = The value of this object specifies the \\ number of times an address resolution \\ request is sent to the RSA to get a DNA \\ address. (Operational); HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSISysCfgSetAddrResTimeout = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrResTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrResTimeout.setDescription('NAME = ; DESC = The value of this object specifies the \\ timeout period (in tenths of a second), after \\ which, an address resolution request will be \\ resent to the RSA (in case the previous one \\ is not answered). (Operational); HELP = Setting a very low value in a \\ heavily loaded network would result in \\ transmissions of multiple requests \\ further increasing the network load. \\ It is recommended to tune this value \\ by considering the network load and \\ performance.; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSISysCfgSetAddrCacheStatus = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("flush", 3))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrCacheStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSISysCfgSetAddrCacheStatus.setDescription('NAME = ; DESC = The value of this object specifies the \\ ability to enable or disable caching of \\ previously resolved addresses. Also, the \\ capability to remove previously resolved \\ addresses from the cache is provided. \\ (Configuration); HELP = It is recommended that caching be enabled \\ as it helps relieve the load on the server, \\ reduces network traffic, speeds up address \\ resolution, and hence call-setup. \\ Flushing of cache is recommended if the \\ network configuration has changed resulting \\ in changes in DNAs and/or DNs, rendering \\ the previously cached resolutions incorrect.; CAPABILITIES = NET_CFG, VPN_CFG ;')
# --- RSI cache configuration table (operational view of the DN->DNA cache) ---
# Rows are indexed by the dialed-number digit string (mcmRSICacheCfgDNDigits).
mcmRSICacheCfgTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3), )
if mibBuilder.loadTexts: mcmRSICacheCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgTable.setDescription('NAME = ; DESC = The RSI Cache Configuration Table. \\ (Operational) ; HELP = ; CAPABILITIES = NET_DISP, VPN_DISP ;')
mcmRSICacheCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1), ).setIndexNames((0, "MICOM-RSI-MIB", "mcmRSICacheCfgDNDigits"))
if mibBuilder.loadTexts: mcmRSICacheCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgEntry.setDescription('NAME = ; DESC = An entry in the RSI Cache Configuration \\ Table. (Operational) ; HELP = ; CAPABILITIES = ;')
# Index column: the dialed digit sequence that was resolved.
mcmRSICacheCfgDNDigits = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSICacheCfgDNDigits.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgDNDigits.setDescription('NAME = ; DESC = The RSI cache DN digits specifies the dialing \\ number sequence which is resolved to a DNA. \\ Used as the index to the RSI Cache Configuration \\ table. (Operational); HELP = ; CAPABILITIES = ;')
mcmRSICacheCfgDNAAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSICacheCfgDNAAddr.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgDNAAddr.setDescription('NAME = ; DESC = The value of this object designates the DNA \\ address of the destination unit for the specified \\ DN digit string. (Operational); HELP = Note: DNA is used by Nortel to identify \\ services in a given Magellan Passport Network.; CAPABILITIES = ;')
mcmRSICacheCfgProfileNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSICacheCfgProfileNumber.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgProfileNumber.setDescription('NAME = ; DESC = The value of this object designates the number \\ of the Profile associated with this entry. \\ (Operational) ; HELP = The Passport uses a Profile to assign call \\ parameters. This is the number that identifies \\ this profile.; CAPABILITIES = ;')
mcmRSICacheCfgNumberOfHits = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSICacheCfgNumberOfHits.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgNumberOfHits.setDescription('NAME = ; DESC = The value of this object indicates the \\ number of times this entry was used \\ by this unit for resolving dialed digits. \\ (Operational); HELP = This value is useful in identifying \\ frequently dialed numbers; CAPABILITIES = ;')
mcmRSICacheCfgServerDNA = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSICacheCfgServerDNA.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSICacheCfgServerDNA.setDescription("NAME = ; DESC = The value of this object indicates the DNA \\ address of the server which resolved this entry. \\ Value of this object is valid only when \\ EntryType is 'learned'. (Operational) ; HELP = The DNA is unique to a given server. This \\ object helps in tracking the address resolution \\ by a particular server. A DNA is used by \\ Nortel to identify services in a given Magellan \\ Passport Network.; CAPABILITIES = ;")
# --- RSI server table (operational view of configured address-resolution servers) ---
# Rows are indexed by the server's DNA address (mcmRSIServerDNAAddr).
mcmRSIServerTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4), )
if mibBuilder.loadTexts: mcmRSIServerTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerTable.setDescription('NAME = ; DESC = The RSI Server Configuration Table. \\ (Operational) ; HELP = This table provides the list of servers this \\ unit can use to request address resolutions from.; CAPABILITIES = NET_CFG, VPN_DISP ;')
mcmRSIServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1), ).setIndexNames((0, "MICOM-RSI-MIB", "mcmRSIServerDNAAddr"))
if mibBuilder.loadTexts: mcmRSIServerEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerEntry.setDescription('NAME = ; DESC = An entry in the RSI Server Configuration \\ Table. (Operational) ; HELP = ; CAPABILITIES = ;')
# Index column: DNA digits identifying a unique RSA (resolution server).
mcmRSIServerDNAAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerDNAAddr.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerDNAAddr.setDescription('NAME = ; DESC = The RSI Server DNA address specifies the \\ DNA digits corresponding to a unique RSA. \\ Used as the index to the RSI Server \\ Configuration table. (Operational); HELP = ; CAPABILITIES = ;')
mcmRSIServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerName.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerName.setDescription('NAME = ; DESC = The value of this object designates the user \\ supplied name for the server. (Operational); HELP = ; CAPABILITIES = ;')
mcmRSIServerType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerType.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerType.setDescription("NAME = ; DESC = The value of this object indicates the preferred \\ server which that will be consulted for address \\ resolution. (Operational); HELP = A 'primary' server will be consulted first for \\ address resolution. A 'secondary' server is used \\ as an alternative if the primary is not responding. \\ Note that all servers have to be marked 'secondary' \\ if round-robin load sharing is desired between \\ servers.; CAPABILITIES = ;")
mcmRSIServerPortID = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerPortID.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerPortID.setDescription('NAME = ; DESC = The value of this object indicates the \\ current WAN port being used for the DLCI \\ call between RSI and RSA.; HELP = When a connection to a remote server is \\ established, either the primary or the \\ backup port will be in use. This field \\ corresponds to the Wide Area Network (WAN) \\ port numbering convention for primary and \\ backup links.; CAPABILITIES = NET_OPER, VPN_OPER ;')
mcmRSIServerDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerDLCI.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerDLCI.setDescription('NAME = ; DESC = The value of this object indicates the DLCI of \\ the RSI-RSA connection. (Operational); HELP = When a connection to a remote server is \\ established, a DLCI is assigned for transferring \\ data across a virtual channel. This DLCI is \\ useful for debugging. When no connection is \\ present, the DLCI will be reported as 0.; CAPABILITIES = ;')
mcmRSIServerAvailStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("notAvailable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerAvailStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerAvailStatus.setDescription("NAME = ; DESC = The value of this object indicates the \\ availability of the server for address \\ resolution. (Operational); HELP = A server is said to be 'available' if this \\ unit has been successfully communicating \\ with it. A server is said to be 'notAvailable' \\ if this unit has temporarily lost communication \\ with it.; CAPABILITIES = ;")
mcmRSIServerLastDisconnectCause = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 7), DisplayString().subtype(subtypeSpec=ValueRangeConstraint(1, 50))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerLastDisconnectCause.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerLastDisconnectCause.setDescription('NAME = ; DESC = The string displayed by this object \\ indicates the reason for the last \\ RSI-RSA call disconnect for this server.; HELP = When a connection to a remote server is \\ disconnected, a cause code may be generated \\ to aid in determining the reason for the call \\ disconnect. This shows the last reported reason.; CAPABILITIES = ;')
# Per-server counters (since start up): requests, resolutions, misses, timeouts, recoveries.
mcmRSIServerRequestCount = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerRequestCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerRequestCount.setDescription('NAME = ; DESC = The value of this object indicates the \\ number of DN-DNA requests to this \\ server since start up. \\ (Operational); HELP = ; CAPABILITIES = ;')
mcmRSIServerResolvedCount = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerResolvedCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerResolvedCount.setDescription("NAME = ; DESC = The value of this object indicates the \\ number of DN-DNA's resolved by this \\ server since start up. \\ (Operational); HELP = ; CAPABILITIES = ;")
mcmRSIServerNoNumberCount = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerNoNumberCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerNoNumberCount.setDescription('NAME = ; DESC = The value of this object indicates the \\ number of DN-DNA requests with phone \\ numbers not found in server since start up. \\ (Operational); HELP = ; CAPABILITIES = ;')
mcmRSIServerTimeoutCount = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerTimeoutCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerTimeoutCount.setDescription('NAME = ; DESC = The value of this object indicates the \\ number of entries resent due to timeouts \\ since start up. \\ (Operational); HELP = ; CAPABILITIES = ;')
mcmRSIServerRecoveryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 4, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIServerRecoveryCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIServerRecoveryCount.setDescription('NAME = ; DESC = The value of this object indicates the \\ number of times RSI has attempted to \\ reestablish a connection to this server. \\ (Operational); HELP = ; CAPABILITIES = ;')
# --- Non-volatile (nvm*) mirrors of the RSI configuration objects ---
# These hold the persisted configuration, as opposed to the mcm* operational view;
# note the access modes differ (e.g. nvm Def group is read-write here).
nvmRSISysCfgDefGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 5))
nvmRSISysCfgDefVNCSInstance = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmRSISysCfgDefVNCSInstance.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSISysCfgDefVNCSInstance.setDescription('NAME = ; DESC = The value of this object provides a unique \\ VNCS identifier. (Configuration); HELP = This allows multiple VNCS databases to \\ exist in the Passport.; CAPABILITIES = NET_CFG, VPN_DISP ;')
nvmRSISysCfgDefNumCacheEntries = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 5, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 100)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmRSISysCfgDefNumCacheEntries.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSISysCfgDefNumCacheEntries.setDescription("NAME = ; DESC = The value of this object specifies the \\ maximum number of Address Resolutions \\ to be cached in memory. (Configuration); HELP = If this value is very high, the free \\ memory space on the unit will be reduced \\ which may affect the unit's performance. \\ A very low value may cause the unit to \\ frequently consult the server if diverse \\ numbers are dialed, defeating the purpose \\ of having a local cache. The optimum value \\ will depend on the network size and the \\ number of frequently dialed DNs. It is \\ recommended to start with the default value \\ (30) and tune it to the optimum value \\ by observing the performance.; CAPABILITIES = NET_CFG, VPN_DISP ;")
nvmRSISysCfgSetGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 6))
nvmRSISysCfgSetAddrResRetries = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 6, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)).clone(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrResRetries.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrResRetries.setDescription('NAME = ; DESC = The value of this object specifies the \\ number of times an address resolution \\ request is sent to the RSA to get a DNA \\ address. (Configuration); HELP = ; CAPABILITIES = NET_CFG, VPN_DISP ;')
nvmRSISysCfgSetAddrResTimeout = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30)).clone(15)).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrResTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrResTimeout.setDescription('NAME = ; DESC = The value of this object specifies the \\ timeout period (in tenths of a second), after \\ which, an address resolution request will be \\ resent to the RSA (in case the previous one \\ is not answered). (Configuration); HELP = Setting a very low value in a \\ heavily loaded network would result in \\ transmissions of multiple requests \\ further increasing the network load. \\ It is recommended to tune this value \\ by considering the network load and \\ performance.; CAPABILITIES = NET_CFG, VPN_DISP ;')
# Unlike the mcm* counterpart, the nvm cache status has no 'flush' action value.
nvmRSISysCfgSetAddrCacheStatus = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('enabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrCacheStatus.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSISysCfgSetAddrCacheStatus.setDescription('NAME = ; DESC = The value of this object specifies the \\ ability to enable or disable caching of \\ previously resolved addresses. \\ (Configuration); HELP = It is recommended that caching be enabled \\ as it helps relieve the load on the server, \\ reduces network traffic, speeds up address \\ resolution, and hence call-setup.; CAPABILITIES = NET_CFG, VPN_DISP ;')
# Persisted RSI server table; indexed by the server DNA address.
nvmRSIServerTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7), )
if mibBuilder.loadTexts: nvmRSIServerTable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerTable.setDescription('NAME = ; DESC = The RSI Server Configuration Table. \\ (Configuration) ; HELP = This table provides the list of servers this \\ unit can use to request address resolutions from.; CAPABILITIES = NET_CFG, VPN_DISP ;')
nvmRSIServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7, 1), ).setIndexNames((0, "MICOM-RSI-MIB", "nvmRSIServerDNAAddr"))
if mibBuilder.loadTexts: nvmRSIServerEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerEntry.setDescription('NAME = ; DESC = An entry in the RSI Server Configuration \\ Table. (Configuration) ; HELP = ; CAPABILITIES = ;')
nvmRSIServerDNAAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmRSIServerDNAAddr.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerDNAAddr.setDescription('NAME = ; DESC = The RSI Server DNA address specifies the \\ DNA digits corresponding to a unique RSA. \\ Used as the index to the RSI Server \\ Configuration table. (Configuration); HELP = ; CAPABILITIES = ;')
nvmRSIServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmRSIServerName.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerName.setDescription('NAME = ; DESC = The value of this object designates the user \\ supplied name for the server. (Configuration); HELP = ; CAPABILITIES = ;')
nvmRSIServerType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmRSIServerType.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerType.setDescription("NAME = ; DESC = The value of this object indicates the preferred \\ server which that will be consulted for address \\ resolution. (Configuration); HELP = A 'primary' server will be consulted first for \\ address resolution. A 'secondary' server is used \\ as an alternative if the primary is not responding. \\ Note that all servers have to be marked 'secondary' \\ if round-robin load sharing is desired between \\ servers.; CAPABILITIES = ;")
nvmRSIServerEntryRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("delete", 2), ("active", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmRSIServerEntryRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: nvmRSIServerEntryRowStatus.setDescription("NAME = ; DESC = For software releases prior to 3.0 \\ this object only supports add and delete \\ with an access of write-only. This object is used to insert or remove \\ an RSA entry in this RSI server table. \\ Upon creation of a row, the \\ the row status is internally set to 'active'. \\ (Configuration); HELP = ; CAPABILITIES = ;")
mcmRSIStatisticsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1))
mcmRSIStatisticsCacheCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsCacheCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsCacheCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of entries currently in the RSI cache. \\ Includes learned entries. (Not \\ cleared by mcmRSICounterResetCmd.); HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsRequestAllCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsRequestAllCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsRequestAllCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests received from \\ the voice application.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsLocalResolvedCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsLocalResolvedCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsLocalResolvedCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests resolved out \\ of the local RSI cache.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsPurgeCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsPurgeCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsPurgeCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of purge requests received from the \\ voice application.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsServerCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsServerCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsServerCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of servers configured to resolve \\ addresses. (Not cleared by \\ mcmRSICounterResetCmd.); HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsServerRequestCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsServerRequestCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsServerRequestCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests made to all \\ servers.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsServerResolvedCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsServerResolvedCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsServerResolvedCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests resolved by \\ all servers.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsServerNoNumberCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsServerNoNumberCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsServerNoNumberCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests with phone \\ numbers not found in servers.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsTimeoutCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsTimeoutCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsTimeoutCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of DN-DNA requests timed out.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSIStatisticsRecoveryCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmRSIStatisticsRecoveryCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmRSIStatisticsRecoveryCount.setDescription('NAME = ; DESC = The value of this object indicates the total \\ number of attempts to connect to all servers.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG ;')
mcmRSICounterResetCmd = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 23, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmRSICounterResetCmd.setStatus('obsolete')
if mibBuilder.loadTexts: mcmRSICounterResetCmd.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ Setting this object to reset(1) will zero \\ the values of the counters in the RSI \\ Statistics table.; HELP = ; CAPABILITIES = NET_OPER, VPN_OPER;')
mcmAlarmRsiFailedToLocateRSA = NotificationType((1, 3, 6, 1, 4, 1, 335, 1, 4, 23) + (0,1)).setObjects(("MICOM-SYS-MIB", "mcmSysAsciiTimeOfDay"))
if mibBuilder.loadTexts: mcmAlarmRsiFailedToLocateRSA.setDescription('NAME = ServerUnreachable; DESC = No RSA was configured\\ Severity Level: CRITICAL.; HELP = No RSA server was configured on the access device. \\ At least one RSA server must be configured in the \\ access device using the add rsi server command.;')
mcmAlarmRsiRSAIsDown = NotificationType((1, 3, 6, 1, 4, 1, 335, 1, 4, 23) + (0,2)).setObjects(("MICOM-SYS-MIB", "mcmSysAsciiTimeOfDay"), ("MICOM-RSI-MIB", "mcmRSIServerDNAAddr"))
if mibBuilder.loadTexts: mcmAlarmRsiRSAIsDown.setDescription('NAME = ServerCommunicationLost; DESC = This unit is now unable to communicate with a \\ previously available server. \\ This unit will periodically attempt to re-establish \\ communication with the server.\\ Severity Level: CRITICAL.; HELP = Communication is lost with a previously available server. \\ This could indicate a problem with the Frame Relay \\ network, or the server in the Passport may be temporarily \\ out-of-service. This unit will periodically attempt to \\ re-establish communication with the server, or the \\ alternate server, if configured.;')
mcmAlarmRsiRSAIsUp = NotificationType((1, 3, 6, 1, 4, 1, 335, 1, 4, 23) + (0,3)).setObjects(("MICOM-SYS-MIB", "mcmSysAsciiTimeOfDay"), ("MICOM-RSI-MIB", "mcmRSIServerDNAAddr"))
if mibBuilder.loadTexts: mcmAlarmRsiRSAIsUp.setDescription('NAME = ServerCommunicationRestored; DESC = This unit is now able to communicate with a \\ server with which it had previously lost communication. Severity Level: INFORMATIVE.; HELP = Communication is restored with a previously \\ non-responsive server. \\ It will henceforth be used for address resolution.;')
mibBuilder.exportSymbols("MICOM-RSI-MIB", mcmRSIServerDLCI=mcmRSIServerDLCI, rsi_configuration=rsi_configuration, mcmRSIStatisticsCacheCount=mcmRSIStatisticsCacheCount, nvmRSISysCfgDefVNCSInstance=nvmRSISysCfgDefVNCSInstance, mcmRSIStatisticsRequestAllCount=mcmRSIStatisticsRequestAllCount, mcmRSISysCfgSetAddrResRetries=mcmRSISysCfgSetAddrResRetries, mcmRSIServerName=mcmRSIServerName, mcmAlarmRsiRSAIsUp=mcmAlarmRsiRSAIsUp, mcmRSIStatisticsLocalResolvedCount=mcmRSIStatisticsLocalResolvedCount, nvmRSISysCfgDefGroup=nvmRSISysCfgDefGroup, mcmRSIServerTable=mcmRSIServerTable, mcmRSICacheCfgProfileNumber=mcmRSICacheCfgProfileNumber, mcmRSIServerRequestCount=mcmRSIServerRequestCount, nvmRSISysCfgDefNumCacheEntries=nvmRSISysCfgDefNumCacheEntries, mcmRSIServerPortID=mcmRSIServerPortID, mcmRSIServerResolvedCount=mcmRSIServerResolvedCount, nvmRSIServerEntry=nvmRSIServerEntry, mcmRSIServerLastDisconnectCause=mcmRSIServerLastDisconnectCause, mcmRSIStatisticsPurgeCount=mcmRSIStatisticsPurgeCount, mcmRSIStatisticsServerCount=mcmRSIStatisticsServerCount, mcmRSISysCfgSetGroup=mcmRSISysCfgSetGroup, mcmRSICacheCfgDNDigits=mcmRSICacheCfgDNDigits, mcmRSISysCfgSetAddrResTimeout=mcmRSISysCfgSetAddrResTimeout, mcmRSIStatisticsRecoveryCount=mcmRSIStatisticsRecoveryCount, mcmRSICounterResetCmd=mcmRSICounterResetCmd, nvmRSISysCfgSetAddrCacheStatus=nvmRSISysCfgSetAddrCacheStatus, mcmRSISysCfgSetAddrCacheStatus=mcmRSISysCfgSetAddrCacheStatus, nvmRSIServerDNAAddr=nvmRSIServerDNAAddr, mcmRSICacheCfgServerDNA=mcmRSICacheCfgServerDNA, mcmRSIStatisticsServerResolvedCount=mcmRSIStatisticsServerResolvedCount, nvmRSIServerType=nvmRSIServerType, mcmRSISysCfgDefGroup=mcmRSISysCfgDefGroup, mcmRSIServerType=mcmRSIServerType, nvmRSISysCfgSetAddrResTimeout=nvmRSISysCfgSetAddrResTimeout, mcmRSIStatisticsServerRequestCount=mcmRSIStatisticsServerRequestCount, nvmRSIServerName=nvmRSIServerName, mcmRSIServerEntry=mcmRSIServerEntry, micom_rsi=micom_rsi, mcmRSICacheCfgEntry=mcmRSICacheCfgEntry, 
mcmRSIServerRecoveryCount=mcmRSIServerRecoveryCount, mcmRSIServerDNAAddr=mcmRSIServerDNAAddr, mcmRSISysCfgDefVNCSInstance=mcmRSISysCfgDefVNCSInstance, mcmRSIStatisticsServerNoNumberCount=mcmRSIStatisticsServerNoNumberCount, mcmRSISysCfgDefNumCacheEntries=mcmRSISysCfgDefNumCacheEntries, rsi_control=rsi_control, nvmRSISysCfgSetGroup=nvmRSISysCfgSetGroup, nvmRSISysCfgSetAddrResRetries=nvmRSISysCfgSetAddrResRetries, mcmRSIServerNoNumberCount=mcmRSIServerNoNumberCount, mcmAlarmRsiFailedToLocateRSA=mcmAlarmRsiFailedToLocateRSA, nvmRSIServerTable=nvmRSIServerTable, mcmAlarmRsiRSAIsDown=mcmAlarmRsiRSAIsDown, mcmRSICacheCfgDNAAddr=mcmRSICacheCfgDNAAddr, mcmRSICacheCfgNumberOfHits=mcmRSICacheCfgNumberOfHits, mcmRSIServerTimeoutCount=mcmRSIServerTimeoutCount, mcmRSIStatisticsTimeoutCount=mcmRSIStatisticsTimeoutCount, mcmRSIStatisticsGroup=mcmRSIStatisticsGroup, mcmRSICacheCfgTable=mcmRSICacheCfgTable, nvmRSIServerEntryRowStatus=nvmRSIServerEntryRowStatus, rsi_statistics=rsi_statistics, mcmRSIServerAvailStatus=mcmRSIServerAvailStatus)
| 197.823864 | 3,017 | 0.770601 |
2be27a2842d6576c8935cd4cb6a57326f42b9702 | 21,227 | py | Python | physionet-django/project/modelcomponents/activeproject.py | rguo123/physionet-build | 09a7467052df11711fa23a9180dfeb5dcfe6a836 | [
"BSD-3-Clause"
] | 36 | 2019-02-14T18:10:39.000Z | 2022-01-21T12:48:52.000Z | physionet-django/project/modelcomponents/activeproject.py | rguo123/physionet-build | 09a7467052df11711fa23a9180dfeb5dcfe6a836 | [
"BSD-3-Clause"
] | 1,051 | 2019-01-31T18:03:14.000Z | 2022-03-31T20:53:04.000Z | physionet-django/project/modelcomponents/activeproject.py | rguo123/physionet-build | 09a7467052df11711fa23a9180dfeb5dcfe6a836 | [
"BSD-3-Clause"
] | 13 | 2019-03-26T11:02:32.000Z | 2022-03-17T11:39:49.000Z | from datetime import timedelta
from html import unescape
import logging
import os
import shutil
from background_task import background
from django.conf import settings
from django.db import models, transaction
from django.forms.utils import ErrorList
from django.urls import reverse
from django.utils import timezone
from django.utils.html import strip_tags
from project.modelcomponents.archivedproject import ArchivedProject
from project.modelcomponents.authors import PublishedAffiliation, PublishedAuthor
from project.modelcomponents.metadata import Contact, Metadata, PublishedPublication, PublishedReference
from project.modelcomponents.publishedproject import PublishedProject
from project.modelcomponents.submission import CopyeditLog, EditLog, SubmissionInfo
from project.modelcomponents.unpublishedproject import UnpublishedProject
from project.validators import validate_subdir
LOGGER = logging.getLogger(__name__)
@background()
def move_files_as_readonly(pid, dir_from, dir_to, make_zip):
    """
    Background task that finalizes a published project's files.

    Computes checksums and storage info, then marks every file under the
    project's file root read-only — except files beginning with a shebang
    (`#!`), which are made read+execute — and optionally builds the zip.

    Parameters
    ----------
    pid : primary key of the PublishedProject to process.
    dir_from, dir_to : unused here; kept for the task's call signature.
    make_zip : whether to build the project's zip archive at the end.
    """
    published_project = PublishedProject.objects.get(id=pid)

    published_project.make_checksum_file()

    quota = published_project.quota_manager()
    published_project.incremental_storage_size = quota.bytes_used
    published_project.save(update_fields=['incremental_storage_size'])

    published_project.set_storage_info()

    # Make the files read only
    file_root = published_project.project_file_root()
    for root, dirs, files in os.walk(file_root):
        for f in files:
            # Bug fix: the original did `open(...).read(2)` and never
            # closed the handle, leaking one file descriptor per file.
            with open(os.path.join(root, f), 'rb') as fh:
                fline = fh.read(2)
            if fline[:2] == b'#!':
                # Shebang scripts stay executable.
                os.chmod(os.path.join(root, f), 0o555)
            else:
                os.chmod(os.path.join(root, f), 0o444)
        for d in dirs:
            # Directories need the execute bit to remain traversable.
            os.chmod(os.path.join(root, d), 0o555)

    if make_zip:
        published_project.make_zip()
class ActiveProject(Metadata, UnpublishedProject, SubmissionInfo):
    """
    The project used for submitting

    The submission_status field:
    - 0 : Not submitted
    - 10 : Submitting author submits. Awaiting editor assignment.
    - 20 : Editor assigned. Awaiting editor decision.
    - 30 : Revisions requested. Waiting for resubmission. Loops back
          to 20 when author resubmits.
    - 40 : Accepted. In copyedit stage. Awaiting editor to copyedit.
    - 50 : Editor completes copyedit. Awaiting authors to approve.
    - 60 : Authors approve copyedit. Ready for editor to publish
    """
    # Current stage in the submission workflow; see the class docstring
    # for the meaning of each value.
    submission_status = models.PositiveSmallIntegerField(default=0)

    # Max number of active submitting projects a user is allowed to have
    MAX_SUBMITTING_PROJECTS = 10
    # Largest allowed size for an individual file, in bytes (10 GiB).
    INDIVIDUAL_FILE_SIZE_LIMIT = 10 * 1024**3
    # Where all the active project files are kept
    FILE_ROOT = os.path.join(settings.MEDIA_ROOT, 'active-projects')

    # Metadata fields that must be non-empty before submission, indexed
    # by resource_type.id (see check_integrity).
    REQUIRED_FIELDS = (
        # 0: Database
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'conflicts_of_interest', 'version', 'license',
         'short_description'),
        # 1: Software
        ('title', 'abstract', 'background', 'content_description',
         'usage_notes', 'installation', 'conflicts_of_interest', 'version',
         'license', 'short_description'),
        # 2: Challenge
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'conflicts_of_interest', 'version', 'license',
         'short_description'),
        # 3: Model
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'installation', 'conflicts_of_interest', 'version',
         'license', 'short_description'),
    )

    # Custom labels that don't match model field names
    LABELS = (
        # 0: Database
        {'content_description': 'Data Description'},
        # 1: Software
        {'content_description': 'Software Description',
         'methods': 'Technical Implementation',
         'installation': 'Installation and Requirements'},
        # 2: Challenge
        {'background': 'Objective',
         'methods': 'Participation',
         'content_description': 'Data Description',
         'usage_notes': 'Evaluation'},
        # 3: Model
        {'content_description': 'Model Description',
         'methods': 'Technical Implementation',
         'installation': 'Installation and Requirements'},
    )

    # Human-readable label for each submission_status value.
    SUBMISSION_STATUS_LABELS = {
        0: 'Not submitted.',
        10: 'Awaiting editor assignment.',
        20: 'Awaiting editor decision.',
        30: 'Revisions requested.',
        40: 'Submission accepted; awaiting editor copyedits.',
        50: 'Awaiting authors to approve publication.',
        60: 'Awaiting editor to publish.',
    }
def storage_used(self):
"""
Total storage used in bytes.
This includes the total size of new files uploaded to this
project, as well as the total size of files published in past
versions of this CoreProject. (The QuotaManager should ensure
that the same file is not counted twice in this total.)
"""
current = self.quota_manager().bytes_used
published = self.core_project.total_published_size
return current + published
def storage_allowance(self):
"""
Storage allowed in bytes
"""
return self.core_project.storage_allowance
def get_inspect_dir(self, subdir):
"""
Return the folder to inspect if valid. subdir joined onto
the file root of this project.
"""
# Sanitize subdir for illegal characters
validate_subdir(subdir)
# Folder must be a subfolder of the file root
# (but not necessarily exist or be a directory)
inspect_dir = os.path.join(self.file_root(), subdir)
if inspect_dir.startswith(self.file_root()):
return inspect_dir
else:
raise Exception('Invalid directory request')
def file_url(self, subdir, file):
"""
Url of a file to download in this project
"""
return reverse('serve_active_project_file',
args=(self.slug, os.path.join(subdir, file)))
def file_display_url(self, subdir, file):
"""
URL of a file to display in this project
"""
return reverse('display_active_project_file',
args=(self.slug, os.path.join(subdir, file)))
def under_submission(self):
"""
Whether the project is under submission
"""
return bool(self.submission_status)
def submission_deadline(self):
return self.creation_datetime + timedelta(days=180)
def submission_days_remaining(self):
return (self.submission_deadline() - timezone.now()).days
def submission_status_label(self):
return ActiveProject.SUBMISSION_STATUS_LABELS[self.submission_status]
def author_editable(self):
"""
Whether the project can be edited by its authors
"""
if self.submission_status in [0, 30]:
return True
def copyeditable(self):
"""
Whether the project can be copyedited
"""
if self.submission_status == 40:
return True
    def archive(self, archive_reason):
        """
        Archive the project. Create an ArchivedProject object, copy over
        the fields, and delete this object

        Parameters
        ----------
        archive_reason : integer reason code.  1 is a voluntary delete
            (files are removed); other values keep the files and move
            them to the archived project's file root.  reject() uses 3.
        """
        archived_project = ArchivedProject(archive_reason=archive_reason,
                                           slug=self.slug)
        # Remember the timestamp before save() refreshes the auto_now field.
        modified_datetime = self.modified_datetime
        # Direct copy over fields
        for attr in [f.name for f in Metadata._meta.fields] + [f.name for f in SubmissionInfo._meta.fields]:
            setattr(archived_project, attr, getattr(self, attr))
        archived_project.save()
        # Redirect the related objects
        for reference in self.references.all():
            reference.project = archived_project
            reference.save()
        for publication in self.publications.all():
            publication.project = archived_project
            publication.save()
        for topic in self.topics.all():
            topic.project = archived_project
            topic.save()
        for author in self.authors.all():
            author.project = archived_project
            author.save()
        for edit_log in self.edit_logs.all():
            edit_log.project = archived_project
            edit_log.save()
        for copyedit_log in self.copyedit_logs.all():
            copyedit_log.project = archived_project
            copyedit_log.save()
        for parent_project in self.parent_projects.all():
            archived_project.parent_projects.add(parent_project)
        # NOTE(review): resource_type.id == 1 appears to mean Software
        # (matches REQUIRED_FIELDS index 1) — confirm against resource
        # type definitions.
        if self.resource_type.id == 1:
            languages = self.programming_languages.all()
            if languages:
                archived_project.programming_languages.add(*list(languages))
        # Voluntary delete
        if archive_reason == 1:
            self.clear_files()
        else:
            # Move over files
            os.rename(self.file_root(), archived_project.file_root())
        # Copy the ActiveProject timestamp to the ArchivedProject.
        # Since this is an auto_now field, save() doesn't allow
        # setting an arbitrary value.
        queryset = ArchivedProject.objects.filter(id=archived_project.id)
        queryset.update(modified_datetime=modified_datetime)
        return self.delete()
def fake_delete(self):
"""
Appear to delete this project. Actually archive it.
"""
self.archive(archive_reason=1)
def check_integrity(self):
"""
Run integrity tests on metadata fields and return whether the
project passes the checks
"""
self.integrity_errors = ErrorList()
# Invitations
for invitation in self.authorinvitations.filter(is_active=True):
self.integrity_errors.append(
'Outstanding author invitation to {0}'.format(invitation.email))
# Storage requests
for storage_request in self.storagerequests.filter(
is_active=True):
self.integrity_errors.append('Outstanding storage request')
# Authors
for author in self.authors.all().order_by('display_order'):
if not author.get_full_name():
self.integrity_errors.append('Author {0} has not fill in name'.format(author.user.username))
if not author.affiliations.all():
self.integrity_errors.append('Author {0} has not filled in affiliations'.format(author.user.username))
# Metadata
for attr in ActiveProject.REQUIRED_FIELDS[self.resource_type.id]:
value = getattr(self, attr)
text = unescape(strip_tags(str(value)))
if value is None or not text or text.isspace():
l = self.LABELS[self.resource_type.id][attr] if attr in self.LABELS[self.resource_type.id] else attr.title().replace('_', ' ')
self.integrity_errors.append('Missing required field: {0}'.format(l))
published_projects = self.core_project.publishedprojects.all()
if published_projects:
published_versions = [p.version for p in published_projects]
if self.version in published_versions:
self.integrity_errors.append('The version matches a previously published version.')
self.version_clash = True
else:
self.version_clash = False
if self.integrity_errors:
return False
else:
return True
def is_submittable(self):
"""
Whether the project can be submitted
"""
return (not self.under_submission() and self.check_integrity())
    def submit(self, author_comments):
        """
        Submit the project for review.

        Moves the status to 10 (awaiting editor assignment), records the
        submission time and author comments, and opens the first EditLog.

        Raises
        ------
        Exception
            If the project fails is_submittable().
        """
        if not self.is_submittable():
            raise Exception('ActiveProject is not submittable')
        self.submission_status = 10
        self.submission_datetime = timezone.now()
        self.author_comments = author_comments
        self.save()
        # Create the first edit log
        EditLog.objects.create(project=self, author_comments=author_comments)
    def set_submitting_author(self):
        """
        Used to save query time in templates
        """
        # Replace the bound method with its result on this instance so
        # templates can read project.submitting_author without a call
        # (and without re-running the underlying query each time).
        self.submitting_author = self.submitting_author()
    def assign_editor(self, editor):
        """
        Assign an editor to the project and set the submission status to the
        edit stage.

        Sets status 20 (awaiting editor decision) and records the
        assignment time.
        """
        self.editor = editor
        self.submission_status = 20
        self.editor_assignment_datetime = timezone.now()
        self.save()
    def reassign_editor(self, editor):
        """
        Replace the project's current editor with a new editor.

        Unlike assign_editor(), this does not change the submission
        status or the editor assignment timestamp.
        """
        self.editor = editor
        self.save()
def reject(self):
"""
Reject a project under submission
"""
self.archive(archive_reason=3)
    def is_resubmittable(self):
        """
        Whether the project can be resubmitted: revisions must have been
        requested (status 30) and the integrity checks must pass.

        (The original docstring, "Submit the project for review.", was
        copied from submit() by mistake.)
        """
        return (self.submission_status == 30 and self.check_integrity())
    def resubmit(self, author_comments):
        """
        Resubmit the project for review after requested revisions.

        Moves the status back to 20 (awaiting editor decision), records
        the resubmission time, and opens a new EditLog flagged as a
        resubmission — all inside one transaction.

        Raises
        ------
        Exception
            If the project fails is_resubmittable().
        """
        if not self.is_resubmittable():
            raise Exception('ActiveProject is not resubmittable')
        with transaction.atomic():
            self.submission_status = 20
            self.resubmission_datetime = timezone.now()
            self.save()
            # Create a new edit log
            EditLog.objects.create(project=self, is_resubmission=True,
                                   author_comments=author_comments)
    def reopen_copyedit(self):
        """
        Reopen the project for copyediting

        Only applies when authors are reviewing the copyedit (status 50):
        moves the project back to the copyedit stage (40), clears the
        copyedit completion time, opens a re-edit CopyeditLog, and resets
        every author's approval so they must approve again.
        """
        if self.submission_status == 50:
            self.submission_status = 40
            self.copyedit_completion_datetime = None
            self.save()
            CopyeditLog.objects.create(project=self, is_reedit=True)
            self.authors.all().update(approval_datetime=None)
def approve_author(self, author):
""""
Approve an author. Move the project into the next state if the
author is the final outstanding one. Return whether the
process was successful.
"""
if self.submission_status == 50 and not author.approval_datetime:
now = timezone.now()
author.approval_datetime = now
author.save()
if self.all_authors_approved():
self.author_approval_datetime = now
self.submission_status = 60
self.save()
return True
def all_authors_approved(self):
"""
Whether all authors have approved the publication
"""
authors = self.authors.all()
return len(authors) == len(authors.filter(
approval_datetime__isnull=False))
def is_publishable(self):
"""
Check whether a project may be published
"""
if self.submission_status == 60 and self.check_integrity() and self.all_authors_approved():
return True
return False
    def clear_files(self):
        """
        Delete the project file directory

        Removes the whole tree under file_root() from disk.
        """
        shutil.rmtree(self.file_root())
    def publish(self, slug=None, make_zip=True, title=None):
        """
        Create a published version of this project and update the
        submission status.

        Parameters
        ----------
        slug : the desired custom slug of the published project.
        make_zip : whether to make a zip of all the files.
        title : optional title override for the published project.

        Returns the new PublishedProject.  On any failure after the files
        have been moved, they are moved back and the exception re-raised.
        """
        if not self.is_publishable():
            raise Exception('The project is not publishable')

        published_project = PublishedProject(has_wfdb=self.has_wfdb())

        # Direct copy over fields
        for attr in [f.name for f in Metadata._meta.fields] + [f.name for f in SubmissionInfo._meta.fields]:
            setattr(published_project, attr, getattr(self, attr))

        published_project.slug = slug or self.slug

        # Create project file root if this is first version or the first
        # version with a different access policy
        if not os.path.isdir(published_project.project_file_root()):
            os.mkdir(published_project.project_file_root())
        # Move the files before the DB transaction; the except block
        # below undoes this move if anything fails.
        os.rename(self.file_root(), published_project.file_root())

        try:
            with transaction.atomic():
                # If this is a new version, previous fields need to be updated
                # and slug needs to be carried over
                if self.version_order:
                    previous_published_projects = self.core_project.publishedprojects.all()
                    slug = previous_published_projects.first().slug
                    title = previous_published_projects.first().title
                    if slug != published_project.slug:
                        raise ValueError(
                            {"message": "The published project has different slugs."})

                # Set the slug if specified
                published_project.slug = slug or self.slug
                published_project.title = title or self.title
                published_project.doi = self.doi

                # Change internal links (that point to files within
                # the active project) to point to their new locations
                # in the published project
                published_project.update_internal_links(old_project=self)

                published_project.save()

                # If this is a new version, all version fields have to be updated
                if self.version_order > 0:
                    published_project.set_version_order()

                # Same content, different objects.
                for reference in self.references.all().order_by('id'):
                    published_reference = PublishedReference.objects.create(
                        description=reference.description,
                        project=published_project)

                for publication in self.publications.all():
                    published_publication = PublishedPublication.objects.create(
                        citation=publication.citation, url=publication.url,
                        project=published_project)

                published_project.set_topics([t.description for t in self.topics.all()])

                for parent_project in self.parent_projects.all():
                    published_project.parent_projects.add(parent_project)

                # NOTE(review): resource_type.id == 1 appears to mean
                # Software (matches REQUIRED_FIELDS index 1) — confirm.
                if self.resource_type.id == 1:
                    languages = self.programming_languages.all()
                    if languages:
                        published_project.programming_languages.add(*list(languages))

                for author in self.authors.all():
                    author_profile = author.user.profile
                    published_author = PublishedAuthor.objects.create(
                        project=published_project, user=author.user,
                        is_submitting=author.is_submitting,
                        is_corresponding=author.is_corresponding,
                        approval_datetime=author.approval_datetime,
                        display_order=author.display_order,
                        first_names=author_profile.first_names,
                        last_name=author_profile.last_name,
                        )

                    affiliations = author.affiliations.all()
                    for affiliation in affiliations:
                        published_affiliation = PublishedAffiliation.objects.create(
                            name=affiliation.name, author=published_author)

                    if author.is_corresponding:
                        published_author.corresponding_email = author.corresponding_email.email
                        published_author.save()
                        # The corresponding author becomes the project's
                        # Contact record.
                        contact = Contact.objects.create(name=author.get_full_name(),
                            affiliations='; '.join(a.name for a in affiliations),
                            email=author.corresponding_email, project=published_project)

                # Move the edit and copyedit logs
                for edit_log in self.edit_logs.all():
                    edit_log.project = published_project
                    edit_log.save()
                for copyedit_log in self.copyedit_logs.all():
                    copyedit_log.project = published_project
                    copyedit_log.save()

                # Set files read only and make zip file if requested
                # (runs as a background task; see move_files_as_readonly).
                move_files_as_readonly(published_project.id, self.file_root(),
                    published_project.file_root(), make_zip,
                    verbose_name='Read Only Files - {}'.format(published_project))

                # Remove the ActiveProject
                self.delete()

                return published_project
        except:
            # Intentional bare except: on ANY failure, move the files
            # back to the active project directory, then re-raise.
            os.rename(published_project.file_root(), self.file_root())
            raise
| 37.83779 | 142 | 0.62053 |
2ac14a12a22a1e30ceec2b61e4cebad722830799 | 1,039 | py | Python | combine_npz.py | kromond/stylegan2-ada-pytorch | 7f15d01fd700e909548ab3b3c8b4e7d09c590bcc | [
"BSD-Source-Code"
] | 168 | 2021-02-01T17:01:45.000Z | 2022-03-26T03:06:36.000Z | combine_npz.py | kromond/stylegan2-ada-pytorch | 7f15d01fd700e909548ab3b3c8b4e7d09c590bcc | [
"BSD-Source-Code"
] | 21 | 2021-03-16T20:46:40.000Z | 2022-02-18T22:38:30.000Z | combine_npz.py | kromond/stylegan2-ada-pytorch | 7f15d01fd700e909548ab3b3c8b4e7d09c590bcc | [
"BSD-Source-Code"
] | 85 | 2021-02-04T11:03:01.000Z | 2022-03-30T09:33:43.000Z | import os
import numpy as np
import torch
import click
import PIL.Image
import dnnlib
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--npzs', help='comma separated .npz files', type=str, required=True, metavar='FILE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def combine_npz(
    ctx: click.Context,
    npzs: str,
    outdir: str,
):
    """Concatenate the 'w' latent arrays of several .npz files into one
    combined.npz written to outdir."""
    print('Combining .npz files...')
    files = npzs.split(',')
    os.makedirs(outdir, exist_ok=True)

    # Collect all tensors and concatenate once.  The original called
    # torch.cat inside the loop starting from torch.tensor(()), which
    # re-copies the accumulated tensor on every iteration and relies on
    # legacy special-casing of concatenating an empty 1-D tensor with
    # higher-rank tensors.
    tensors = []
    for f in files:
        print(f)
        tensors.append(torch.tensor(np.load(f)['w']))
    ws = torch.cat(tensors, 0)
    print(ws.size())
    np.savez(f'{outdir}/combined.npz', w=ws.numpy())
#----------------------------------------------------------------------------
if __name__ == "__main__":
combine_npz() # pylint: disable=no-value-for-parameter
#---------------------------------------------------------------------------- | 25.341463 | 105 | 0.526468 |
04d175bdcaa5d75fe4b131be41ed22dd02487567 | 808 | py | Python | investigations/check_desi0_dataset.py | nam8/Barry | 5deb15d71d620079aa46ced73e23b1da8b9c4e57 | [
"MIT"
] | 13 | 2019-07-29T20:39:20.000Z | 2021-09-26T09:20:52.000Z | investigations/check_desi0_dataset.py | nam8/Barry | 5deb15d71d620079aa46ced73e23b1da8b9c4e57 | [
"MIT"
] | 1 | 2021-02-11T10:54:58.000Z | 2021-02-11T10:54:58.000Z | investigations/check_desi0_dataset.py | nam8/Barry | 5deb15d71d620079aa46ced73e23b1da8b9c4e57 | [
"MIT"
] | 7 | 2019-08-26T04:54:00.000Z | 2022-01-20T14:47:47.000Z | import logging
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="[%(levelname)7s |%(funcName)20s] %(message)s")
logging.getLogger("matplotlib").setLevel(logging.ERROR)
from barry.datasets.dataset_power_spectrum import PowerSpectrum_DESIMockChallenge0_Z01
from barry.models import PowerBeutler2017
from barry.models.model import Correction
model = PowerBeutler2017(recon=False, isotropic=False, correction=Correction.NONE, fix_params=["om"])
dataset = PowerSpectrum_DESIMockChallenge0_Z01(recon=False, isotropic=False, realisation="data")
data = dataset.get_data()
for i in range(20):
model.sanity_check(dataset, figname="desi_mock0_optimised_bestfit.png", niter=100)
# print(likelihood)
#
# model.plot_default(dataset)
| 38.47619 | 105 | 0.75 |
f35cb7a7bbd180e145516fdcfb303fe13c5c27fd | 7,166 | py | Python | mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py | nishantg96/HandTrack-MP | bcbf584e0a47d301923f54d0187324ae7423c6d4 | [
"Apache-2.0"
] | null | null | null | mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py | nishantg96/HandTrack-MP | bcbf584e0a47d301923f54d0187324ae7423c6d4 | [
"Apache-2.0"
] | null | null | null | mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py | nishantg96/HandTrack-MP | bcbf584e0a47d301923f54d0187324ae7423c6d4 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.modules.face_geometry.protos import mesh_3d_pb2 as mediapipe_dot_modules_dot_face__geometry_dot_protos_dot_mesh__3d__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata.proto',
package='mediapipe.face_geometry',
syntax='proto2',
serialized_options=_b('\n)com.google.mediapipe.modules.facegeometryB\035GeometryPipelineMetadataProto'),
serialized_pb=_b('\nGmediapipe/modules/face_geometry/protos/geometry_pipeline_metadata.proto\x12\x17mediapipe.face_geometry\x1a\x34mediapipe/modules/face_geometry/protos/mesh_3d.proto\":\n\x13WeightedLandmarkRef\x12\x13\n\x0blandmark_id\x18\x01 \x01(\r\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"\xe0\x01\n\x18GeometryPipelineMetadata\x12:\n\x0cinput_source\x18\x03 \x01(\x0e\x32$.mediapipe.face_geometry.InputSource\x12\x37\n\x0e\x63\x61nonical_mesh\x18\x01 \x01(\x0b\x32\x1f.mediapipe.face_geometry.Mesh3d\x12O\n\x19procrustes_landmark_basis\x18\x02 \x03(\x0b\x32,.mediapipe.face_geometry.WeightedLandmarkRef*S\n\x0bInputSource\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x1a\n\x16\x46\x41\x43\x45_LANDMARK_PIPELINE\x10\x01\x12\x1b\n\x17\x46\x41\x43\x45_DETECTION_PIPELINE\x10\x02\x42J\n)com.google.mediapipe.modules.facegeometryB\x1dGeometryPipelineMetadataProto')
,
dependencies=[mediapipe_dot_modules_dot_face__geometry_dot_protos_dot_mesh__3d__pb2.DESCRIPTOR,])
_INPUTSOURCE = _descriptor.EnumDescriptor(
name='InputSource',
full_name='mediapipe.face_geometry.InputSource',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACE_LANDMARK_PIPELINE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACE_DETECTION_PIPELINE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=441,
serialized_end=524,
)
_sym_db.RegisterEnumDescriptor(_INPUTSOURCE)
InputSource = enum_type_wrapper.EnumTypeWrapper(_INPUTSOURCE)
DEFAULT = 0
FACE_LANDMARK_PIPELINE = 1
FACE_DETECTION_PIPELINE = 2
_WEIGHTEDLANDMARKREF = _descriptor.Descriptor(
name='WeightedLandmarkRef',
full_name='mediapipe.face_geometry.WeightedLandmarkRef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='landmark_id', full_name='mediapipe.face_geometry.WeightedLandmarkRef.landmark_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='mediapipe.face_geometry.WeightedLandmarkRef.weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=212,
)
_GEOMETRYPIPELINEMETADATA = _descriptor.Descriptor(
name='GeometryPipelineMetadata',
full_name='mediapipe.face_geometry.GeometryPipelineMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_source', full_name='mediapipe.face_geometry.GeometryPipelineMetadata.input_source', index=0,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='canonical_mesh', full_name='mediapipe.face_geometry.GeometryPipelineMetadata.canonical_mesh', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='procrustes_landmark_basis', full_name='mediapipe.face_geometry.GeometryPipelineMetadata.procrustes_landmark_basis', index=2,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=215,
serialized_end=439,
)
_GEOMETRYPIPELINEMETADATA.fields_by_name['input_source'].enum_type = _INPUTSOURCE
_GEOMETRYPIPELINEMETADATA.fields_by_name['canonical_mesh'].message_type = mediapipe_dot_modules_dot_face__geometry_dot_protos_dot_mesh__3d__pb2._MESH3D
_GEOMETRYPIPELINEMETADATA.fields_by_name['procrustes_landmark_basis'].message_type = _WEIGHTEDLANDMARKREF
DESCRIPTOR.message_types_by_name['WeightedLandmarkRef'] = _WEIGHTEDLANDMARKREF
DESCRIPTOR.message_types_by_name['GeometryPipelineMetadata'] = _GEOMETRYPIPELINEMETADATA
DESCRIPTOR.enum_types_by_name['InputSource'] = _INPUTSOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WeightedLandmarkRef = _reflection.GeneratedProtocolMessageType('WeightedLandmarkRef', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTEDLANDMARKREF,
__module__ = 'mediapipe.modules.face_geometry.protos.geometry_pipeline_metadata_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.face_geometry.WeightedLandmarkRef)
))
_sym_db.RegisterMessage(WeightedLandmarkRef)
GeometryPipelineMetadata = _reflection.GeneratedProtocolMessageType('GeometryPipelineMetadata', (_message.Message,), dict(
DESCRIPTOR = _GEOMETRYPIPELINEMETADATA,
__module__ = 'mediapipe.modules.face_geometry.protos.geometry_pipeline_metadata_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.face_geometry.GeometryPipelineMetadata)
))
_sym_db.RegisterMessage(GeometryPipelineMetadata)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 42.654762 | 866 | 0.794725 |
4c6b2db108eb618b1a7464c3d239f8d7978c24d7 | 7,364 | py | Python | monai/utils/deprecated.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | [
"Apache-2.0"
] | 1 | 2021-08-20T01:54:26.000Z | 2021-08-20T01:54:26.000Z | monai/utils/deprecated.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | [
"Apache-2.0"
] | null | null | null | monai/utils/deprecated.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import warnings
from functools import wraps
from types import FunctionType
from typing import Optional
from monai.utils.module import version_leq
from .. import __version__
__all__ = ["deprecated", "deprecated_arg", "DeprecatedError"]
class DeprecatedError(Exception):
pass
def warn_deprecated(obj, msg):
"""
Issue the warning message `msg`.
"""
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
def deprecated(
since: Optional[str] = None, removed: Optional[str] = None, msg_suffix: str = "", version_val: str = __version__
):
"""
Marks a function or class as deprecated. If `since` is given this should be a version at or earlier than the
current version and states at what version of the definition was marked as deprecated. If `removed` is given
this can be any version and marks when the definition was removed.
When the decorated definition is called, that is when the function is called or the class instantiated,
a `DeprecationWarning` is issued if `since` is given and the current version is at or later than that given.
a `DeprecatedError` exception is instead raised if `removed` is given and the current version is at or later
than that, or if neither `since` nor `removed` is provided.
Args:
since: version at which the definition was marked deprecated but not removed.
removed: version at which the definition was removed and no longer usable.
msg_suffix: message appended to warning/exception detailing reasons for deprecation and what to use instead.
version_val: (used for testing) version to compare since and removed against, default is MONAI version.
Returns:
Decorated definition which warns or raises exception when used
"""
if since is not None and removed is not None and not version_leq(since, removed):
raise ValueError(f"since must be less or equal to removed, got since={since}, removed={removed}.")
is_not_yet_deprecated = since is not None and version_val != since and version_leq(version_val, since)
if is_not_yet_deprecated:
# smaller than `since`, do nothing
return lambda obj: obj
if since is None and removed is None:
# raise a DeprecatedError directly
is_removed = True
is_deprecated = True
else:
# compare the numbers
is_deprecated = since is not None and version_leq(since, version_val)
is_removed = removed is not None and version_leq(removed, version_val)
def _decorator(obj):
is_func = isinstance(obj, FunctionType)
call_obj = obj if is_func else obj.__init__
msg_prefix = f"{'Function' if is_func else 'Class'} `{obj.__name__}`"
if is_removed:
msg_infix = f"was removed in version {removed}."
elif is_deprecated:
msg_infix = f"has been deprecated since version {since}."
if removed is not None:
msg_infix += f" It will be removed in version {removed}."
else:
msg_infix = "has been deprecated."
msg = f"{msg_prefix} {msg_infix} {msg_suffix}".strip()
@wraps(call_obj)
def _wrapper(*args, **kwargs):
if is_removed:
raise DeprecatedError(msg)
if is_deprecated:
warn_deprecated(obj, msg)
return call_obj(*args, **kwargs)
if is_func:
return _wrapper
obj.__init__ = _wrapper
return obj
return _decorator
def deprecated_arg(
name,
since: Optional[str] = None,
removed: Optional[str] = None,
msg_suffix: str = "",
version_val: str = __version__,
):
"""
Marks a particular named argument of a callable as deprecated. The same conditions for `since` and `removed` as
described in the `deprecated` decorator.
When the decorated definition is called, that is when the function is called or the class instantiated with args,
a `DeprecationWarning` is issued if `since` is given and the current version is at or later than that given.
a `DeprecatedError` exception is instead raised if `removed` is given and the current version is at or later
than that, or if neither `since` nor `removed` is provided.
Args:
name: name of position or keyword argument to mark as deprecated.
since: version at which the argument was marked deprecated but not removed.
removed: version at which the argument was removed and no longer usable.
msg_suffix: message appended to warning/exception detailing reasons for deprecation and what to use instead.
version_val: (used for testing) version to compare since and removed against, default is MONAI version.
Returns:
Decorated callable which warns or raises exception when deprecated argument used
"""
if since is not None and removed is not None and not version_leq(since, removed):
raise ValueError(f"since must be less or equal to removed, got since={since}, removed={removed}.")
is_not_yet_deprecated = since is not None and version_val != since and version_leq(version_val, since)
if is_not_yet_deprecated:
# smaller than `since`, do nothing
return lambda obj: obj
if since is None and removed is None:
# raise a DeprecatedError directly
is_removed = True
is_deprecated = True
else:
# compare the numbers
is_deprecated = since is not None and version_leq(since, version_val)
is_removed = removed is not None and version_leq(removed, version_val)
if is_not_yet_deprecated:
return lambda obj: obj
def _decorator(func):
argname = f"{func.__name__}_{name}"
msg_prefix = f"Argument `{name}`"
if is_removed:
msg_infix = f"was removed in version {removed}."
elif is_deprecated:
msg_infix = f"has been deprecated since version {since}."
if removed is not None:
msg_infix += f" It will be removed in version {removed}."
else:
msg_infix = "has been deprecated."
msg = f"{msg_prefix} {msg_infix} {msg_suffix}".strip()
sig = inspect.signature(func)
@wraps(func)
def _wrapper(*args, **kwargs):
binding = sig.bind(*args, **kwargs).arguments
positional_found = name in binding
kw_found = "kwargs" in binding and name in binding["kwargs"]
if positional_found or kw_found:
if is_removed:
raise DeprecatedError(msg)
if is_deprecated:
warn_deprecated(argname, msg)
return func(*args, **kwargs)
return _wrapper
return _decorator
| 38.757895 | 117 | 0.676534 |
1d64899c584fbcd09b8db0a30ba1f931dc57ab9f | 6,906 | py | Python | main.py | JobQiu/PrototypicalNetwork | b46c34f8847946c4cd41774f4c8ee87c3486474c | [
"Apache-2.0"
] | null | null | null | main.py | JobQiu/PrototypicalNetwork | b46c34f8847946c4cd41774f4c8ee87c3486474c | [
"Apache-2.0"
] | null | null | null | main.py | JobQiu/PrototypicalNetwork | b46c34f8847946c4cd41774f4c8ee87c3486474c | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import os
from configs.config import MiniImageNetConfig
from data_loader.data_generator import DataGenerator, CompressedImageNetDataGenerator
from models.example_model import ExampleModel, PrototypicalNetwork
from trainers.example_trainer import ExampleTrainer, ProtoNetTrainer
from utils.config import process_config
from utils.dirs import create_dirs
from utils.logger import Logger
from utils.utils import get_args, send_msg
def main():
# capture the config path from the run arguments
# then process the json configuration file
try:
args = get_args()
config = process_config(args.config)
except:
print("missing or invalid arguments")
if os.path.isfile("configs/example.json"):
config = process_config("configs/example.json")
else:
config = process_config("../configs/example.json")
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
# create tensorflow session
sess = tf.Session()
# create your data generator
data = DataGenerator(config)
# create an instance of the model you want
model = ExampleModel(config)
# create tensorboard logger
logger = Logger(sess, config)
# create trainer and pass all the previous components to it
trainer = ExampleTrainer(sess, model, data, config, logger)
# load model if exists
model.load(sess)
# here you train your model
trainer.train()
def run_proto_net():
config = MiniImageNetConfig()
create_dirs([config.summary_dir, config.checkpoint_dir])
# create your data generator
data = CompressedImageNetDataGenerator(config)
model = PrototypicalNetwork(config)
sess = tf.Session()
logger = Logger(sess, config)
trainer = ProtoNetTrainer(sess, model, data, config, logger)
model.load(sess)
trainer.train()
pass
def generate_image_embedding():
config = MiniImageNetConfig()
create_dirs([config.summary_dir, config.checkpoint_dir])
data = CompressedImageNetDataGenerator(config)
model = PrototypicalNetwork(config)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
model.load(sess)
train_inputs, train_query, train_labels = next(data.next_batch())
x_embedding, q_embedding, prototype = sess.run(fetches=[model.emb_x, model.emb_q, model.prototype],
feed_dict={model.x: train_inputs,
model.q: train_query,
model.y: train_labels})
import numpy as np
from sklearn.manifold import TSNE
all = np.concatenate([x_embedding, q_embedding, prototype])
tsne = TSNE(n_components=2, random_state=0)
all_res = tsne.fit_transform(all)
x_res, q_res, p_res = all[:len(x_embedding)], all[len(x_embedding):len(x_embedding) + len(q_embedding)], all[len(
x_embedding) + len(q_embedding):]
x_res = np.reshape(x_res, newshape=(20, 5, -1))
q_res = np.reshape(q_res, newshape=(20, 15, -1))
print("")
pass
if __name__ == '__main__':
tf.reset_default_graph()
experiment = 'protoNet2'
if experiment == 'protoNet2':
run_proto_net()
elif experiment == 'protoNet_embedding':
generate_image_embedding()
else:
main()
send_msg("train done")
"""
#%%
import tensorflow as tf
import os
from configs.config import MiniImageNetConfig
from data_loader.data_generator import DataGenerator, CompressedImageNetDataGenerator
from models.example_model import ExampleModel, PrototypicalNetwork
from trainers.example_trainer import ExampleTrainer, ProtoNetTrainer
from utils.config import process_config
from utils.dirs import create_dirs
from utils.logger import Logger
from utils.utils import get_args, send_msg
tf.reset_default_graph()
config = MiniImageNetConfig()
create_dirs([config.summary_dir, config.checkpoint_dir])
data = CompressedImageNetDataGenerator(config)
model = PrototypicalNetwork(config)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
model.load(sess)
train_inputs, train_query, train_labels = next(data.next_batch())
x_embedding, q_embedding, prototype = sess.run(fetches=[model.emb_x, model.emb_q, model.prototype],
feed_dict={model.x: train_inputs,
model.q: train_query,
model.y: train_labels})
import numpy as np
from sklearn.manifold import TSNE
all = np.concatenate([x_embedding, q_embedding, prototype])
tsne = TSNE(n_components=2, random_state=0)
all_res = tsne.fit_transform(all)
x_res, q_res, p_res = all[:len(x_embedding)], all[len(x_embedding):len(x_embedding) + len(q_embedding)], all[len(
x_embedding) + len(q_embedding):]
x_res = np.reshape(x_res, newshape=(20, 5, -1))
q_res = np.reshape(q_res, newshape=(20, 15, -1))
#%%
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'orange', 'purple'
c = 14
for s in range(5):
plt.figure()
plt.imshow(train_inputs[c][s])
plt.grid()
plt.figure(figsize=(20,12))
plt.scatter(p_res[c][0],p_res[c][1], c='r',s=400)
plt.scatter(p_res[c-1][0],p_res[c-1][1],c='b',s=400)
for s in range(5):
plt.scatter(x_res[c][s][0],x_res[c][s][1], c='r',alpha=0.8,s=100)
plt.scatter(x_res[c-1][s][0],x_res[c-1][s][1],c='b', alpha=0.8,s=100)
plt.text(x_res[c][s][0]+.03, x_res[c][s][1]+.03, "{}".format(s), fontsize=9)
#%%
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
def randrange(n, vmin, vmax):
'''
Helper function to make an array of random numbers having shape (n, )
with each number distributed Uniform(vmin, vmax).
'''
return (vmax - vmin)*np.random.rand(n) + vmin
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(111, projection='3d')
n = 100
base = 6
for c in range(base,base+2):
ax.scatter(p_res[c][0],p_res[c][1],p_res[c][2],marker=c,s=400)
for s in range(5):
ax.scatter(x_res[c][s][0], x_res[c][s][1], x_res[c][s][2], marker=c,c=colors[c], s = 100)
#plt.scatter(x_res[c][s][0],x_res[c][s][1], c=colors[c], alpha=0.8, marker=c,s=100)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
#%%
tf.reset_default_graph()
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=(5,1600))
singular_values,u,v = tf.svd(x)
sigma = tf.diag(singular_values)
s1,u1,v1, sigma1 = sess.run([singular_values,u,v,sigma],feed_dict={x:x_emb[0]})
uu, ss, vv = np.linalg.svd(x_emb[0])
"""
| 30.557522 | 117 | 0.669852 |
291c4d7da8355eb2587d73f8f4437a5214e8e58d | 38 | py | Python | foiamachine/local/lib/python2.7/encodings/cp1255.py | dwillis/foiamachine | 26d3b02870227696cdaab639c39d47b2a7a42ae5 | [
"Unlicense",
"MIT"
] | 3 | 2021-08-07T04:01:55.000Z | 2021-08-07T05:12:11.000Z | foiamachine/local/lib/python2.7/encodings/cp1255.py | dwillis/foiamachine | 26d3b02870227696cdaab639c39d47b2a7a42ae5 | [
"Unlicense",
"MIT"
] | null | null | null | foiamachine/local/lib/python2.7/encodings/cp1255.py | dwillis/foiamachine | 26d3b02870227696cdaab639c39d47b2a7a42ae5 | [
"Unlicense",
"MIT"
] | 1 | 2021-08-05T22:51:14.000Z | 2021-08-05T22:51:14.000Z | /usr/lib/python2.7/encodings/cp1255.py | 38 | 38 | 0.815789 |
0bdc15bcfa8f114d042e4bae5c3754b0722e2979 | 19,554 | py | Python | toontown/toonbase/ToonBase.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | 1 | 2022-03-25T18:20:49.000Z | 2022-03-25T18:20:49.000Z | toontown/toonbase/ToonBase.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | null | null | null | toontown/toonbase/ToonBase.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | null | null | null | from otp.otpbase import OTPBase
from otp.otpbase import OTPLauncherGlobals
from otp.otpbase import OTPGlobals
from otp.settings.Settings import Settings
from direct.showbase.PythonUtil import *
from . import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from . import ToontownLoader
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.otp import *
import sys
import os
import math
from toontown.toonbase import ToontownAccess
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from toontown.launcher import ToontownDownloadWatcher
class ToonBase(OTPBase.OTPBase):
notify = DirectNotifyGlobal.directNotify.newCategory('ToonBase')
def __init__(self):
self.settings = Settings()
if not ConfigVariableInt('ignore-user-options', 0).value:
self.settings.readSettings()
mode = not self.settings.getSetting('windowed-mode', True)
music = self.settings.getSetting('music', True)
sfx = self.settings.getSetting('sfx', True)
toonChatSounds = self.settings.getSetting('toon-chat-sounds', True)
res = self.settings.getSetting('resolution', (800, 600))
if mode == None:
mode = 1
if res == None:
res = (800, 600)
loadPrcFileData('toonBase Settings Window Res', 'win-size %s %s' % (res[0], res[1]))
loadPrcFileData('toonBase Settings Window FullScreen', 'fullscreen %s' % mode)
loadPrcFileData('toonBase Settings Music Active', 'audio-music-active %s' % music)
loadPrcFileData('toonBase Settings Sound Active', 'audio-sfx-active %s' % sfx)
loadPrcFileData('toonBase Settings Toon Chat Sounds', 'toon-chat-sounds %s' % toonChatSounds)
OTPBase.OTPBase.__init__(self)
if not self.isMainWindowOpen():
try:
launcher.setPandaErrorCode(7)
except:
pass
sys.exit(1)
self.disableShowbaseMouse()
base.debugRunningMultiplier /= OTPGlobals.ToonSpeedFactor
self.toonChatSounds = ConfigVariableBool('toon-chat-sounds', 1).value
self.placeBeforeObjects = ConfigVariableBool('place-before-objects', 1).value
self.endlessQuietZone = False
self.wantDynamicShadows = 0
self.exitErrorCode = 0
camera.setPosHpr(0, 0, 0, 0, 0, 0)
self.camLens.setFov(ToontownGlobals.DefaultCameraFov)
self.camLens.setNearFar(ToontownGlobals.DefaultCameraNear, ToontownGlobals.DefaultCameraFar)
self.musicManager.setVolume(0.65)
self.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
tpm = TextPropertiesManager.getGlobalPtr()
candidateActive = TextProperties()
candidateActive.setTextColor(0, 0, 1, 1)
tpm.setProperties('candidate_active', candidateActive)
candidateInactive = TextProperties()
candidateInactive.setTextColor(0.3, 0.3, 0.7, 1)
tpm.setProperties('candidate_inactive', candidateInactive)
self.transitions.IrisModelName = 'phase_3/models/misc/iris'
self.transitions.FadeModelName = 'phase_3/models/misc/fade'
self.exitFunc = self.userExit
if 'launcher' in __builtins__ and launcher:
launcher.setPandaErrorCode(11)
globalClock.setMaxDt(0.2)
if ConfigVariableBool('want-particles', 1).value == 1:
self.notify.debug('Enabling particles')
self.enableParticles()
self.accept(ToontownGlobals.ScreenshotHotkey, self.takeScreenShot)
self.accept('panda3d-render-error', self.panda3dRenderError)
oldLoader = self.loader
self.loader = ToontownLoader.ToontownLoader(self)
__builtins__['loader'] = self.loader
oldLoader.destroy()
self.accept('PandaPaused', self.disableAllAudio)
self.accept('PandaRestarted', self.enableAllAudio)
self.friendMode = ConfigVariableBool('switchboard-friends', 0).value
self.wantPets = ConfigVariableBool('want-pets', 1).value
self.wantBingo = ConfigVariableBool('want-fish-bingo', 1).value
self.wantKarts = ConfigVariableBool('want-karts', 1).value
self.wantNewSpecies = ConfigVariableBool('want-new-species', 0).value
self.inactivityTimeout = ConfigVariableDouble('inactivity-timeout', ToontownGlobals.KeyboardTimeout).value
if self.inactivityTimeout:
self.notify.debug('Enabling Panda timeout: %s' % self.inactivityTimeout)
self.mouseWatcherNode.setInactivityTimeout(self.inactivityTimeout)
self.randomMinigameAbort = ConfigVariableBool('random-minigame-abort', 0).value
self.randomMinigameDisconnect = ConfigVariableBool('random-minigame-disconnect', 0).value
self.randomMinigameNetworkPlugPull = ConfigVariableBool('random-minigame-netplugpull', 0).value
self.autoPlayAgain = ConfigVariableBool('auto-play-again', 0).value
self.skipMinigameReward = ConfigVariableBool('skip-minigame-reward', 0).value
self.wantMinigameDifficulty = ConfigVariableBool('want-minigame-difficulty', 0).value
self.minigameDifficulty = ConfigVariableDouble('minigame-difficulty', -1.0).value
if self.minigameDifficulty == -1.0:
del self.minigameDifficulty
self.minigameSafezoneId = ConfigVariableInt('minigame-safezone-id', -1).value
if self.minigameSafezoneId == -1:
del self.minigameSafezoneId
cogdoGameSafezoneId = ConfigVariableInt('cogdo-game-safezone-id', -1).value
cogdoGameDifficulty = ConfigVariableDouble('cogdo-game-difficulty', -1).value
if cogdoGameDifficulty != -1:
self.cogdoGameDifficulty = cogdoGameDifficulty
if cogdoGameSafezoneId != -1:
self.cogdoGameSafezoneId = cogdoGameSafezoneId
ToontownBattleGlobals.SkipMovie = ConfigVariableBool('skip-battle-movies', 0).value
self.creditCardUpFront = ConfigVariableInt('credit-card-up-front', -1).value
if self.creditCardUpFront == -1:
del self.creditCardUpFront
else:
self.creditCardUpFront = self.creditCardUpFront != 0
self.housingEnabled = ConfigVariableBool('want-housing', 1).value
self.cannonsEnabled = ConfigVariableBool('estate-cannons', 0).value
self.fireworksEnabled = ConfigVariableBool('estate-fireworks', 0).value
self.dayNightEnabled = ConfigVariableBool('estate-day-night', 0).value
self.cloudPlatformsEnabled = ConfigVariableBool('estate-clouds', 0).value
self.greySpacing = ConfigVariableBool('allow-greyspacing', 0).value
self.goonsEnabled = ConfigVariableBool('estate-goon', 0).value
self.restrictTrialers = ConfigVariableBool('restrict-trialers', 1).value
self.roamingTrialers = ConfigVariableBool('roaming-trialers', 1).value
self.slowQuietZone = ConfigVariableBool('slow-quiet-zone', 0).value
self.slowQuietZoneDelay = ConfigVariableDouble('slow-quiet-zone-delay', 5).value
self.killInterestResponse = ConfigVariableBool('kill-interest-response', 0).value
tpMgr = TextPropertiesManager.getGlobalPtr()
WLDisplay = TextProperties()
WLDisplay.setSlant(0.3)
WLEnter = TextProperties()
WLEnter.setTextColor(1.0, 0.0, 0.0, 1)
tpMgr.setProperties('WLDisplay', WLDisplay)
tpMgr.setProperties('WLEnter', WLEnter)
del tpMgr
CullBinManager.getGlobalPtr().addBin('gui-popup', CullBinManager.BTUnsorted, 60)
CullBinManager.getGlobalPtr().addBin('shadow', CullBinManager.BTFixed, 15)
CullBinManager.getGlobalPtr().addBin('ground', CullBinManager.BTFixed, 14)
self.lastScreenShotTime = globalClock.getRealTime()
self.accept('InputState-forward', self.__walking)
self.canScreenShot = 1
self.glitchCount = 0
self.walking = 0
self.oldX = max(1, base.win.getXSize())
self.oldY = max(1, base.win.getYSize())
self.aspectRatio = float(self.oldX) / self.oldY
return
def windowEvent(self, win):
OTPBase.OTPBase.windowEvent(self, win)
if not ConfigVariableInt('keep-aspect-ratio', 0).value:
return
x = max(1, win.getXSize())
y = max(1, win.getYSize())
maxX = base.pipe.getDisplayWidth()
maxY = base.pipe.getDisplayHeight()
cwp = win.getProperties()
originX = 0
originY = 0
if cwp.hasOrigin():
originX = cwp.getXOrigin()
originY = cwp.getYOrigin()
if originX > maxX:
originX = originX - maxX
if originY > maxY:
oringY = originY - maxY
maxX -= originX
maxY -= originY
if math.fabs(x - self.oldX) > math.fabs(y - self.oldY):
newY = x / self.aspectRatio
newX = x
if newY > maxY:
newY = maxY
newX = self.aspectRatio * maxY
else:
newX = self.aspectRatio * y
newY = y
if newX > maxX:
newX = maxX
newY = maxX / self.aspectRatio
wp = WindowProperties()
wp.setSize(newX, newY)
base.win.requestProperties(wp)
base.cam.node().getLens().setFilmSize(newX, newY)
self.oldX = newX
self.oldY = newY
    def disableShowbaseMouse(self):
        """Detach ShowBase's default mouse-driven camera control.

        Switches to drive mode first, then disables mouse control and
        detaches the interface nodes so stray mouse input cannot move
        the camera.
        """
        self.useDrive()
        self.disableMouse()
        if self.mouseInterface:
            self.mouseInterface.detachNode()
        # NOTE(review): the guard checks base.mouse2cam but detaches
        # self.mouse2cam — presumably the same object; confirm.
        if base.mouse2cam:
            self.mouse2cam.detachNode()
    def __walking(self, pressed):
        """Record the forward-walk key state (bound to 'InputState-forward')."""
        self.walking = pressed
    def takeScreenShot(self):
        """Capture a screenshot into the 'screenshots/' directory.

        Rate-limits captures while the avatar is walking (to avoid stutter
        "glitch" spam) and, when the ``screenshot-coords`` config is set,
        temporarily overlays the avatar position before rendering the frame
        that gets saved.
        """
        if not os.path.exists('screenshots/'):
            os.mkdir('screenshots/')
        namePrefix = 'screenshot'
        namePrefix = 'screenshots/' + launcher.logPrefix + namePrefix
        timedif = globalClock.getRealTime() - self.lastScreenShotTime
        # Suppress further captures after repeated rapid shots while walking.
        if self.glitchCount > 10 and self.walking:
            return
        if timedif < 1.0 and self.walking:
            self.glitchCount += 1
            return
        if not hasattr(self, 'localAvatar'):
            # No avatar yet (e.g. login screens): plain screenshot only.
            self.screenshot(namePrefix=namePrefix)
            self.lastScreenShotTime = globalClock.getRealTime()
            return
        coordOnScreen = ConfigVariableBool('screenshot-coords', 0).value
        self.localAvatar.stopThisFrame = 1
        ctext = self.localAvatar.getAvPosStr()
        self.screenshotStr = ''
        # Listeners may append extra annotation via addScreenshotString().
        messenger.send('takingScreenshot')
        if coordOnScreen:
            coordTextLabel = DirectLabel(pos=(-0.81, 0.001, -0.87), text=ctext, text_scale=0.05, text_fg=VBase4(1.0, 1.0, 1.0, 1.0), text_bg=(0, 0, 0, 0), text_shadow=(0, 0, 0, 1), relief=None)
            coordTextLabel.setBin('gui-popup', 0)
            strTextLabel = None
            if len(self.screenshotStr):
                strTextLabel = DirectLabel(pos=(0.0, 0.001, 0.9), text=self.screenshotStr, text_scale=0.05, text_fg=VBase4(1.0, 1.0, 1.0, 1.0), text_bg=(0, 0, 0, 0), text_shadow=(0, 0, 0, 1), relief=None)
                strTextLabel.setBin('gui-popup', 0)
        # Render one frame so the overlay labels appear in the capture.
        self.graphicsEngine.renderFrame()
        self.screenshot(namePrefix=namePrefix, imageComment=ctext + ' ' + self.screenshotStr)
        self.lastScreenShotTime = globalClock.getRealTime()
        if coordOnScreen:
            if strTextLabel is not None:
                strTextLabel.destroy()
            coordTextLabel.destroy()
        return
def addScreenshotString(self, str):
if len(self.screenshotStr):
self.screenshotStr += '\n'
self.screenshotStr += str
    def initNametagGlobals(self):
        """Load nametag/chat-balloon assets and configure NametagGlobals.

        Also creates the margin manager and the grid cells used to place
        off-screen nametag markers along the left, bottom, and right edges
        of the 2-D overlay.
        """
        arrow = loader.loadModel('phase_3/models/props/arrow')
        card = loader.loadModel('phase_3/models/props/panel')
        speech3d = ChatBalloon(loader.loadModel('phase_3/models/props/chatbox').node())
        thought3d = ChatBalloon(loader.loadModel('phase_3/models/props/chatbox_thought_cutout').node())
        speech2d = ChatBalloon(loader.loadModel('phase_3/models/props/chatbox_noarrow').node())
        chatButtonGui = loader.loadModel('phase_3/models/gui/chat_button_gui')
        NametagGlobals.setCamera(self.cam)
        NametagGlobals.setArrowModel(arrow)
        NametagGlobals.setNametagCard(card, VBase4(-0.5, 0.5, -0.5, 0.5))
        if self.mouseWatcherNode:
            NametagGlobals.setMouseWatcher(self.mouseWatcherNode)
        NametagGlobals.setSpeechBalloon3d(speech3d)
        NametagGlobals.setThoughtBalloon3d(thought3d)
        NametagGlobals.setSpeechBalloon2d(speech2d)
        # NOTE(review): the 3-D thought balloon is reused for 2-D here —
        # presumably intentional (no separate 2-D asset is loaded); confirm.
        NametagGlobals.setThoughtBalloon2d(thought3d)
        NametagGlobals.setPageButton(PGButton.SReady, chatButtonGui.find('**/Horiz_Arrow_UP'))
        NametagGlobals.setPageButton(PGButton.SDepressed, chatButtonGui.find('**/Horiz_Arrow_DN'))
        NametagGlobals.setPageButton(PGButton.SRollover, chatButtonGui.find('**/Horiz_Arrow_Rllvr'))
        NametagGlobals.setQuitButton(PGButton.SReady, chatButtonGui.find('**/CloseBtn_UP'))
        NametagGlobals.setQuitButton(PGButton.SDepressed, chatButtonGui.find('**/CloseBtn_DN'))
        NametagGlobals.setQuitButton(PGButton.SRollover, chatButtonGui.find('**/CloseBtn_Rllvr'))
        rolloverSound = DirectGuiGlobals.getDefaultRolloverSound()
        if rolloverSound:
            NametagGlobals.setRolloverSound(rolloverSound)
        clickSound = DirectGuiGlobals.getDefaultClickSound()
        if clickSound:
            NametagGlobals.setClickSound(clickSound)
        NametagGlobals.setToon(self.cam)
        self.marginManager = MarginManager()
        self.margins = self.aspect2d.attachNewNode(self.marginManager, DirectGuiGlobals.MIDGROUND_SORT_INDEX + 1)
        mm = self.marginManager
        # Three stacked cells down the left edge.
        self.leftCells = [mm.addGridCell(0, 1, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop), mm.addGridCell(0, 2, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop), mm.addGridCell(0, 3, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop)]
        # Five cells along the bottom edge.
        self.bottomCells = [mm.addGridCell(0.5, 0, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop),
         mm.addGridCell(1.5, 0, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop),
         mm.addGridCell(2.5, 0, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop),
         mm.addGridCell(3.5, 0, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop),
         mm.addGridCell(4.5, 0, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop)]
        # Two cells on the right edge.
        self.rightCells = [mm.addGridCell(5, 2, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop), mm.addGridCell(5, 1, base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop)]
def setCellsAvailable(self, cell_list, available):
for cell in cell_list:
self.marginManager.setCellAvailable(cell, available)
    def cleanupDownloadWatcher(self):
        """Tear down the download-progress watcher once all phases finish."""
        self.downloadWatcher.cleanup()
        self.downloadWatcher = None
        return
    def startShow(self, cr, launcherServer = None):
        """Begin the game: resolve the game-server list and start login.

        Server resolution precedence: the ``game-server`` config value,
        then the launcher-supplied server, then localhost. A semicolon-
        separated list is supported; when only a single server is given,
        the ``server-failover`` config can supply alternates (either bare
        port numbers applied to the same host, or full server specs).
        """
        self.cr = cr
        base.graphicsEngine.renderFrame()
        self.downloadWatcher = ToontownDownloadWatcher.ToontownDownloadWatcher(TTLocalizer.LauncherPhaseNames)
        if launcher.isDownloadComplete():
            self.cleanupDownloadWatcher()
        else:
            self.acceptOnce('launcherAllPhasesComplete', self.cleanupDownloadWatcher)
        gameServer = ConfigVariableString('game-server', '').value
        if gameServer:
            self.notify.info('Using game-server from Configrc: %s ' % gameServer)
        elif launcherServer:
            gameServer = launcherServer
            self.notify.info('Using gameServer from launcher: %s ' % gameServer)
        else:
            gameServer = '127.0.0.1'
        serverPort = ConfigVariableInt('server-port', 7198).value
        serverList = []
        for name in gameServer.split(';'):
            url = URLSpec(name, 1)
            if ConfigVariableBool('server-want-ssl', False).value:
                url.setScheme('s')
            if not url.hasPort():
                url.setPort(serverPort)
            serverList.append(url)
        if len(serverList) == 1:
            failover = ConfigVariableString('server-failover', '').value
            serverURL = serverList[0]
            for arg in failover.split():
                try:
                    # A bare integer means: same host, different port.
                    port = int(arg)
                    url = URLSpec(serverURL)
                    url.setPort(port)
                except:
                    # Otherwise treat the token as a full server spec.
                    url = URLSpec(arg, 1)
                if url != serverURL:
                    serverList.append(url)
        cr.loginFSM.request('connect', [serverList])
        self.ttAccess = ToontownAccess.ToontownAccess()
    def removeGlitchMessage(self):
        """Stop listening for the forward-walk input state event."""
        self.ignore('InputState-forward')
        print('ignoring InputState-forward')
    def exitShow(self, errorCode = None):
        """Exit the game, reporting *errorCode* to the launcher (0 if None)."""
        self.notify.info('Exiting Toontown: errorCode = %s' % errorCode)
        if errorCode:
            launcher.setPandaErrorCode(errorCode)
        else:
            launcher.setPandaErrorCode(0)
        sys.exit()
    def setExitErrorCode(self, code):
        """Remember the exit error code; on Windows, also tell the launcher
        which web page to open after exit for known codes."""
        self.exitErrorCode = code
        if os.name == 'nt':
            exitCode2exitPage = {OTPLauncherGlobals.ExitEnableChat: 'chat',
             OTPLauncherGlobals.ExitSetParentPassword: 'setparentpassword',
             OTPLauncherGlobals.ExitPurchase: 'purchase'}
            if code in exitCode2exitPage:
                launcher.setRegistry('EXIT_PAGE', exitCode2exitPage[code])
    def getExitErrorCode(self):
        """Return the last code set via setExitErrorCode()."""
        return self.exitErrorCode
    def userExit(self):
        """Handle the user closing the window: notify the server and exit."""
        try:
            # Best effort: play the teleport-out animation if an avatar exists.
            self.localAvatar.d_setAnimState('TeleportOut', 1)
        except:
            pass
        if self.cr.timeManager:
            self.cr.timeManager.setDisconnectReason(ToontownGlobals.DisconnectCloseWindow)
        base.cr._userLoggingOut = False
        try:
            # EAFP check for a logged-in avatar (the localAvatar builtin).
            localAvatar
        except:
            pass
        else:
            messenger.send('clientLogout')
            self.cr.dumpAllSubShardObjects()
        self.cr.loginFSM.request('shutdown')
        # NOTE(review): this warning is logged unconditionally, even after a
        # successful shutdown request — looks like leftover fallback text.
        self.notify.warning('Could not request shutdown; exiting anyway.')
        self.exitShow()
    def panda3dRenderError(self):
        """Handle a fatal graphics error: report code 14 and disconnect."""
        launcher.setPandaErrorCode(14)
        if self.cr.timeManager:
            self.cr.timeManager.setDisconnectReason(ToontownGlobals.DisconnectGraphicsError)
        self.cr.sendDisconnect()
        sys.exit()
def getShardPopLimits(self):
if self.cr.productName == 'JP':
return (ConfigVariableInt('shard-low-pop', ToontownGlobals.LOW_POP_JP).value, ConfigVariableInt('shard-mid-pop', ToontownGlobals.MID_POP_JP).value, ConfigVariableInt('shard-high-pop', ToontownGlobals.HIGH_POP_JP).value)
elif self.cr.productName in ['BR', 'FR']:
return (ConfigVariableInt('shard-low-pop', ToontownGlobals.LOW_POP_INTL).value, ConfigVariableInt('shard-mid-pop', ToontownGlobals.MID_POP_INTL).value, ConfigVariableInt('shard-high-pop', ToontownGlobals.HIGH_POP_INTL).value)
else:
return (ConfigVariableInt('shard-low-pop', ToontownGlobals.LOW_POP).value, ConfigVariableInt('shard-mid-pop', ToontownGlobals.MID_POP).value, ConfigVariableInt('shard-high-pop', ToontownGlobals.HIGH_POP).value)
    def playMusic(self, music, looping = 0, interrupt = 1, volume = None, time = 0.0):
        """Delegate music playback to the OTPBase implementation."""
        OTPBase.OTPBase.playMusic(self, music, looping, interrupt, volume, time)
| 49.130653 | 265 | 0.661808 |
2f48c921f7cf9a5b9ce739e512d4df017919744b | 5,766 | py | Python | src/aleph/storage.py | olethanh/pyaleph | 3a64fb89c54e74187679872d559fb00be0dfde43 | [
"MIT"
] | null | null | null | src/aleph/storage.py | olethanh/pyaleph | 3a64fb89c54e74187679872d559fb00be0dfde43 | [
"MIT"
] | null | null | null | src/aleph/storage.py | olethanh/pyaleph | 3a64fb89c54e74187679872d559fb00be0dfde43 | [
"MIT"
] | null | null | null | """ Storage module for Aleph.
Basically manages the IPFS storage.
"""
import asyncio
import json
import logging
from hashlib import sha256
from aleph.services.filestore import get_value, set_value
from aleph.services.ipfs.storage import add_bytes as add_ipfs_bytes
from aleph.services.ipfs.storage import add_file as ipfs_add_file
from aleph.services.ipfs.storage import get_ipfs_content
from aleph.services.ipfs.storage import pin_add as ipfs_pin_add
from aleph.services.p2p.http import request_hash as p2p_http_request_hash
from aleph.services.p2p.protocol import request_hash as p2p_protocol_request_hash
from aleph.utils import run_in_executor
from aleph.web import app
LOGGER = logging.getLogger("STORAGE")  # module-level logger for storage operations
async def json_async_loads(s):
    """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
    containing a JSON document) to a Python object in an asynchronous
    executor, so large payloads do not block the event loop."""
    return await run_in_executor(None, json.loads, s)
async def get_message_content(message):
    """Resolve a message's content, returning ``(content, size)``.

    ``item_type`` selects the source: 'ipfs'/'storage' fetch by hash,
    'inline' parses the embedded JSON. Returns ``-1`` as content for
    permanently bad data (never retry) and ``None`` for unknown types.
    """
    item_type = message.get('item_type', 'ipfs')
    if item_type in ('ipfs', 'storage'):
        return await get_json(message['item_hash'], engine=item_type)
    elif item_type == 'inline':
        try:
            item_content = await json_async_loads(message['item_content'])
        except (json.JSONDecodeError, KeyError):
            LOGGER.exception("Can't decode JSON")
            return -1, 0  # never retry, bogus data
        return item_content, len(message['item_content'])
    else:
        # Bug fix: this is not an exception context, so use error() —
        # exception() here logged a spurious "NoneType: None" traceback.
        LOGGER.error("Unknown item type: %s", item_type)
        return None, 0  # unknown, could retry later? shouldn't have arrived this far though.
def get_sha256(content):
    """Return the hex SHA-256 digest of *content* (str is UTF-8 encoded first)."""
    data = content.encode('utf-8') if isinstance(content, str) else content
    return sha256(data).hexdigest()
async def get_hash_content(hash, engine='ipfs', timeout=2,
                           tries=1, use_network=True, use_ipfs=True,
                           store_value=True):
    """Fetch raw content for *hash*, trying local store, P2P, then IPFS.

    Returns the bytes on success, ``None`` if not retrievable, or ``-1``
    when retrieved content fails hash verification (bad data, never retry).
    Verified content is cached in the local filestore when *store_value*.
    """
    # TODO: determine which storage engine to use
    ipfs_enabled = app['config'].ipfs.enabled.value
    enabled_clients = app['config'].p2p.clients.value
    # content = await loop.run_in_executor(None, get_value, hash)
    content = await get_value(hash)
    if content is None:
        if use_network:
            # Try the P2P clients first (protocol stream, then HTTP).
            if 'protocol' in enabled_clients:
                content = await p2p_protocol_request_hash(hash)
            if 'http' in enabled_clients and content is None:
                content = await p2p_http_request_hash(hash, timeout=timeout)
            if content is not None:
                # Verify that what a peer sent actually matches the hash.
                if engine == 'ipfs' and ipfs_enabled:
                    # TODO: get a better way to compare hashes (without depending on IPFS daemon)
                    try:
                        compared_hash = await add_ipfs_bytes(content)
                        if compared_hash != hash:
                            LOGGER.warning(f"Got a bad hash! {hash}/{compared_hash}")
                            content = -1
                    except asyncio.TimeoutError:
                        LOGGER.warning(f"Can't verify hash {hash}")
                        content = None
                elif engine == 'storage':
                    compared_hash = await run_in_executor(None, get_sha256, content)
                    # compared_hash = sha256(content.encode('utf-8')).hexdigest()
                    if compared_hash != hash:
                        LOGGER.warning(f"Got a bad hash! {hash}/{compared_hash}")
                        content = -1
        if content is None:
            # Fall back to the IPFS daemon itself.
            if ipfs_enabled and engine == 'ipfs' and use_ipfs:
                content = await get_ipfs_content(hash,
                                                 timeout=timeout, tries=tries)
        else:
            LOGGER.info(f"Got content from p2p {hash}")
        if content is not None and content != -1 and store_value:
            LOGGER.debug(f"Storing content for{hash}")
            await set_value(hash, content)
    else:
        LOGGER.debug(f"Using stored content for {hash}")
    return content
async def get_json(hash, engine='ipfs', timeout=2, tries=1):
    """Fetch a hash's content and parse it as JSON.

    Returns ``(content, size)`` where *size* is the raw byte length;
    content is ``-1`` for undecodable data and ``None`` when the content
    could not be retrieved at all.
    """
    content = await get_hash_content(hash, engine=engine,
                                     timeout=timeout, tries=tries)
    size = 0
    if content is not None and content != -1:
        size = len(content)
        try:
            content = await json_async_loads(content)
        except json.JSONDecodeError:
            LOGGER.exception("Can't decode JSON")
            content = -1  # never retry, bogus data
    return content, size
async def pin_hash(chash, timeout=2, tries=1):
    """Ask the IPFS daemon to pin *chash* so it is retained locally."""
    return await ipfs_pin_add(chash, timeout=timeout, tries=tries)
async def add_json(value, engine='ipfs'):
    """Serialize *value* as JSON, store it, and return its content hash.

    'ipfs' gets the hash from the IPFS daemon; 'storage' uses the SHA-256
    of the serialized bytes. In both cases the serialized content is also
    persisted to the local filestore.

    Raises NotImplementedError for any other engine.
    """
    # TODO: determine which storage engine to use
    content = await run_in_executor(None, json.dumps, value)
    content = content.encode('utf-8')
    if engine == 'ipfs':
        chash = await add_ipfs_bytes(content)
    elif engine == 'storage':
        # content is guaranteed to be bytes at this point (encoded above),
        # so the previous isinstance(str) re-encode check was dead code.
        chash = sha256(content).hexdigest()
    else:
        raise NotImplementedError('storage engine %s not supported' % engine)
    await set_value(chash, content)
    return chash
async def add_file(fileobject, filename=None, engine='ipfs'):
    """Store an uploaded file object and return its content hash.

    For 'ipfs' the hash comes from the daemon; for 'storage' it is the
    SHA-256 of the file bytes. In both cases the raw content is also
    written to the local filestore.
    """
    file_hash = None
    file_content = None
    if engine == 'ipfs':
        output = await ipfs_add_file(fileobject, filename)
        file_hash = output['Hash']
        fileobject.seek(0)
        file_content = fileobject.read()
    elif engine == 'storage':
        file_content = fileobject.read()
        file_hash = sha256(file_content).hexdigest()
    # NOTE(review): an unknown engine falls through silently here, storing
    # None under key None — presumably unreachable; confirm callers.
    await set_value(file_hash, file_content)
    return file_hash
| 38.697987 | 93 | 0.627818 |
845dcc1b9f7a5b67bbab895ab652b7e57ffb4f24 | 10,252 | py | Python | qa/rpc-tests/listtransactions.py | cryptoanzu/anzucoin | 43417ed5e96be249745f1a93e20be18ccb46da2b | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | cryptoanzu/anzucoin | 43417ed5e96be249745f1a93e20be18ccb46da2b | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | cryptoanzu/anzucoin | 43417ed5e96be249745f1a93e20be18ccb46da2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    transaction = CTransaction()
    transaction.deserialize(BytesIO(hex_str_to_bytes(hexstring)))
    return transaction
class ListTransactionsTest(BitcoinTestFramework):
    """Functional test exercising the listtransactions RPC API.

    Covers simple sends, send-to-self, sendmany with accounts, watch-only
    addresses, and (disabled here) BIP125 opt-in replace-by-fee signalling.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False
    def setup_nodes(self):
        #This test requires mocktime
        enable_mocktime()
        return start_nodes(self.num_nodes, self.options.tmpdir)
    def run_test(self):
        """Check listtransactions output for sends, receives, and accounts."""
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"send"},
                            {"amount":Decimal("-0.2")})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"receive"},
                            {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.33")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.33")},
                            {"txid":txid, "account" : "from1"} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.44")},
                            {"txid":txid, "account" : ""} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.44")},
                            {"txid":txid, "account" : "toself"} )
        # Watch-only: transactions appear only with includeWatchonly=True.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                            {"category":"receive","amount":Decimal("0.1")},
                            {"txid":txid, "account" : "watchonly"} )
        #Anzucoin: Disabled RBF
        #self.run_rbf_opt_in_test()
    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        """Verify the bip125-replaceable field for chained/replaced txs."""
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False
        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None
        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Test-framework entry point: run this test directly as a script.
    ListTransactionsTest().main()
| 49.76699 | 113 | 0.598712 |
c325b644e2f9a594a880516d59d97b1bd653a303 | 2,165 | py | Python | plansys2_executor/launch/executor_launch.py | RTI-BDI/ros2_planning_system | d115b6950ff6fb87ae728af930456f50736b9d8d | [
"Apache-2.0"
] | null | null | null | plansys2_executor/launch/executor_launch.py | RTI-BDI/ros2_planning_system | d115b6950ff6fb87ae728af930456f50736b9d8d | [
"Apache-2.0"
] | null | null | null | plansys2_executor/launch/executor_launch.py | RTI-BDI/ros2_planning_system | d115b6950ff6fb87ae728af930456f50736b9d8d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intelligent Robotics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Build the ROS 2 launch description for the PlanSys2 executor node.

    Declares 'namespace' and 'default_action_bt_xml_filename' launch
    arguments (the latter defaulting to the packaged plansys2_action_bt.xml)
    and starts the executor_node with those plus an optional params_file.
    """
    namespace = LaunchConfiguration('namespace')
    # NOTE(review): 'params_file' is used below but never declared as a
    # launch argument in this file — presumably declared by an including
    # launch file; confirm.
    params_file = LaunchConfiguration('params_file')
    default_action_bt_xml_filename = LaunchConfiguration('default_action_bt_xml_filename')
    declare_namespace_cmd = DeclareLaunchArgument(
        'namespace',
        default_value='',
        description='Namespace')
    declare_default_bt_file_cmd = DeclareLaunchArgument(
        'default_action_bt_xml_filename',
        default_value=os.path.join(
            get_package_share_directory('plansys2_executor'),
            'behavior_trees', 'plansys2_action_bt.xml'),
        description='BT representing a PDDL action')
    # Specify the actions
    executor_cmd = Node(
        package='plansys2_executor',
        executable='executor_node',
        name='executor',
        namespace=namespace,
        output='screen',
        parameters=[
          {
            'default_action_bt_xml_filename': default_action_bt_xml_filename,
            'enable_groot_monitoring': False
          },
          params_file
        ])
    # Create the launch description and populate
    ld = LaunchDescription()
    ld.add_action(declare_namespace_cmd)
    ld.add_action(declare_default_bt_file_cmd)
    # Declare the launch options
    ld.add_action(executor_cmd)
    return ld
| 32.313433 | 90 | 0.72933 |
fc0bd17b63a3c5351244954121624bc88330649e | 253 | py | Python | poop/hfdp/command/remote/ceiling_fan_on_command.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | 37 | 2020-12-27T00:13:07.000Z | 2022-01-31T19:30:18.000Z | poop/hfdp/command/remote/ceiling_fan_on_command.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | null | null | null | poop/hfdp/command/remote/ceiling_fan_on_command.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | 7 | 2020-12-26T22:33:47.000Z | 2021-11-07T01:29:59.000Z | from poop.hfdp.command.remote.ceiling_fan import CeilingFan
class CeilingFanOnCommand:
    """Concrete command object: switch a ceiling fan to high speed."""

    def __init__(self, ceiling_fan: CeilingFan) -> None:
        self.__fan = ceiling_fan

    def execute(self) -> None:
        """Carry out the command by driving the receiver to its high setting."""
        self.__fan.high()
| 25.3 | 59 | 0.72332 |
df07738b26b2596ea3313666f8a725139de65fbc | 240 | py | Python | Python/Algorithms/1337.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | [
"Unlicense"
] | 2 | 2021-01-15T17:22:54.000Z | 2021-05-16T19:58:02.000Z | Python/Algorithms/1337.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | [
"Unlicense"
] | null | null | null | Python/Algorithms/1337.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | [
"Unlicense"
] | null | null | null | class Solution:
def kWeakestRows(self, mat, k: int):
# sort by sum of rows and return row indices as attached by enumerate
return [i for i, _ in sorted(
enumerate(mat), key=lambda x: sum(x[1]))[:k]
]
| 34.285714 | 77 | 0.591667 |
748dff34986e341b263f3691ce1d683672bcc274 | 143 | py | Python | knx_stack/decode/usb_hid/report_body/usb_protocol_header/bus_access_server_feature/device_feature_info/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 2 | 2021-07-28T07:42:28.000Z | 2022-01-25T18:56:05.000Z | knx_stack/decode/usb_hid/report_body/usb_protocol_header/bus_access_server_feature/device_feature_info/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 6 | 2021-07-25T21:36:01.000Z | 2022-02-20T21:11:31.000Z | knx_stack/decode/usb_hid/report_body/usb_protocol_header/bus_access_server_feature/device_feature_info/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | null | null | null | from knx_stack.decode.usb_hid.report_body.usb_protocol_header.bus_access_server_feature.device_feature_info import (
feature_identifier,
)
| 35.75 | 116 | 0.867133 |
2a4b2d11890e0ca8a2058262372e8fafa5545832 | 18,567 | py | Python | armin_analysis/fit_integrator_model.py | arminbahl/mutant_zebrafish_behavior | 17bee04b35c23b0f93fcecac9758e6ba19872be1 | [
"MIT"
] | null | null | null | armin_analysis/fit_integrator_model.py | arminbahl/mutant_zebrafish_behavior | 17bee04b35c23b0f93fcecac9758e6ba19872be1 | [
"MIT"
] | null | null | null | armin_analysis/fit_integrator_model.py | arminbahl/mutant_zebrafish_behavior | 17bee04b35c23b0f93fcecac9758e6ba19872be1 | [
"MIT"
] | null | null | null | from pathlib import Path
import numpy as np
import autograd.numpy as anp
from numba import jit
import random
import pandas as pd
from get_fish_info import get_fish_info
from pymoo.model.problem import Problem
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.factory import get_termination
from pymoo.optimize import minimize
import argparse
from sklearn.metrics import mean_squared_log_error
# Initialize the buffers
dt = 0.01  # simulation time step (seconds)
ts = np.arange(0, 30, dt)  # time axis for one 30 s trial
xs = np.empty_like(ts)  # integrator state trace, reused across trials
all_data = np.empty((10000000, 10)) # Max allow 10 million bouts per simulation run
@jit(nopython=True)
def leaky_integrator_model1(dt, ts, xs, all_data, tau, noise_sigma, T, bout_clock_probability_below_threshold, bout_clock_probability_above_threshold):
    """Simulate swim bouts from a leaky-integrator decision model (variant 1).

    8 fish x 300 trials x 4 coherence levels; a noisy motion estimate is
    leakily integrated (time constant tau). Above threshold |T| the fish
    turns +/-30 deg toward the integrator's sign; below threshold it turns
    toward the sign with a lower probability. Bouts (after the first of a
    trial) are written into all_data; returns the number of rows filled.
    """
    bout_counter = 0
    for fish_ID in range(8):
        for trial in range(300):
            for stim in range(4):
                xs[0] = 0
                previous_bout_time = -1
                previous_heading_angle_change = -1
                for i in range(1, len(ts)):
                    # Coherent motion is only shown between 10 s and 20 s.
                    if 10 <= ts[i] <= 20:
                        coherence = [0, 0.25, 0.5, 1][stim]
                    else:
                        coherence = 0
                    dx = random.gauss(coherence, noise_sigma) - xs[i - 1]
                    xs[i] = xs[i - 1] + dx * dt / tau
                    # Refractory period: no bout within 0.2 s of the last.
                    if previous_bout_time != -1 and ts[i] - previous_bout_time <= 0.2:
                        continue
                    heading_angle_change = 0
                    if xs[i] > T:
                        if random.random() < bout_clock_probability_above_threshold:
                            heading_angle_change = random.gauss(30, 1)
                    elif xs[i] < -T:
                        if random.random() < bout_clock_probability_above_threshold:
                            heading_angle_change = random.gauss(-30, 1)
                    elif random.random() < bout_clock_probability_below_threshold:
                        if xs[i] < 0:
                            heading_angle_change = random.gauss(-30, 1)
                        else:
                            heading_angle_change = random.gauss(30, 1)
                    if heading_angle_change != 0:
                        # The first bout of a trial only seeds the history.
                        if previous_bout_time != -1:
                            all_data[bout_counter, 0] = fish_ID
                            all_data[bout_counter, 1] = 0  # Genotype
                            all_data[bout_counter, 2] = trial
                            all_data[bout_counter, 3] = stim
                            all_data[bout_counter, 4] = ts[i]
                            all_data[bout_counter, 5] = 0
                            all_data[bout_counter, 6] = 0
                            all_data[bout_counter, 7] = ts[i] - previous_bout_time
                            all_data[bout_counter, 8] = heading_angle_change
                            all_data[bout_counter, 9] = np.sign(heading_angle_change) == np.sign(previous_heading_angle_change)
                            bout_counter += 1
                            if bout_counter >= all_data.shape[0]:  # Array is full, must stop (this should not happen)
                                return bout_counter
                        previous_bout_time = ts[i]
                        previous_heading_angle_change = heading_angle_change
    return bout_counter
@jit(nopython=True)
def leaky_integrator_model2(dt, ts, xs, all_data, tau, noise_sigma, T, bout_clock_probability_below_threshold, bout_clock_probability_above_threshold):
    """Simulate swim bouts from a leaky-integrator decision model (variant 2).

    Same structure as model 1, but with a different turn-angle mixture:
    above-threshold bouts draw from N(+/-22, 25) toward the integrator's
    sign, while below-threshold bouts draw undirected angles from N(0, 5).
    Returns the number of rows written into all_data.
    """
    bout_counter = 0
    for fish_ID in range(8):
        for trial in range(300):
            for stim in range(4):
                xs[0] = 0
                previous_bout_time = -1
                previous_heading_angle_change = -1
                for i in range(1, len(ts)):
                    # Coherent motion is only shown between 10 s and 20 s.
                    if 10 <= ts[i] <= 20:
                        coherence = [0, 0.25, 0.5, 1][stim]
                    else:
                        coherence = 0
                    dx = random.gauss(coherence, noise_sigma) - xs[i - 1]
                    xs[i] = xs[i - 1] + dx * dt / tau
                    # Refractory period: no bout within 0.2 s of the last.
                    if previous_bout_time != -1 and ts[i] - previous_bout_time <= 0.2:
                        continue
                    heading_angle_change = 0
                    if xs[i] > T:
                        if random.random() < bout_clock_probability_above_threshold:
                            heading_angle_change = random.gauss(22, 25)
                    elif xs[i] < -T:
                        if random.random() < bout_clock_probability_above_threshold:
                            heading_angle_change = random.gauss(-22, 25)
                    elif random.random() < bout_clock_probability_below_threshold:
                        heading_angle_change = random.gauss(0, 5)
                    if heading_angle_change != 0:
                        # The first bout of a trial only seeds the history.
                        if previous_bout_time != -1:
                            all_data[bout_counter, 0] = fish_ID
                            all_data[bout_counter, 1] = 0
                            all_data[bout_counter, 2] = trial
                            all_data[bout_counter, 3] = stim
                            all_data[bout_counter, 4] = ts[i]
                            all_data[bout_counter, 5] = 0
                            all_data[bout_counter, 6] = 0
                            all_data[bout_counter, 7] = ts[i] - previous_bout_time
                            all_data[bout_counter, 8] = heading_angle_change
                            all_data[bout_counter, 9] = np.sign(heading_angle_change) == np.sign(previous_heading_angle_change)
                            bout_counter += 1
                            if bout_counter >= all_data.shape[0]:  # Array is full, must stop (this should not happen)
                                return bout_counter
                        previous_bout_time = ts[i]
                        previous_heading_angle_change = heading_angle_change
    return bout_counter
def get_target_result(hdf5_path, genotype):
    """Load the experimentally measured summary statistics for one genotype.

    Args:
      hdf5_path: path to the all_data.h5 file produced by the experiment.
      genotype: str, genotype to select within the experimental data.

    Returns:
      A 7-tuple: per-stimulus mean correctness, per-stimulus mean
      inter-bout interval, binned correctness, binned same-direction
      fraction, binned heading-angle-change histogram probabilities,
      binned inter-bout-interval histogram probabilities, and the
      (unfiltered) GMM fitting results.
    """
    def _load(key):
        # All summary tables live in the same HDF5 store under distinct keys.
        return pd.read_hdf(hdf5_path, key=key)

    features = _load("extracted_features").query("genotype == @genotype")
    binned = _load("extracted_binned_features").query("genotype == @genotype")
    same_direction = _load("extracted_binned_features_same_direction").query("genotype == @genotype")
    angle_hists = _load("extracted_binned_features_heading_angle_change_histograms").query("genotype == @genotype")
    ibi_hists = _load("extracted_binned_features_inter_bout_interval_histograms").query("genotype == @genotype")
    gmm_results = _load("gmm_fitting_results")  # intentionally not genotype-filtered

    return (features.groupby("stim").mean()["correctness"],
            features.groupby("stim").mean()["inter_bout_interval"],
            binned.groupby(["stim", "bin"]).mean()["correctness"],
            same_direction.groupby(["bin"]).mean()["same_direction"],
            angle_hists.groupby(["stim", "bin"]).mean()["probability"],
            ibi_hists.groupby(["stim", "bin"]).mean()["probability"],
            gmm_results)
def get_model_result(x):
    """Run the leaky-integrator model for one parameter vector and summarize it.

    Uses the module-level simulation buffers (dt, ts, xs, all_data).

    Args:
      x: sequence of 5 floats: tau, noise_sigma, T, bout-clock probability
        below threshold, bout-clock probability above threshold.

    Returns:
      The same 7-tuple layout as get_target_result(), computed from the
      simulated bouts.
    """
    tau = x[0]
    noise_sigma = x[1]
    threshold = x[2]
    p_below = x[3]
    p_above = x[4]

    n_bouts = leaky_integrator_model2(dt, ts, xs, all_data,
                                      tau, noise_sigma, threshold,
                                      p_below, p_above)

    column_names = ["fish_ID",
                    "genotype",
                    "trial",
                    "stim",
                    "bout_time",
                    "bout_x",
                    "bout_y",
                    "inter_bout_interval",
                    "heading_angle_change",
                    "same_as_previous"]
    # NOTE(review): the slice stops at n_bouts - 1, dropping the last
    # recorded bout; preserved as-is since fit results depend on it.
    df = pd.DataFrame(all_data[:n_bouts - 1], columns=column_names)
    df = df.astype(dtype={"trial": "int64",
                          "stim": "int64",
                          "same_as_previous": "bool"}, copy=False)
    df.set_index(["fish_ID", "genotype", "trial", "stim"], inplace=True)
    df.sort_index(inplace=True)

    (df_extracted_features,
     df_extracted_binned_features,
     df_extracted_binned_features_same_direction,
     df_extracted_binned_features_heading_angle_change_histograms,
     df_extracted_binned_features_inter_bout_interval_histograms,
     df_gmm_fitting_results) = get_fish_info(df)

    return (df_extracted_features.groupby("stim").mean()["correctness"],
            df_extracted_features.groupby("stim").mean()["inter_bout_interval"],
            df_extracted_binned_features.groupby(["stim", "bin"]).mean()["correctness"],
            df_extracted_binned_features_same_direction.groupby(["bin"]).mean()["same_direction"],
            df_extracted_binned_features_heading_angle_change_histograms.groupby(["stim", "bin"]).mean()["probability"],
            df_extracted_binned_features_inter_bout_interval_histograms.groupby(["stim", "bin"]).mean()["probability"],
            df_gmm_fitting_results)
class MyProblem(Problem):
    """pymoo problem fitting the leaky-integrator model to measured fish data.

    Five free parameters (tau, noise_sigma, T, and the two bout-clock
    probabilities) are optimized against five objectives, each an error
    between model summary statistics and the target genotype's statistics.
    """

    def __init__(self, root_path, target_genotype,
                 **kwargs):
        """Load the target statistics and configure the 5-var / 5-obj problem.

        Args:
          root_path: Path to the experiment folder containing all_data.h5.
          target_genotype: str, genotype whose statistics should be matched.
          **kwargs: forwarded to pymoo's Problem (e.g. parallelization).
        """
        super().__init__(n_var=5,
                         n_obj=5,
                         n_constr=0,
                         # Bounds for: tau, noise_sigma, T, p_below, p_above.
                         xl=anp.array([0.1, 0, 0.1, 0.001, 0.001]),
                         xu=anp.array([15, 100, 5, 0.05, 0.05]),
                         elementwise_evaluation=True,
                         **kwargs)
        self.target_df_correctness_as_function_of_coherence, \
            self.target_df_inter_bout_interval_as_function_of_coherence, \
            self.target_df_binned_correctness, \
            self.target_df_binned_same_direction, \
            self.target_df_binned_features_heading_angle_change_histograms, \
            self.target_df_binned_features_inter_bout_interval_histograms, \
            self.target_df_gmm_fitting_results = get_target_result(root_path / "all_data.h5", target_genotype)

    def MSLE(self, y_true, y_pred):
        """Same as sklearn.metrics.mean_squared_log_error but works with NaNs.

        Vectorized with numpy instead of the former per-element Python loop;
        numerically identical. Note that it cannot deal with negative values.

        Args:
          y_true: array-like of target values.
          y_pred: array-like of model values, same length.

        Returns:
          float, NaN-ignoring mean of (log(pred + 1) - log(true + 1)) ** 2.
        """
        y_true = np.asarray(y_true, dtype=np.float64)
        y_pred = np.asarray(y_pred, dtype=np.float64)
        return np.nanmean((np.log(y_pred + 1) - np.log(y_true + 1)) ** 2.0)

    def _evaluate(self, x, out, *args, **kwargs):
        """Run the model for parameter vector x; store the 5 errors in out["F"]."""
        model_df_correctness_as_function_of_coherence, \
            model_df_inter_bout_interval_as_function_of_coherence, \
            model_df_binned_correctness, \
            model_df_binned_same_direction, \
            model_df_binned_features_heading_angle_change_histograms, \
            model_df_binned_features_inter_bout_interval_histograms, \
            model_df_gmm_fitting_results = get_model_result(x)

        # 18. Mai 2021, reviewer request: compare with the mean squared log
        # error for the first four objectives (previously squared distance).
        e0 = self.MSLE(model_df_correctness_as_function_of_coherence.values,
                       self.target_df_correctness_as_function_of_coherence.values)
        e1 = self.MSLE(model_df_inter_bout_interval_as_function_of_coherence.values,
                       self.target_df_inter_bout_interval_as_function_of_coherence.values)
        # Binned correctness: sum the error over stim levels 1-3 (the
        # non-zero coherences in the simulation).
        e2 = self.MSLE(model_df_binned_correctness.loc[1].values, self.target_df_binned_correctness.loc[1].values) + \
            self.MSLE(model_df_binned_correctness.loc[2].values, self.target_df_binned_correctness.loc[2].values) + \
            self.MSLE(model_df_binned_correctness.loc[3].values, self.target_df_binned_correctness.loc[3].values)
        e3 = self.MSLE(model_df_binned_same_direction.values, self.target_df_binned_same_direction.values)
        # Keep plain squared distance for the GMM weight comparison.
        e4 = ((model_df_gmm_fitting_results["w_left"] - self.target_df_gmm_fitting_results["w_left"]) ** 2).sum() + \
            ((model_df_gmm_fitting_results["w_center"] - self.target_df_gmm_fitting_results["w_center"]) ** 2).sum() + \
            ((model_df_gmm_fitting_results["w_right"] - self.target_df_gmm_fitting_results["w_right"]) ** 2).sum()

        out["F"] = [e0, e1, e2, e3, e4]
if __name__ == "__main__":
    # Command-line interface: select experiment, genotype, and RNG seed.
    parser = argparse.ArgumentParser(description='Fits a behavioral model to experimental data.')
    #parser.add_argument('-rp', '--root_path', type=str, help='Path to experimental folder containing the all_data.h5 file.', required=True)
    parser.add_argument('-tg', '--target_genotype', type=str, help='The target genotype within the experimental data.', required=True)
    parser.add_argument('-exp', '--experiment_i', type=int, help='Experiment index.', required=True)
    parser.add_argument('-rs', '--random_seed', type=int, help='Random seed of the optimization algorithm.', required=True)
    args = parser.parse_args()

    experiment_i = args.experiment_i
    target_genotype = args.target_genotype
    random_seed = args.random_seed

    # experiment_i indexes into this fixed list of experiment folders.
    experiment_names = ["surrogate_fish1",
                        "surrogate_fish2",
                        "surrogate_fish3",
                        "scn1lab_sa16474",  # measured before corona lock down
                        "scn1lab_NIBR",  # measured before corona lock down
                        "scn1lab_NIBR_20200708",  # measured after corona lock down
                        "scn1lab_zirc_20200710",  # measured after corona lock down
                        "immp2l_summer",  # membrane transporter in the mitochondrion
                        "immp2l_NIBR",
                        "disc1_hetinx",
                        "chrna2a"]  # not so important

    root_path = Path("/n/home10/abahl/engert_storage_armin/ariel_paper/free_swimming_behavior_data/dot_motion_coherence") / experiment_names[experiment_i]

    print(f"Starting. target_genotype: {target_genotype}; optimization_repeat: {random_seed}, root_path: {root_path}")

    # 51 worker threads evaluate individuals in parallel.
    problem = MyProblem(root_path, target_genotype, parallelization=("threads", 51))

    # NSGA2 sized as a multiple of the thread count (51) and 8 model fish.
    algorithm = NSGA2(
        pop_size=51*8*2,
        n_offsprings=51*8,
        sampling=get_sampling("real_random"),
        crossover=get_crossover("real_sbx", prob=0.9, eta=15),
        mutation=get_mutation("real_pm", eta=20),
        eliminate_duplicates=True)
    termination = get_termination("n_gen", 80)

    res = minimize(problem,
                   algorithm,
                   termination,
                   seed=random_seed,
                   pf=problem.pareto_front(use_cache=False),
                   save_history=True,
                   verbose=True)

    # Collect the population in each generation
    pop_each_gen = [a.pop for a in res.history]
    F_each_gen = [pop.get("F") for pop in pop_each_gen]
    X_each_gen = [pop.get("X") for pop in pop_each_gen]

    print("Done...")
    print("obj_each_gen", np.array(F_each_gen).shape)
    print("X_each_gen", np.array(X_each_gen).shape)

    # Save optimized values
    np.save(root_path / f"review1_leaky_integrator_model2_X_{target_genotype}_{random_seed}.npy", np.array(X_each_gen))
    np.save(root_path / f"review1_leaky_integrator_model2_F_{target_genotype}_{random_seed}.npy", np.array(F_each_gen))
| 50.591281 | 154 | 0.611084 |
d93c0ac401326d46617e072b1f8ae803dfb33b0a | 37,137 | py | Python | _texttable.py | zxx1819/- | 1c04ce5f6b8bdd0ccf592ad02b50b8f901c28cbb | [
"MIT"
] | null | null | null | _texttable.py | zxx1819/- | 1c04ce5f6b8bdd0ccf592ad02b50b8f901c28cbb | [
"MIT"
] | null | null | null | _texttable.py | zxx1819/- | 1c04ce5f6b8bdd0ccf592ad02b50b8f901c28cbb | [
"MIT"
] | null | null | null | """
Google's clitable.py is inherently integrated to Linux:
This is a workaround for that (basically include modified clitable code without anything
that is Linux-specific).
_clitable.py is identical to Google's as of 2017-12-17
_texttable.py is identical to Google's as of 2017-12-17
_terminal.py is a highly stripped down version of Google's such that clitable.py works
https://github.com/google/textfsm/blob/master/clitable.py
A module to represent and manipulate tabular text data.
A table of rows, indexed on row number. Each row is a ordered dictionary of row
elements that maintains knowledge of the parent table and column headings.
Tables can be created from CSV input and in turn support a number of display
formats, such as CSV and variable sized and justified rows.
"""
# Some of this code is from Google with the following license:
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from functools import cmp_to_key
import textwrap
# pylint: disable=redefined-builtin
from six.moves import range
from _textfsm import _terminal as terminal
class Error(Exception):
    """Base exception for all errors raised by this module."""
class TableError(Error):
    """Raised for errors specific to TextTable operations."""
class Row(dict):
    """Represents a table row. We implement this as an ordered dictionary.

    The order is the chronological order of data insertion. Methods are supplied
    to make it behave like a regular dict() and list().

    Attributes:
      row: int, the row number in the container table. 0 is the header row.
      table: A TextTable(), the associated container table.
    """

    def __init__(self, *args, **kwargs):
        super(Row, self).__init__(*args, **kwargs)
        self._keys = list()    # Column names, in insertion order.
        self._values = list()  # Cell values, parallel to _keys.
        self.row = None
        self.table = None
        self._color = None
        self._index = {}       # key -> position, rebuilt on structural change.

    def _BuildIndex(self):
        """Recreate the key index."""
        self._index = {}
        for i, k in enumerate(self._keys):
            self._index[k] = i

    def __getitem__(self, column):
        """Support for [] notation.

        Args:
          column: Tuple of column names, or a (str) column name, or positional
            column number, 0-indexed.

        Returns:
          A list or string with column value(s).

        Raises:
          IndexError: The given column(s) were not found.
        """
        if isinstance(column, (list, tuple)):
            ret = []
            for col in column:
                ret.append(self[col])
            return ret
        try:
            return self._values[self._index[column]]
        except (KeyError, TypeError, ValueError):
            pass
        # Perhaps we have a range like '1', ':-1' or '1:'.
        try:
            return self._values[column]
        except (IndexError, TypeError):
            pass
        raise IndexError('No such column "%s" in row.' % column)

    def __contains__(self, value):
        # NOTE: membership tests against the row's *values*, not its keys.
        return value in self._values

    def __setitem__(self, column, value):
        for i in range(len(self)):
            if self._keys[i] == column:
                self._values[i] = value
                return
        # No column found, add a new one.
        self._keys.append(column)
        self._values.append(value)
        self._BuildIndex()

    def __iter__(self):
        # Iterates values (list-like), not keys (dict-like).
        return iter(self._values)

    def __len__(self):
        return len(self._keys)

    def __str__(self):
        ret = ""
        for v in self._values:
            ret += "%12s " % v
        ret += "\n"
        return ret

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, str(self))

    def get(self, column, default_value=None):
        """Get an item from the Row by column name.

        Args:
          column: Tuple of column names, or a (str) column name, or positional
            column number, 0-indexed.
          default_value: The value to use if the key is not found.

        Returns:
          A list or string with column value(s) or default_value if not found.
        """
        if isinstance(column, (list, tuple)):
            ret = []
            for col in column:
                ret.append(self.get(col, default_value))
            return ret
        # Perhaps we have a range like '1', ':-1' or '1:'.
        try:
            return self._values[column]
        except (IndexError, TypeError):
            pass
        try:
            return self[column]
        except IndexError:
            return default_value

    def index(self, column):  # pylint: disable=C6409
        """Fetches the column number (0 indexed).

        Args:
          column: A string, column to fetch the index of.

        Returns:
          An int, the row index number.

        Raises:
          ValueError: The specified column was not found.
        """
        for i, key in enumerate(self._keys):
            if key == column:
                return i
        raise ValueError('Column "%s" not found.' % column)

    def iterkeys(self):
        return iter(self._keys)

    def items(self):
        # TODO(harro): self.get(k) should work here but didn't ?
        return [(k, self.__getitem__(k)) for k in self._keys]

    def _GetValues(self):
        """Return the row's values."""
        return self._values

    def _GetHeader(self):
        """Return the row's header."""
        return self._keys

    def _SetHeader(self, values):
        """Set the row's header from a list."""
        if self._values and len(values) != len(self._values):
            raise ValueError("Header values not equal to existing data width.")
        if not self._values:
            for _ in range(len(values)):
                self._values.append(None)
        self._keys = list(values)
        self._BuildIndex()

    def _SetColour(self, value_list):
        """Sets row's colour attributes to a list of values in terminal.SGR."""
        if value_list is None:
            self._color = None
            return
        colors = []
        for color in value_list:
            if color in terminal.SGR:
                colors.append(color)
            elif color in terminal.FG_COLOR_WORDS:
                colors += terminal.FG_COLOR_WORDS[color]
            elif color in terminal.BG_COLOR_WORDS:
                colors += terminal.BG_COLOR_WORDS[color]
            else:
                raise ValueError("Invalid colour specification.")
        self._color = list(set(colors))

    def _GetColour(self):
        if self._color is None:
            return None
        return list(self._color)

    def _SetValues(self, values):
        """Set values from supplied dictionary or list.

        Args:
          values: A Row, dict indexed by column name, or list.

        Raises:
          TypeError: Argument is not a list or dict, or list is not equal row
            length or dictionary keys don't match.
        """

        def _ToStr(value):
            """Convert individual list entries to string."""
            if isinstance(value, (list, tuple)):
                result = []
                for val in value:
                    result.append(str(val))
                return result
            else:
                return str(value)

        # Row with identical header can be copied directly.
        if isinstance(values, Row):
            if self._keys != values.header:
                raise TypeError("Attempt to append row with mismatched header.")
            self._values = copy.deepcopy(values.values)
        elif isinstance(values, dict):
            for key in self._keys:
                if key not in values:
                    raise TypeError("Dictionary key mismatch with row.")
            for key in self._keys:
                self[key] = _ToStr(values[key])
        elif isinstance(values, list) or isinstance(values, tuple):
            if len(values) != len(self._values):
                raise TypeError("Supplied list length != row length")
            for (index, value) in enumerate(values):
                self._values[index] = _ToStr(value)
        else:
            # Bug fix: the offending type is now interpolated into the
            # message instead of being passed as a second exception argument.
            raise TypeError(
                "Supplied argument must be Row, dict or list, not %s" % type(values)
            )

    def Insert(self, key, value, row_index):
        """Inserts new values at a specified offset.

        Args:
          key: string for header value.
          value: string for a data value.
          row_index: Offset into row for data.

        Raises:
          IndexError: If the offset is out of bands.
        """
        if row_index < 0:
            row_index += len(self)
        if not 0 <= row_index < len(self):
            raise IndexError('Index "%s" is out of bounds.' % row_index)

        new_row = Row()
        for idx in self.header:
            if self.index(idx) == row_index:
                new_row[key] = value
            new_row[idx] = self[idx]
        self._keys = new_row.header
        self._values = new_row.values
        del new_row
        self._BuildIndex()

    color = property(_GetColour, _SetColour, doc="Colour spec of this row")
    header = property(_GetHeader, _SetHeader, doc="List of row's headers.")
    values = property(_GetValues, _SetValues, doc="List of row's values.")
class TextTable(object):
"""Class that provides data methods on a tabular format.
Data is stored as a list of Row() objects. The first row is always present as
the header row.
Attributes:
row_class: class, A class to use for the Row object.
separator: str, field separator when printing table.
"""
def __init__(self, row_class=Row):
    """Initialises a new table.

    Args:
      row_class: A class to use as the row object. This should be a
        subclass of this module's Row() class.
    """
    self.row_class = row_class  # Factory used when new rows are created.
    self.separator = ", "       # Field separator used by the 'table' output.
    self.Reset()
def Reset(self):
    """Drop all rows and restore the freshly-initialised state."""
    self._row_index = 1  # Current row cursor; row 0 is the header.
    self._table = [[]]   # Row storage; slot 0 holds the header row.
    self._iterator = 0   # While loop row index
def __repr__(self):
    """Return a debug representation of the table."""
    class_name = type(self).__name__
    return "{}({!r})".format(class_name, str(self))
def __str__(self):
    """Displays table with pretty formatting."""
    # 'table' is a property backed by _GetTable().
    return self.table
def __incr__(self, incr=1):
    """Advance the row cursor by incr rows (TableError if past the end)."""
    self._SetRowIndex(self._row_index + incr)
def __contains__(self, name):
    """Whether the given column header name exists."""
    # The header Row's __contains__ checks its values, which for the
    # header row are the column names themselves.
    return name in self.header
def __getitem__(self, row):
    """Fetches the given row number."""
    # Delegates to the underlying list, so slices work too (see __copy__).
    return self._table[row]
def __iter__(self):
    """Iterator that excludes the header row."""
    # next() is a generator yielding each data row in order.
    return self.next()
def next(self):
    """Generator yielding data rows (header excluded), tracking the cursor."""
    # Maintain a counter so a row can know what index it is.
    # Save the old value to support nested iterations.
    old_iter = self._iterator
    try:
        for r in self._table[1:]:
            self._iterator = r.row
            yield r
    finally:
        # Recover the original index after loop termination or exit with break.
        self._iterator = old_iter
def __add__(self, other):
    """Concatenate two tables that share an identical column set."""
    combined = copy.copy(self)
    for entry in other:
        combined.Append(entry)
    return combined
def __copy__(self):
    """Build a fresh table of the same class containing this table's rows."""
    duplicate = self.__class__()
    # pylint: disable=protected-access
    duplicate._table = [self.header]
    for source_row in self[1:]:
        duplicate.Append(source_row)
    return duplicate
def Filter(self, function=None):
    """Construct a TextTable from the rows for which the function is true.

    Args:
      function: A function applied to each row which returns a bool. If
        function is None, all rows with empty column values are
        removed.

    Returns:
      A new TextTable()

    Raises:
      TableError: When an invalid row entry is Append()'d
    """
    def _flatten(value):
        # Join a (possibly nested) list/tuple of strings into one string.
        if isinstance(value, str):
            return value
        return "".join(_flatten(item) for item in value)

    if function is None:
        function = lambda row: bool(_flatten(row.values))  # noqa

    filtered = self.__class__()
    # pylint: disable=protected-access
    filtered._table = [self.header]
    for candidate in self:
        if function(candidate) is True:
            filtered.Append(candidate)
    return filtered
def Map(self, function):
    """Applies the function to every row in the table.

    Args:
      function: A function applied to each row; falsy results are dropped.

    Returns:
      A new TextTable()

    Raises:
      TableError: When transform is not invalid row entry. The transform
        must be compatible with Append().
    """
    mapped = self.__class__()
    # pylint: disable=protected-access
    mapped._table = [self.header]
    for source_row in self:
        transformed = function(source_row)
        if transformed:
            mapped.Append(transformed)
    return mapped
# pylint: disable=C6409
# pylint: disable=W0622
def sort(self, cmp=None, key=None, reverse=False):
    """Sorts rows in the texttable.

    Args:
      cmp: func, non default sort algorithm to use.
      key: func, applied to each element before sorting.
      reverse: bool, reverse order of sort.
    """

    def _DefaultKey(value):
        """Default key func is to create a list of all fields."""
        result = []
        for key in self.header:
            # Try sorting as numerical value if possible.
            try:
                result.append(float(value[key]))
            except ValueError:
                result.append(value[key])
        return result

    key = key or _DefaultKey
    # Exclude header by copying table.
    new_table = self._table[1:]
    # NOTE: when cmp is given it takes precedence over any supplied key.
    if cmp is not None:
        key = cmp_to_key(cmp)
    new_table.sort(key=key, reverse=reverse)

    # Regenerate the table with original header
    self._table = [self.header]
    self._table.extend(new_table)
    # Re-write the 'row' attribute of each row
    for index, row in enumerate(self._table):
        row.row = index
# pylint: enable=W0622
def extend(self, table, keys=None):
    """Extends all rows in the texttable.

    The rows are extended with the new columns from the table.

    Args:
      table: A texttable, the table to extend this table by.
      keys: A set, the set of columns to use as the key. If None, the
        row index is used.

    Raises:
      IndexError: If key is not a valid column name.
    """
    if keys:
        for k in keys:
            if k not in self._Header():
                # Bug fix: interpolate the key into the message instead of
                # passing it as a second exception argument.
                raise IndexError("Unknown key: '%s'" % k)

    # Columns present in the other table but not in ours.
    extend_with = []
    for column in table.header:
        if column not in self.header:
            extend_with.append(column)

    if not extend_with:
        return

    for column in extend_with:
        self.AddColumn(column)

    if not keys:
        # Positional merge: pair rows up by index.
        for row1, row2 in zip(self, table):
            for column in extend_with:
                row1[column] = row2[column]
        return

    # Keyed merge: copy from the first row whose key columns all match.
    for row1 in self:
        for row2 in table:
            for k in keys:
                if row1[k] != row2[k]:
                    break
            else:
                for column in extend_with:
                    row1[column] = row2[column]
                break
# pylint: enable=C6409
def Remove(self, row):
    """Removes a row from the table.

    Args:
      row: int, the row number to delete. Must be >= 1, as the header
        cannot be removed.

    Raises:
      TableError: Attempt to remove nonexistent or header row.
    """
    if row == 0 or row > self.size:
        raise TableError("Attempt to remove header row")
    new_table = []
    # pylint: disable=E1103
    for t_row in self._table:
        if t_row.row != row:
            new_table.append(t_row)
        if t_row.row > row:
            # Renumber the rows that followed the removed one.
            t_row.row -= 1
    self._table = new_table
def _Header(self):
    """Returns the header row."""
    return self._table[0]
def _GetRow(self, columns=None):
    """Returns the current row as a tuple."""
    # NOTE(review): despite the docstring, this returns a Row (or a list
    # when 'columns' is given), not a tuple.
    row = self._table[self._row_index]
    if columns:
        result = []
        for col in columns:
            if col not in self.header:
                raise TableError("Column header %s not known in table." % col)
            result.append(row[self.header.index(col)])
        row = result
    return row
def _SetRow(self, new_values, row=0):
    """Sets the current row to new list.

    Args:
      new_values: List|dict of new values to insert into row.
      row: int, Row to insert values into; 0 means the current row cursor.

    Raises:
      TableError: If number of new values is not equal to row size.
    """
    if not row:
        row = self._row_index
    if row > self.size:
        raise TableError("Entry %s beyond table size %s." % (row, self.size))
    # Delegates validation/conversion to Row's 'values' property setter.
    self._table[row].values = new_values
def _SetHeader(self, new_values):
    """Sets header of table to the given tuple.

    Args:
      new_values: Tuple of new header values.
    """
    row = self.row_class()
    row.row = 0
    for v in new_values:
        # In the header row each column name maps to itself.
        row[v] = v
    self._table[0] = row
def _SetRowIndex(self, row):
    """Sets the current row cursor (1-based; the header is not addressable)."""
    if not row or row > self.size:
        raise TableError("Entry %s beyond table size %s." % (row, self.size))
    self._row_index = row
def _GetRowIndex(self):
    """Returns the current row cursor."""
    return self._row_index
def _GetSize(self):
    """Return the number of data rows (the header row is not counted)."""
    if self._table:
        return len(self._table) - 1
    return 0
def _GetTable(self):
    """Returns table, with column headers and separators.

    Returns:
      The whole table including headers as a string. Each row is
      joined by a newline and each entry by self.separator.
    """
    sep = self.separator
    return "".join(
        "%s\n" % sep.join(str(cell) for cell in row) for row in self._table
    )
def _SetTable(self, table):
    """Sets table, with column headers and separators."""
    if not isinstance(table, TextTable):
        raise TypeError("Not an instance of TextTable.")
    self.Reset()
    # Deep-copy so later edits to either table don't affect the other.
    self._table = copy.deepcopy(table._table)  # pylint: disable=W0212
    # Point parent table of each row back ourselves.
    for row in self:
        row.table = self
def _SmallestColSize(self, text):
    """Finds the largest indivisible word of a string.

    ...and thus the smallest possible column width that can contain that
    word unsplit over rows.

    Args:
      text: A string of text potentially consisting of words.

    Returns:
      Integer size of the largest single word in the text.
    """
    if not text:
        return 0
    stripped = terminal.StripAnsiText(text)
    words = stripped.split()
    if not words:
        # Bug fix: whitespace-only text previously crashed max() with an
        # empty sequence; treat it the same as empty text.
        return 0
    return max(len(word) for word in words)
def _TextJustify(self, text, col_size):
    """Formats text within column with white space padding.

    A single space is prefixed, and a number of spaces are added as a
    suffix such that the length of the resultant string equals the col_size.

    If the length of the text exceeds the column width available then it
    is split into words and returned as a list of string, each string
    contains one or more words padded to the column size.

    Args:
      text: String of text to format.
      col_size: integer size of column to pad out the text to.

    Returns:
      List of strings col_size in length.

    Raises:
      TableError: If col_size is too small to fit the words in the text.
    """
    result = []
    if "\n" in text:
        # Justify each embedded paragraph independently.
        for paragraph in text.split("\n"):
            result.extend(self._TextJustify(paragraph, col_size))
        return result

    # -2 reserves the single-space padding on each side of the cell.
    wrapper = textwrap.TextWrapper(
        width=col_size - 2, break_long_words=False, expand_tabs=False
    )
    try:
        text_list = wrapper.wrap(text)
    except ValueError:
        raise TableError("Field too small (minimum width: 3)")

    if not text_list:
        return [" " * col_size]

    for current_line in text_list:
        stripped_len = len(terminal.StripAnsiText(current_line))
        # Invisible ANSI escape characters must not count against the width.
        ansi_color_adds = len(current_line) - stripped_len
        # +2 for white space on either side.
        if stripped_len + 2 > col_size:
            raise TableError("String contains words that do not fit in column.")
        result.append(" %-*s" % (col_size - 1 + ansi_color_adds, current_line))

    return result
def FormattedTable(
    self,
    width=80,
    force_display=False,
    ml_delimiter=True,
    color=True,
    display_header=True,
    columns=None,
):
    """Returns whole table, with whitespace padding and row delimiters.

    Args:
      width: An int, the max width we want the table to fit in.
      force_display: A bool, if set to True will display table when the table
        can't be made to fit to the width.
      ml_delimiter: A bool, if set to False will not display the multi-line
        delimiter.
      color: A bool. If true, display any colours in row.colour.
      display_header: A bool. If true, display header.
      columns: A list of str, show only columns with these names.

    Returns:
      A string. The tabled output.

    Raises:
      TableError: Width too narrow to display table.
    """

    def _FilteredCols():
        """Returns list of column names to display."""
        if not columns:
            return self._Header().values
        return [col for col in self._Header().values if col in columns]

    # Largest is the biggest data entry in a column.
    largest = {}
    # Smallest is the same as above but with linewrap i.e. largest unbroken
    # word in the data stream.
    smallest = {}
    # largest == smallest for a column with a single word of data.
    # Initialise largest and smallest for all columns.
    for key in _FilteredCols():
        largest[key] = 0
        smallest[key] = 0

    # Find the largest and smallest values.
    # Include Title line in equation.
    # pylint: disable=E1103
    for row in self._table:
        for key, value in row.items():
            if key not in _FilteredCols():
                continue
            # Convert lists into a string.
            if isinstance(value, list):
                value = ", ".join(value)
            value = terminal.StripAnsiText(value)
            largest[key] = max(len(value), largest[key])
            smallest[key] = max(self._SmallestColSize(value), smallest[key])
    # pylint: enable=E1103

    min_total_width = 0
    multi_word = []
    # Bump up the size of each column to include minimum pad.
    # Find all columns that can be wrapped (multi-line).
    # And the minimum width needed to display all columns (even if wrapped).
    for key in _FilteredCols():
        # Each column is bracketed by a space on both sides.
        # So increase size required accordingly.
        largest[key] += 2
        smallest[key] += 2
        min_total_width += smallest[key]
        # If column contains data that 'could' be split over multiple lines.
        if largest[key] != smallest[key]:
            multi_word.append(key)

    # Check if we have enough space to display the table.
    if min_total_width > width and not force_display:
        raise TableError("Width too narrow to display table.")

    # We have some columns that may need wrapping over several lines.
    if multi_word:
        # Find how much space is left over for the wrapped columns to use.
        # Also find how much space we would need if they were not wrapped.
        # These are 'spare_width' and 'desired_width' respectively.
        desired_width = 0
        spare_width = width - min_total_width
        for key in multi_word:
            spare_width += smallest[key]
            desired_width += largest[key]

        # Scale up the space we give each wrapped column.
        # Proportional to its size relative to 'desired_width' for all columns.
        # Rinse and repeat if we changed the wrap list in this iteration.
        # Once done we will have a list of columns that definitely need wrapping.
        # NOTE(review): multi_word is mutated while being iterated; the
        # enclosing while-not-done loop re-scans until stable, which
        # compensates for any entries skipped within a single pass.
        done = False
        while not done:
            done = True
            for key in multi_word:
                # If we scale past the desired width for this particular column,
                # then give it its desired width and remove it from the wrapped list.
                if largest[key] <= round(
                    (largest[key] / float(desired_width)) * spare_width
                ):
                    smallest[key] = largest[key]
                    multi_word.remove(key)
                    spare_width -= smallest[key]
                    desired_width -= largest[key]
                    done = False
                # If we scale below the minimum width for this particular column,
                # then leave it at its minimum and remove it from the wrapped list.
                elif smallest[key] >= round(
                    (largest[key] / float(desired_width)) * spare_width
                ):
                    multi_word.remove(key)
                    spare_width -= smallest[key]
                    desired_width -= largest[key]
                    done = False

        # Repeat the scaling algorithm with the final wrap list.
        # This time we assign the extra column space by increasing 'smallest'.
        for key in multi_word:
            smallest[key] = int(
                round((largest[key] / float(desired_width)) * spare_width)
            )

    total_width = 0
    row_count = 0
    result_dict = {}
    # Format the header lines and add to result_dict.
    # Find what the total width will be and use this for the ruled lines.
    # Find how many rows are needed for the most wrapped line (row_count).
    for key in _FilteredCols():
        result_dict[key] = self._TextJustify(key, smallest[key])
        if len(result_dict[key]) > row_count:
            row_count = len(result_dict[key])
        total_width += smallest[key]

    # Store header in header_list, working down the wrapped rows.
    header_list = []
    for row_idx in range(row_count):
        for key in _FilteredCols():
            try:
                header_list.append(result_dict[key][row_idx])
            except IndexError:
                # If no value then use whitespace of equal size.
                header_list.append(" " * smallest[key])
        header_list.append("\n")

    # Format and store the body lines
    result_dict = {}
    body_list = []
    # We separate multi line rows with a single line delimiter.
    prev_muli_line = False
    # Unless it is the first line in which there is already the header line.
    first_line = True
    for row in self:
        row_count = 0
        for key, value in row.items():
            if key not in _FilteredCols():
                continue
            # Convert field contents to a string.
            if isinstance(value, list):
                value = ", ".join(value)
            # Store results in result_dict and take note of wrapped line count.
            result_dict[key] = self._TextJustify(value, smallest[key])
            if len(result_dict[key]) > row_count:
                row_count = len(result_dict[key])
        if row_count > 1:
            prev_muli_line = True
        # If current or prior line was multi-line then include delimiter.
        if not first_line and prev_muli_line and ml_delimiter:
            body_list.append("-" * total_width + "\n")
            if row_count == 1:
                # Our current line was not wrapped, so clear flag.
                prev_muli_line = False

        row_list = []
        for row_idx in range(row_count):
            for key in _FilteredCols():
                try:
                    row_list.append(result_dict[key][row_idx])
                except IndexError:
                    # If no value then use whitespace of equal size.
                    row_list.append(" " * smallest[key])
            row_list.append("\n")

        if color and row.color is not None:
            # Don't care about colors
            body_list.append("".join(row_list))
            # body_list.append(
            #     terminal.AnsiText(''.join(row_list)[:-1],
            #                       command_list=row.color))
            # body_list.append('\n')
        else:
            body_list.append("".join(row_list))
        first_line = False

    header = "".join(header_list) + "=" * total_width
    if color and self._Header().color is not None:
        pass
        # header = terminal.AnsiText(header, command_list=self._Header().color)
    # Add double line delimiter between header and main body.
    if display_header:
        return "%s\n%s" % (header, "".join(body_list))
    return "%s" % "".join(body_list)
def LabelValueTable(self, label_list=None):
    """Returns whole table as rows of name/value pairs.

    One (or more) column entries are used for the row prefix label.
    The remaining columns are each displayed as a row entry with the
    prefix labels appended.

    Use the first column as the label if label_list is None.

    Args:
      label_list: A list of prefix labels to use.

    Returns:
      Label/Value formatted table.

    Raises:
      TableError: If specified label is not a column header of the table.
    """
    if not label_list:
        # NOTE(review): this takes the first header *string*; the loop below
        # then iterates its characters — confirm [self._Header()[0]] was not
        # intended.
        label_list = self._Header()[0]
    # Every requested prefix must be a known column.
    for label in label_list:
        if label not in self._Header():
            raise TableError("Invalid label prefix: %s." % label)
    # Keep prefix columns in header order rather than caller order.
    prefix_columns = [col for col in self._Header() if col in label_list]
    label_str = "# LABEL %s\n" % ".".join(prefix_columns)
    lines = []
    for row in self:
        prefix_parts = []
        pairs = []
        for column, cell in row.items():
            if column in prefix_columns:
                # This column's value becomes part of the row label.
                prefix_parts.append(cell)
            else:
                pairs.append("%s %s" % (column, cell))
        prefix = ".".join(prefix_parts)
        lines.append("".join("%s.%s\n" % (prefix, pair) for pair in pairs))
    return "%s%s" % (label_str, "".join(lines))
# Expose the internal accessor methods through attribute-style properties.
table = property(_GetTable, _SetTable, doc="Whole table")
row = property(_GetRow, _SetRow, doc="Current row")
header = property(_Header, _SetHeader, doc="List of header entries.")
row_index = property(_GetRowIndex, _SetRowIndex, doc="Current row.")
size = property(_GetSize, doc="Number of rows in table.")
def RowWith(self, column, value):
    """Retrieves the first non header row with the column of the given value.

    Args:
      column: str, the name of the column to check.
      value: str, The value of the column to check.

    Returns:
      A Row() of the first row found, None otherwise.

    Raises:
      IndexError: The specified column does not exist.
    """
    # Entry 0 holds the header row, so the scan starts at index 1.
    return next((row for row in self._table[1:] if row[column] == value), None)
def AddColumn(self, column, default="", col_index=-1):
    """Appends a new column to the table.

    Args:
      column: A string, name of the column to add.
      default: Default value for entries. Defaults to ''.
      col_index: Integer index for where to insert new column.

    Raises:
      TableError: Column name already exists.
    """
    if column in self.table:
        raise TableError("Column %r already in table." % column)
    header_row = self._table[0]
    data_rows = self._table[1:]
    if col_index == -1:
        # Append at the end: plain item assignment on each row.
        header_row[column] = column
        for data_row in data_rows:
            data_row[column] = default
    else:
        # Insert at the requested position within each row.
        header_row.Insert(column, column, col_index)
        for data_row in data_rows:
            data_row.Insert(column, default, col_index)
def Append(self, new_values):
    """Adds a new row (list) to the table.

    Args:
      new_values: Tuple, dict, or Row() of new values to append as a row.

    Raises:
      TableError: Supplied tuple not equal to table width.
    """
    fresh_row = self.NewRow()
    # Width validation happens on assignment (per Raises above).
    fresh_row.values = new_values
    self._table.append(fresh_row)
def NewRow(self, value=""):
    """Fetches a new, empty row, with headers populated.

    Args:
      value: Initial value to set each row entry to.

    Returns:
      A Row() object.
    """
    fresh = self.row_class()
    fresh.row = self.size + 1
    fresh.table = self
    # Pre-populate every known column with the initial value.
    for column in self._Header():
        fresh[column] = value
    return fresh
def CsvToTable(self, buf, header=True, separator=","):
    """Parses buffer into tabular format.

    Strips off comments (preceded by '#').
    Optionally parses and indexes by first line (header).

    Args:
      buf: String file buffer containing CSV data.
      header: Is the first line of buffer a header.
      separator: String that CSV is separated by.

    Returns:
      int, the size of the table created.

    Raises:
      TableError: A parsing error occurred.
    """
    self.Reset()
    header_row = self.row_class()
    if header:
        # Skip leading comment/blank lines until real header content appears.
        line = buf.readline()
        header_str = ""
        while not header_str:
            # Remove comments.
            header_str = line.split("#")[0].strip()
            if not header_str:
                line = buf.readline()
        column_names = header_str.split(separator)
        header_length = len(column_names)
        for raw_name in column_names:
            name = raw_name.strip()
            if name in header_row:
                raise TableError("Duplicate header entry %r." % name)
            header_row[name] = name
        header_row.row = 0
        self._table[0] = header_row
    # xreadlines would be better but not supported by StringIO for testing.
    for line in buf:
        # Support commented lines, provide '#' is first character of line.
        if line.startswith("#"):
            continue
        cells = [cell.strip() for cell in line.split(separator)]
        if header and len(cells) != header_length:
            # Silently drop illegal line entries
            continue
        if not header:
            # No header supplied: synthesize one from the first data line,
            # using the column indices as names.
            header_row = self.row_class()
            header_length = len(cells)
            header_row.values = dict(
                zip(range(header_length), range(header_length))
            )
            self._table[0] = header_row
            header = True
            continue
        new_row = self.NewRow()
        new_row.values = cells
        # NOTE(review): this mutates header_row.row each iteration while
        # NewRow() already numbers new_row — looks like it was meant to be
        # new_row.row; kept as-is to preserve behavior, confirm upstream.
        header_row.row = self.size + 1
        self._table.append(new_row)
    return self.size
def index(self, name=None):  # pylint: disable=C6409
    """Returns index number of supplied column name.

    Args:
      name: string of column name.

    Raises:
      TableError: If name not found.

    Returns:
      Index of the specified header entry.
    """
    # Check membership first so an unknown name maps to the table's own
    # error type instead of a bare ValueError.
    if name not in self.header:
        raise TableError("Unknown index name %s." % name)
    return self.header.index(name)
| 33.128457 | 89 | 0.57541 |
56afe8841f6756f37f300d81109986e68e6b808c | 229 | py | Python | registrobrepp/domain/nshostatt.py | ivcmartello/registrobrepp | dece39a451bcdb964d337df6aa7bd418a60c1a85 | [
"MIT"
] | null | null | null | registrobrepp/domain/nshostatt.py | ivcmartello/registrobrepp | dece39a451bcdb964d337df6aa7bd418a60c1a85 | [
"MIT"
] | null | null | null | registrobrepp/domain/nshostatt.py | ivcmartello/registrobrepp | dece39a451bcdb964d337df6aa7bd418a60c1a85 | [
"MIT"
] | null | null | null | from eppy.xmldict import XmlDictObject
class NsHostAtt(XmlDictObject):
    """EPP <ns> element wrapping a list of <hostAttr> entries."""

    def __init__(self, hostsattr: list):
        # The underlying XmlDictObject is initialised from a plain dict
        # keyed by the EPP element name.
        payload = {
            'hostAttr': hostsattr,
        }
        super(NsHostAtt, self).__init__(initdict=payload)
| 22.9 | 53 | 0.637555 |
08c5c2bbd1f73754e665374038fac7a53f892414 | 4,627 | py | Python | infrastructure/constructs/batch_job_queue.py | adisbladis/geostore | 79439c06b33414e1e26b3aa4b93a72fd7cbbae83 | [
"MIT"
] | 25 | 2021-05-19T08:05:07.000Z | 2022-03-14T02:48:58.000Z | infrastructure/constructs/batch_job_queue.py | adisbladis/geostore | 79439c06b33414e1e26b3aa4b93a72fd7cbbae83 | [
"MIT"
] | 311 | 2021-05-17T23:04:56.000Z | 2022-03-31T10:41:44.000Z | infrastructure/constructs/batch_job_queue.py | adisbladis/geostore | 79439c06b33414e1e26b3aa4b93a72fd7cbbae83 | [
"MIT"
] | 1 | 2022-01-03T05:38:32.000Z | 2022-01-03T05:38:32.000Z | import textwrap
from aws_cdk import aws_batch, aws_dynamodb, aws_ec2, aws_iam
from aws_cdk.core import Construct, Fn
from geostore.environment import PRODUCTION_ENVIRONMENT_NAME
APPLICATION_NAME_TAG_NAME = "ApplicationName"
APPLICATION_NAME = "geostore"
class BatchJobQueue(Construct):
def __init__(
self,
scope: Construct,
construct_id: str,
*,
env_name: str,
processing_assets_table: aws_dynamodb.Table,
):
# pylint: disable=too-many-locals
super().__init__(scope, construct_id)
if env_name == PRODUCTION_ENVIRONMENT_NAME:
instance_types = [
aws_ec2.InstanceType("c5.xlarge"),
aws_ec2.InstanceType("c5.2xlarge"),
aws_ec2.InstanceType("c5.4xlarge"),
aws_ec2.InstanceType("c5.9xlarge"),
]
else:
instance_types = [
aws_ec2.InstanceType("m5.large"),
aws_ec2.InstanceType("m5.xlarge"),
]
ec2_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
"service-role/AmazonEC2ContainerServiceforEC2Role"
)
batch_instance_role = aws_iam.Role(
self,
"batch-instance-role",
assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"), # type: ignore[arg-type]
managed_policies=[ec2_policy],
)
processing_assets_table.grant_read_write_data(batch_instance_role) # type: ignore[arg-type]
batch_instance_profile = aws_iam.CfnInstanceProfile(
self,
"batch-instance-profile",
roles=[batch_instance_role.role_name],
)
batch_launch_template_data = textwrap.dedent(
"""
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
--==MYBOUNDARY==
Content-Type: text/x-shellscript; charset="us-ascii"
#!/bin/bash
echo ECS_IMAGE_PULL_BEHAVIOR=prefer-cached >> /etc/ecs/ecs.config
--==MYBOUNDARY==--
"""
)
launch_template_data = aws_ec2.CfnLaunchTemplate.LaunchTemplateDataProperty(
user_data=Fn.base64(batch_launch_template_data.strip())
)
cloudformation_launch_template = aws_ec2.CfnLaunchTemplate(
self,
"batch-launch-template",
launch_template_name=f"{env_name}-geostore-batch-launch-template",
launch_template_data=launch_template_data,
)
assert cloudformation_launch_template.launch_template_name is not None
launch_template = aws_batch.LaunchTemplateSpecification(
launch_template_name=cloudformation_launch_template.launch_template_name
)
# use existing VPC in LINZ AWS account.
# VPC with these tags is required to exist in AWS account before being deployed.
# A VPC will not be deployed by this project.
vpc = aws_ec2.Vpc.from_lookup(
self,
"geostore-vpc",
tags={
APPLICATION_NAME_TAG_NAME: APPLICATION_NAME,
"ApplicationLayer": "networking",
},
)
compute_resources = aws_batch.ComputeResources(
vpc=vpc,
minv_cpus=0,
desiredv_cpus=0,
maxv_cpus=1000,
instance_types=instance_types,
instance_role=batch_instance_profile.instance_profile_name,
allocation_strategy=aws_batch.AllocationStrategy("BEST_FIT_PROGRESSIVE"),
launch_template=launch_template,
)
batch_service_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
"service-role/AWSBatchServiceRole"
)
service_role = aws_iam.Role(
self,
"batch-service-role",
assumed_by=aws_iam.ServicePrincipal("batch.amazonaws.com"), # type: ignore[arg-type]
managed_policies=[batch_service_policy],
)
compute_environment = aws_batch.ComputeEnvironment(
self,
"compute-environment",
compute_resources=compute_resources,
service_role=service_role, # type: ignore[arg-type]
)
self.job_queue = aws_batch.JobQueue(
scope,
f"{construct_id}-job-queue",
compute_environments=[
aws_batch.JobQueueComputeEnvironment(
compute_environment=compute_environment, order=10 # type: ignore[arg-type]
),
],
priority=10,
)
| 35.320611 | 100 | 0.610979 |
8bf23614362c87649d81555fb80491f23d0a7945 | 268 | py | Python | mirari/mirari/taskapp.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | mirari/mirari/taskapp.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | 18 | 2019-12-27T19:58:20.000Z | 2022-02-27T08:17:49.000Z | mirari/mirari/taskapp.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from django.apps import apps
@shared_task
def send_mail_task(app=None, model=None, pk=None):
    """Celery task: load ``app.model`` instance ``pk`` and call its send_mail()."""
    apps.get_model(app, model).objects.get(pk=pk).send_mail()
    return 'send_mail_task OK'
d33d1b6d098b6ca17298937f51fc4b8a4391c950 | 946 | py | Python | 01-Zaman_Bukme_Filtresi/ZamanBukmeFiltresi.py | gokdumano/Python | 69705688607364e2702f19cba3a9a31416b18666 | [
"Unlicense"
] | null | null | null | 01-Zaman_Bukme_Filtresi/ZamanBukmeFiltresi.py | gokdumano/Python | 69705688607364e2702f19cba3a9a31416b18666 | [
"Unlicense"
] | null | null | null | 01-Zaman_Bukme_Filtresi/ZamanBukmeFiltresi.py | gokdumano/Python | 69705688607364e2702f19cba3a9a31416b18666 | [
"Unlicense"
] | null | null | null | import cv2 as cv
# Slit-scan ("time warp") filter: each frame freezes one pixel column of the
# live feed onto a canvas, sweeping left to right across the image.
import cv2 as cv
import numpy as np

IPCAM = ...
VIDEO = cv.VideoCapture(IPCAM)
ONLINE, FRAME = VIDEO.read()

if ONLINE:
    imW = int(VIDEO.get(cv.CAP_PROP_FRAME_WIDTH))
    imH = int(VIDEO.get(cv.CAP_PROP_FRAME_HEIGHT))
    FPS = int(VIDEO.get(cv.CAP_PROP_FPS))
    ONESECOND = 1000
    WAIT = int(ONESECOND / FPS)
    COLOR, INDX = (0, 55, 255), 0
    CANVAS, THICC = np.zeros_like(FRAME), 3
    while VIDEO.isOpened():
        ONLINE, FRAME = VIDEO.read()
        if INDX < imW:
            # Freeze the current column onto the canvas and draw the sweep line.
            CANVAS[:, INDX, :] = FRAME[:, INDX, :]
            cv.line(FRAME, (INDX, 0), (INDX, imH), COLOR, thickness=THICC)
            INDX += 1
        cv.imshow("SONUC", np.hstack((FRAME, CANVAS)))
        # if cv.waitKey(WAIT) & 0xFF == ord('q'):
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
        elif cv.waitKey(1) & 0xFF == ord('r'):
            # 'r' restarts the sweep from the left edge.
            INDX = 0
        if INDX == imW:
            # Sweep finished: save the composed image exactly once.
            cv.imwrite("deneme.jpg", CANVAS)
            INDX += 1
        print(INDX)

if VIDEO is not None:
    VIDEO.release()
cv.destroyAllWindows()
| 28.666667 | 65 | 0.609937 |
a0664d29a96d24cae5c953cd6be505f4742bbcb3 | 5,260 | py | Python | cwru/utils/dataopenset.py | diagnosisda/dxda | a27071c56a106d32c18a77521a9384dec9663cb8 | [
"Apache-2.0"
] | 6 | 2019-12-10T14:04:24.000Z | 2021-12-14T11:59:47.000Z | cwru/utils/dataopenset.py | diagnosisda/dxda | a27071c56a106d32c18a77521a9384dec9663cb8 | [
"Apache-2.0"
] | 1 | 2019-12-13T20:07:54.000Z | 2019-12-13T20:15:24.000Z | cwru/utils/dataopenset.py | diagnosisda/dxda | a27071c56a106d32c18a77521a9384dec9663cb8 | [
"Apache-2.0"
] | 7 | 2019-12-10T09:05:13.000Z | 2022-02-22T07:18:33.000Z | # ETH Zurich, IBI-CIMS, Qin Wang (wang@qin.ee)
# Utils for PHM datasets
import numpy as np
import tensorflow as tf
#import matplotlib.pyplot as plt
from scipy.io import loadmat
from glob import glob
def get_cwru_list(load, dir="./data/cwru/", mode="all"):
    """Return glob'd CWRU .mat file lists per fault class for one bearing load.

    Args:
      load: An int chosen from [0, 1, 2, 3] specifying the domain (bearing
        load).
      dir: (Optional) Root directory for the CWRU dataset, where all the
        .mat files live.
      mode: (Optional) "all", "20%" or "50%" -- how many fault classes to
        include.

    Returns:
      A dictionary mapping class label -> list of filenames; e.g.
      get_cwru_list(1)[2] gives the files for load 1, class 2.
    """
    suffix = str(load) + "_*.mat"
    patterns = {
        0: dir + "normal_" + str(load) + "*.mat",
        1: dir + "12k_Drive_End_IR007_" + suffix,
        2: dir + "12k_Drive_End_IR014_" + suffix,
        3: dir + "12k_Drive_End_IR021_" + suffix,
        4: dir + "12k_Drive_End_B007_" + suffix,
        5: dir + "12k_Drive_End_B014_" + suffix,
        6: dir + "12k_Drive_End_B021_" + suffix,
        7: dir + "12k_Drive_End_OR007@6_" + suffix,
        8: dir + "12k_Drive_End_OR014@6_" + suffix,
        9: dir + "12k_Drive_End_OR021@6_" + suffix,
    }
    # "20%" keeps classes {0,1}; "50%" keeps {0..4}; anything else keeps all.
    if mode == "20%":
        keep = range(2)
    elif mode == "50%":
        keep = range(5)
    else:
        keep = range(10)
    return {label: glob(patterns[label]) for label in keep}
def read_cwru_mat(filename, length=1024, sample=200, scaling=False, fft=True,
                  truncate=False):
    """Read a single CWRU .mat file and preprocess it into sample windows.

    Args:
      filename: A String, name of the .mat file.
      length: An Int, length of each raw sample window.
      sample: An Int, number of windows taken uniformly from the series.
      scaling: A boolean, use orthonormal FFT normalisation when True.
      fft: A boolean, extract FFT magnitude features when True.
      truncate: A boolean(False) or an int limiting how much of the signal
        is used.

    Returns:
      A list of preprocessed sample windows from this file.
    """
    mat = loadmat(filename)
    # Pick the drive-end accelerometer channel (key contains "DE_time").
    de_key = [k for k in mat.keys() if "DE_time" in k][0]
    signal = mat[de_key].reshape([-1])
    assert(sample <= len(signal) - length + 1)
    if "normal" in filename:
        # Normal-baseline files are downsampled by 4 here; presumably they
        # were recorded at 4x the fault sampling rate -- TODO confirm.
        signal = signal[::4]
    if truncate:
        print("filename", filename)
        print("Before Truncate:", len(signal))
    if truncate:  # 120000
        signal = signal[:truncate]
        print("After Truncate:", len(signal))
    # Split the series into `sample` evenly spaced windows of `length`.
    stride = (len(signal) - length) // (sample - 1)
    windows = [signal[i:i + length]
               for i in range(0, len(signal) - length + 1, stride)]
    # Rounding in the stride can yield one extra window; keep exactly `sample`.
    windows = windows[:sample]
    if fft:
        # The spectrum is symmetric, so keep only the first half.
        if scaling:
            transform = lambda sig: abs(np.fft.fft(sig, norm="ortho")[:len(sig) // 2])
        else:
            transform = lambda sig: abs(np.fft.fft(sig)[:len(sig) // 2])
        windows = [transform(w) for w in windows]
    return windows
def load_cwru(load, dir="./data/cwru/", shuffle=False, length=1024, sample=200,
              scaling=False, fft=True, truncate=False, mode="all"):
    """Load CWRU data for one bearing load into numpy arrays.

    Args:
      load: An int from [0, 1, 2, 3], specifying the bearing load.
      dir: (Optional) Root directory for the CWRU dataset.
      shuffle: A boolean, shuffle the samples or not.
      length: An Int, length of each raw sample window.
      sample: An Int, number of windows taken uniformly per file.
      scaling: A boolean, orthonormal FFT scaling.
      fft: A boolean, FFT feature extraction or not.
      truncate: A boolean(False) or an int limiting signal length.
      mode: (Optional) "all", "20%" or "50%".

    Returns:
      Two numpy arrays (data, labels); data has a trailing channel axis.
    """
    per_class_files = get_cwru_list(load, dir=dir, mode=mode)
    data, labels = [], []
    for label, filenames in per_class_files.items():
        for filename in filenames:
            samples = read_cwru_mat(filename, length=length, sample=sample,
                                    scaling=scaling, fft=fft, truncate=truncate)
            data.extend(samples)
            labels.extend([label] * len(samples))
    data, labels = np.array(data), np.array(labels)
    print(data.shape)
    # Width is `length` for raw windows, length//2 for FFT features.
    assert(data.shape[1] == length or data.shape[1] == length // 2)
    if shuffle:
        perm = np.random.permutation(len(data))
        data, labels = data[perm], labels[perm]
    data = np.expand_dims(data, axis=-1)
    return np.float32(data), labels
| 40.775194 | 111 | 0.591635 |
4e712e984914dbb533126b52c2d28575e807cdd4 | 884 | py | Python | chapter11-iface-abc/tombola_runner.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | 1 | 2019-11-23T05:57:02.000Z | 2019-11-23T05:57:02.000Z | chapter11-iface-abc/tombola_runner.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | null | null | null | chapter11-iface-abc/tombola_runner.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | 1 | 2019-11-23T05:57:43.000Z | 2019-11-23T05:57:43.000Z | # -*- coding: utf-8 -*-
"""
tombola_runner.py
Tombola子类的测试运行程序
"""
import doctest
from tombola import Tombola
import bingo
import lotto
import tombolist
import drum # 要测试的模块
TEST_FILE = 'tombola_tests.rst'
TEST_MSG = '{0:16} {1.attempted:2} tests, {1.failed:2} failed - {2}'
def main(argv):
verbose = '-v' in argv
real_subclasses = Tombola.__subclasses__() # 2
virtual_subclasses = list(Tombola._abc_registry) # 3
for cls in real_subclasses + virtual_subclasses: # 4
test(cls, verbose)
def test(cls, verbose=False):
res = doctest.testfile(
TEST_FILE,
globs={'ConcreteTombola': cls}, # 5
verbose=verbose,
optionflags=doctest.REPORT_ONLY_FIRST_FAILURE)
tag = 'FAIL' if res.failed else 'OK'
print(TEST_MSG.format(cls.__name__, res, tag)) # 6
if __name__ == '__main__':
import sys
main(sys.argv)
| 21.560976 | 68 | 0.66629 |
4272752368164524ca3095b8f18b9e7f4e3ba31b | 422 | py | Python | src/waldur_slurm/migrations/0023_drop_spl.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_slurm/migrations/0023_drop_spl.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_slurm/migrations/0023_drop_spl.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from django.db import migrations
from waldur_core.core.migration_utils import build_spl_migrations
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_project_backend_id'),
('waldur_slurm', '0022_allocation_user_usage_mandatory_fields'),
]
operations = build_spl_migrations(
'waldur_slurm', 'SlurmService', 'SlurmServiceProjectLink', ('allocation',),
)
| 26.375 | 83 | 0.727488 |
b5d963ec37af27ec1c948dde95c0127129aa627f | 550 | py | Python | Chapter07/crash_course_qt_for_python/hello_connect_simple.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 62 | 2019-03-18T04:41:41.000Z | 2022-03-31T05:03:13.000Z | Chapter07/crash_course_qt_for_python/hello_connect_simple.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 2 | 2020-06-14T21:56:03.000Z | 2022-01-07T05:32:01.000Z | Chapter07/crash_course_qt_for_python/hello_connect_simple.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 42 | 2019-02-22T03:10:36.000Z | 2022-02-20T04:47:04.000Z | import sys
# Minimal PySide2 demo: clicking the "Hello" button changes the label text.
import sys
from PySide2.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QLabel
from PySide2 import QtCore

app = QApplication(sys.argv)
hello_button = QPushButton("Hello")
world_label = QLabel("Sun")

layout = QVBoxLayout()
layout.addWidget(hello_button)
layout.addWidget(world_label)

def set_text_in_world_label():
    # Slot: swap the label text when the button is clicked.
    world_label.setText("World")

# Old-style (string-based) signal/slot connection.
hello_button.connect(QtCore.SIGNAL('clicked()'), set_text_in_world_label)

window = QWidget()
window.setLayout(layout)
window.resize(200, 200)
window.show()
sys.exit(app.exec_())
| 22 | 85 | 0.785455 |
9e3b0396850919ca7ccb3ae2e4d4444ed90cb797 | 4,077 | py | Python | naive_stringify.py | alanamarzoev/structured-realm | 23f54711c18a209fc6a0f1c030a74d140c4e23f3 | [
"Apache-2.0"
] | 1 | 2021-07-27T02:45:34.000Z | 2021-07-27T02:45:34.000Z | naive_stringify.py | alanamarzoev/structured-realm | 23f54711c18a209fc6a0f1c030a74d140c4e23f3 | [
"Apache-2.0"
] | null | null | null | naive_stringify.py | alanamarzoev/structured-realm | 23f54711c18a209fc6a0f1c030a74d140c4e23f3 | [
"Apache-2.0"
] | null | null | null | import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
import tensorflow_hub as hub
import json
import pandas as pd
from absl import app
from absl import flags
import abc
from concurrent import futures
import time
import os
import bert
from bert import tokenization
from absl import logging
from language.realm import featurization
from language.realm import parallel
from language.realm import profile
from language.realm.refresh_doc_embeds import load_featurizer
from subprocess import check_call
import numpy as np
def get_dataframes():
    """Parse tables.jsonl into per-table metadata dicts keyed by caption.

    Each JSONL record is turned into a pandas DataFrame; its JSON body plus
    page/section titles and id are stored. Results are also appended to
    tables_preproc_large.jsonl as a side effect.
    """
    with open('tables.jsonl', 'r') as j:
        raw_lines = j.readlines()
    tbls = {}
    for raw in raw_lines:
        contents = json.loads(raw)
        # Column order comes from the first header row.
        col_order = [col['text'] for col in contents['tableHeaders'][0]]
        table = {name: [] for name in col_order}
        for row_cells in contents['tableData']:
            for col_idx, cell in enumerate(row_cells):
                text = cell['text']
                if text == '':
                    # Drop empty cells (this may leave columns ragged).
                    continue
                table[col_order[col_idx]].append(text)
        try:
            print('tbls len: {}'.format(len(tbls)))
            if len(tbls) > 1000:
                break
            frame = pd.DataFrame.from_dict(table)
            caption = contents['tableCaption']
            table_info = {
                'body': frame.to_json(),
                'sec_title': contents['sectionTitle'],
                'pgtitle': contents['pgTitle'],
                'id': contents['tableId'],
                'title': caption,
            }
            tbls[caption] = table_info
        except Exception as e:
            # Ragged/odd tables fail DataFrame construction; skip them.
            print(e)
            continue
    with open('tables_preproc_large.jsonl', 'a+') as g:
        for k, v in tbls.items():
            g.write(json.dumps(v) + '\n')
    return tbls
# def load_doc(tbls):
# print(tf.executing_eagerly())
# docs = []
# # params_path = os.path.join('out', "estimator_params.json")
# # with tf.gfile.GFile(params_path) as f:
# # params = json.load(f)
# # tokenizer = featurization.Tokenizer(
# # vocab_path=params["vocab_path"], do_lower_case=params["do_lower_case"])
# bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
# trainable=False)
# vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
# # to_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
# tokenizer = tokenization.FullTokenizer(
# vocab_file=vocab_file, do_lower_case=True)
# for capt, tbl_info in tbls.items():
# title = capt
# body = tbl_info['data'].to_string()
# doc_uid = tbl_info['id']
# title_token_ids = tokenizer.tokenize(title)
# title_token_ids = tokenizer.convert_tokens_to_ids(title_token_ids)
# body_token_ids = tokenizer.tokenize(body)
# body_token_ids = tokenizer.convert_tokens_to_ids(body_token_ids)
# doc = featurization.Document(
# uid=doc_uid,
# title_token_ids=title_token_ids,
# body_token_ids=body_token_ids)
# docs.append(doc)
# return docs
def main(argv):
    """Entry point: build table dicts from tables.jsonl.

    Fix: load_doc() is commented out at module level, so calling it here
    raised NameError at runtime. The call is now disabled like the other
    commented-out pipeline steps, and the parsed tables are returned so
    callers can still use the result.
    """
    # example_path = '/raid/lingo/marzoev/structured-realm/language/realm-data-small/pretrain_corpus_small/wikipedia_annotated_with_dates_public-00000-of-00020.tfrecord.gz'
    # load_doc(example_path)
    tbls = get_dataframes()
    # convert_dataframes(tbls)
    # load_doc(tbls)  # disabled: load_doc is commented out above (NameError)
    return tbls
FLAGS = flags.FLAGS
# flags.DEFINE_boolean('preserve_unused_tokens', True, '')

# Entry point: absl's app.run parses flags then invokes main(argv).
if __name__ == '__main__':
    app.run(main)
    # main()
| 32.879032 | 172 | 0.600442 |
a55a07a88dd2b2c1a06252838a1c63193541047b | 965 | py | Python | statisitc.py | LeeJiangWei/sensors-positioning | 7e2421190755aa73a32c9e8e2ce82c294ce5d435 | [
"WTFPL"
] | null | null | null | statisitc.py | LeeJiangWei/sensors-positioning | 7e2421190755aa73a32c9e8e2ce82c294ce5d435 | [
"WTFPL"
] | null | null | null | statisitc.py | LeeJiangWei/sensors-positioning | 7e2421190755aa73a32c9e8e2ce82c294ce5d435 | [
"WTFPL"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
def plot_fitness_history(file: str, real_file: str):
history = np.load(file)
real_history = np.load(real_file)
x = np.arange(0, len(history))
rx = np.arange(0, len(real_history)) * 20
plt.plot(x, history, label=file)
plt.plot(rx, real_history, label=real_file)
def plot_points(anchors, sensors, max_range):
fig, ax = plt.subplots()
plt.xlim(0, max_range)
plt.ylim(0, max_range)
ax.scatter(sensors[:, 0], sensors[:, 1], s=1, c="blue")
ax.scatter(anchors[:, 0], anchors[:, 1], c="red")
plt.show()
if __name__ == '__main__':
plot_fitness_history("./history/NP100CR0.1F0.5.npy", "./history/NP100CR0.1F0.5_real.npy")
plot_fitness_history("./history/NP100CR0.1F0.5_jade.npy", "./history/NP100CR0.1F0.5_real_jade.npy")
plt.legend()
plt.show()
from main import anchors, sensors, LOCATION_RANGE
# plot_points(anchors, sensors, LOCATION_RANGE)
| 30.15625 | 103 | 0.676684 |
da89dce1a882b17b3f3945b6c5878f193a141fec | 4,088 | py | Python | day14.py | coocos/advent-of-code-2018 | fc238ccc0903d9f1f272b2c59b8069d4223c7c8a | [
"MIT"
] | null | null | null | day14.py | coocos/advent-of-code-2018 | fc238ccc0903d9f1f272b2c59b8069d4223c7c8a | [
"MIT"
] | null | null | null | day14.py | coocos/advent-of-code-2018 | fc238ccc0903d9f1f272b2c59b8069d4223c7c8a | [
"MIT"
] | null | null | null | from typing import Optional
class Node:
"""
Simple linked list with append and move operations
"""
def __init__(self, value: int) -> None:
self.value = value
self.next: Optional['Node'] = None
def append(self, node: 'Node') -> 'Node':
"""
Appends to the end of the linked list
"""
head = self
while head.next:
head = head.next
head.next = node
return head.next
def move(self, root: 'Node') -> 'Node':
"""
Returns the node value + 1 steps further in the linked list.
If the end of the linked list is reached before value + 1 steps then
the head is set to the passed node and the iteration continues until
value + 1 steps are reached.
"""
node = self
for _ in range(1 + self.value):
node = node.next
if node is None:
node = root
return node
def first_half(root: Node, offset: int, n: int) -> str:
    """
    Grow the circular recipe list until offset + n recipes exist, then return
    the n recipe scores that follow the first `offset` recipes as a string.
    Two "elves" each advance 1 + current score positions per round, and the
    digits of their combined score become new recipes.
    """
    elf_a = root
    elf_b = root.next
    tail = elf_b
    count = 2  # recipes created so far
    while count <= offset + n:
        # Digits of the combined score become the next recipe(s).
        tens, ones = divmod(elf_a.value + elf_b.value, 10)
        if tens:
            tail = tail.append(Node(tens))
            count += 1
        tail = tail.append(Node(ones))
        count += 1
        # Each elf advances 1 + its current recipe score steps.
        elf_a = elf_a.move(root)
        elf_b = elf_b.move(root)
    # Skip the first `offset` recipes.
    head = root
    for _ in range(offset):
        head = head.next
    # Collect the next n recipe scores.
    digits = []
    for _ in range(n):
        digits.append(str(head.value))
        head = head.next
    return ''.join(digits)
def second_half(root: Node, sequence: str) -> int:
    """
    Grow the recipe list until the most recent recipes spell out `sequence`,
    then return how many recipes precede that occurrence.

    The running match length indexes into the target; when it runs past the
    end an IndexError fires, which signals that the whole sequence matched.
    """
    elf_a = root
    elf_b = root.next
    tail = elf_b
    count = 2    # recipes created so far
    matched = 0  # digits of the target matched by the latest recipes
    goal = [int(ch) for ch in sequence]
    while True:
        # Digits of the combined score become the next recipe(s).
        tens, ones = divmod(elf_a.value + elf_b.value, 10)
        try:
            if tens:
                tail = tail.append(Node(tens))
                count += 1
                if tens == goal[matched]:
                    matched += 1
                else:
                    matched = 0
            tail = tail.append(Node(ones))
            count += 1
            if ones == goal[matched]:
                matched += 1
            else:
                matched = 0
            # A failed match may still start a fresh one on this digit.
            if matched == 0 and ones == goal[matched]:
                matched += 1
        except IndexError:
            # goal[matched] ran off the end: the whole sequence matched.
            return count - len(sequence) - 1
        # Move elves to their next recipes.
        elf_a = elf_a.move(root)
        elf_b = elf_b.move(root)
if __name__ == '__main__':
    # Solve first half the puzzle
    root = Node(3)
    root.next = Node(7)
    assert first_half(root, 909441, 10) == '2615161213'
    # Solve second half of the puzzle (fresh list, first run mutated it)
    root = Node(3)
    root.next = Node(7)
    assert second_half(root, '909441') == 20403320
| 29.2 | 79 | 0.58635 |
c0cbe0ab668f5e7cb067272051dcafcec54d6e55 | 6,813 | py | Python | Lib/site-packages/nltk/tag/__init__.py | gamroder/TEST | 6da6ba96c6ff19b0db4ac44429d3de5623f24ff7 | [
"bzip2-1.0.6"
] | 7 | 2020-11-29T02:33:23.000Z | 2021-06-28T04:45:31.000Z | env/lib/python3.7/site-packages/nltk/tag/__init__.py | Victorpc98/CE888-Project | 99c20adc78eb53ac4d3c87543ef8da1ef4d10adc | [
"MIT"
] | 6 | 2020-09-05T01:40:23.000Z | 2022-03-12T00:40:58.000Z | env/lib/python3.7/site-packages/nltk/tag/__init__.py | Victorpc98/CE888-Project | 99c20adc78eb53ac4d3c87543ef8da1ef4d10adc | [
"MIT"
] | 3 | 2020-09-24T18:26:36.000Z | 2021-06-30T10:55:26.000Z | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Taggers
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
NLTK Taggers
This package contains classes and interfaces for part-of-speech
tagging, or simply "tagging".
A "tag" is a case-sensitive string that specifies some property of a token,
such as its part of speech. Tagged tokens are encoded as tuples
``(tag, token)``. For example, the following tagged token combines
the word ``'fly'`` with a noun part of speech tag (``'NN'``):
>>> tagged_tok = ('fly', 'NN')
An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset:
>>> from nltk import pos_tag, word_tokenize
>>> pos_tag(word_tokenize("John's big idea isn't all that bad."))
[('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
A Russian tagger is also available if you specify lang="rus". It uses
the Russian National Corpus tagset:
>>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP
[('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'),
('бумажку', 'S'), ('.', 'NONLEX')]
This package defines several taggers, which take a list of tokens,
assign a tag to each one, and return the resulting list of tagged tokens.
Most of the taggers are built automatically based on a training corpus.
For example, the unigram tagger tags each word *w* by checking what
the most frequent tag for *w* was in a training corpus:
>>> from nltk.corpus import brown
>>> from nltk.tag import UnigramTagger
>>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
>>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
>>> for word, tag in tagger.tag(sent):
... print(word, '->', tag)
Mitchell -> NP
decried -> None
the -> AT
high -> JJ
rate -> NN
of -> IN
unemployment -> None
Note that words that the tagger has not seen during training receive a tag
of ``None``.
We evaluate a tagger on data that was not seen during training:
>>> tagger.evaluate(brown.tagged_sents(categories='news')[500:600])
0.73...
For more information, please consult chapter 5 of the NLTK Book.
"""
from __future__ import print_function
from nltk.tag.api import TaggerI
from nltk.tag.util import str2tuple, tuple2str, untag
from nltk.tag.sequential import (
SequentialBackoffTagger,
ContextTagger,
DefaultTagger,
NgramTagger,
UnigramTagger,
BigramTagger,
TrigramTagger,
AffixTagger,
RegexpTagger,
ClassifierBasedTagger,
ClassifierBasedPOSTagger,
)
from nltk.tag.brill import BrillTagger
from nltk.tag.brill_trainer import BrillTaggerTrainer
from nltk.tag.tnt import TnT
from nltk.tag.hunpos import HunposTagger
from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
from nltk.tag.mapping import tagset_mapping, map_tag
from nltk.tag.crf import CRFTagger
from nltk.tag.perceptron import PerceptronTagger
from nltk.data import load, find
# Path (relative to nltk_data) of the pickled averaged-perceptron model
# used when tagging with lang='rus'.
RUS_PICKLE = (
    'taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle'
)
def _get_tagger(lang=None):
    """Return the recommended PerceptronTagger for *lang*.

    For ``lang='rus'`` an empty tagger is created and the pickled Russian
    National Corpus model is loaded into it; every other value falls back
    to the default (English) model.
    """
    if lang != 'rus':
        return PerceptronTagger()
    # Build an unloaded tagger, then point it at the Russian model pickle.
    russian_tagger = PerceptronTagger(False)
    russian_tagger.load('file:' + str(find(RUS_PICKLE)))
    return russian_tagger
def _pos_tag(tokens, tagset=None, tagger=None, lang=None):
    """Tag *tokens* with *tagger*, optionally mapping tags to *tagset*.

    :param tokens: list of word strings to tag
    :param tagset: target tagset name (e.g. 'universal'), or None to keep
        the tagger's native tags
    :param tagger: a tagger object exposing ``tag(tokens)``
    :param lang: 'eng' or 'rus'; any other value raises NotImplementedError
    :return: list of ``(token, tag)`` tuples
    :raises NotImplementedError: if *lang* is not supported
    """
    # Currently only supports English and Russian.
    if lang not in ['eng', 'rus']:
        raise NotImplementedError(
            "Currently, NLTK pos_tag only supports English and Russian "
            "(i.e. lang='eng' or lang='rus')"
        )
    # Guard clause above replaces the original redundant else-nesting.
    tagged_tokens = tagger.tag(tokens)
    if tagset:  # Maps to the specified tagset.
        if lang == 'eng':
            tagged_tokens = [
                (token, map_tag('en-ptb', tagset, tag))
                for (token, tag) in tagged_tokens
            ]
        elif lang == 'rus':
            # The Russian model emits tags with '='-separated suffixes;
            # strip the suffix before mapping, see
            # https://github.com/nltk/nltk/issues/2151#issuecomment-430709018
            tagged_tokens = [
                (token, map_tag('ru-rnc-new', tagset, tag.partition('=')[0]))
                for (token, tag) in tagged_tokens
            ]
    return tagged_tokens
def pos_tag(tokens, tagset=None, lang='eng'):
    """
    Use NLTK's currently recommended part of speech tagger to
    tag the given list of tokens.
    >>> from nltk.tag import pos_tag
    >>> from nltk.tokenize import word_tokenize
    >>> pos_tag(word_tokenize("John's big idea isn't all that bad."))
    [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
    ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
    >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal')
    [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'),
    ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]
    NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence.
    :param tokens: Sequence of tokens to be tagged
    :type tokens: list(str)
    :param tagset: the tagset to be used, e.g. universal, wsj, brown
    :type tagset: str
    :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
    :type lang: str
    :return: The tagged tokens
    :rtype: list(tuple(str, str))
    """
    # Instantiate the recommended tagger for this language, then delegate.
    # Note the tagger is built fresh per call; use pos_tag_sents() to amortize.
    tagger = _get_tagger(lang)
    return _pos_tag(tokens, tagset, tagger, lang)
def pos_tag_sents(sentences, tagset=None, lang='eng'):
    """
    Use NLTK's currently recommended part of speech tagger to tag the
    given list of sentences, each consisting of a list of tokens.
    :param tokens: List of sentences to be tagged
    :type tokens: list(list(str))
    :param tagset: the tagset to be used, e.g. universal, wsj, brown
    :type tagset: str
    :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
    :type lang: str
    :return: The list of tagged sentences
    :rtype: list(list(tuple(str, str)))
    """
    # Build the tagger once and reuse it for every sentence (this is the
    # efficiency win over calling pos_tag() per sentence).
    tagger = _get_tagger(lang)
    return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences]
| 37.640884 | 107 | 0.644356 |
39fed0299ea0db3bb937ff899f0abacfbf90b521 | 207 | py | Python | meta_features/__init__.py | mostafaelaraby/generalization-gap-features-tensorflow | efec157d790a43a3fdeed81f3bf521023f6a83af | [
"Apache-2.0"
] | null | null | null | meta_features/__init__.py | mostafaelaraby/generalization-gap-features-tensorflow | efec157d790a43a3fdeed81f3bf521023f6a83af | [
"Apache-2.0"
] | null | null | null | meta_features/__init__.py | mostafaelaraby/generalization-gap-features-tensorflow | efec157d790a43a3fdeed81f3bf521023f6a83af | [
"Apache-2.0"
] | null | null | null | from .margin_distribution import MarginDistribution
from .weights import Weights
from .spectral_margin import SpectralMargin
Enabled_Features = [
MarginDistribution,
Weights,
SpectralMargin,
]
| 18.818182 | 51 | 0.797101 |
87bcab42119713ff3e6582bb9cf7b19e39f7ff4d | 78 | py | Python | lib/vehicles/__init__.py | jkjung-avt/py-faster-rcnn | 4f9d6a5164844f651e41d431512cfac0c570fae9 | [
"BSD-2-Clause"
] | 2 | 2018-05-29T01:58:26.000Z | 2020-04-28T09:21:17.000Z | lib/vehicles/__init__.py | jkjung-avt/py-faster-rcnn | 4f9d6a5164844f651e41d431512cfac0c570fae9 | [
"BSD-2-Clause"
] | null | null | null | lib/vehicles/__init__.py | jkjung-avt/py-faster-rcnn | 4f9d6a5164844f651e41d431512cfac0c570fae9 | [
"BSD-2-Clause"
] | 2 | 2019-03-06T00:59:58.000Z | 2019-04-07T03:32:03.000Z | # For vehicle detection, illegal parking detection and traffic counting, etc.
| 39 | 77 | 0.807692 |
efddaf2b2acd59562108f2bdbb15cd6c6eec4ffd | 1,244 | py | Python | app/tests/test_regressions_tests.py | FundingCircle/DjanGoat | 013c7367294682955daf9eba205270bd2f9725cd | [
"MIT"
] | 1 | 2019-05-07T09:49:25.000Z | 2019-05-07T09:49:25.000Z | app/tests/test_regressions_tests.py | FundingCircle/DjanGoat | 013c7367294682955daf9eba205270bd2f9725cd | [
"MIT"
] | null | null | null | app/tests/test_regressions_tests.py | FundingCircle/DjanGoat | 013c7367294682955daf9eba205270bd2f9725cd | [
"MIT"
] | null | null | null |
from django.test import TestCase, RequestFactory, Client
from app.tests.mixins import AuthRouteTestingWithKwargs
from app.views import user_retirement_views
class PassingWrongArgumentRegressionTests(TestCase, AuthRouteTestingWithKwargs):
    """Regression test: the user-retirement index view must receive its
    route kwargs with the expected type (``user_id`` as a plain int)."""

    def setUp(self):
        AuthRouteTestingWithKwargs.__init__(self)
        # Class for testing api index view
        self.factory = RequestFactory()
        self.client = Client()
        self.route_name = 'app:user_retirement_index'
        user_id = self.mixin_model.id
        self.route = '/users/{0}/retirement'.format(user_id)
        self.view = user_retirement_views.user_retirement_index
        # Expected HTTP status per method: only GET/POST are allowed.
        self.responses = {
            'exists': 200,
            'GET': 200,
            'POST': 200,
            'PUT': 405,
            'PATCH': 405,
            'DELETE': 405,
            'HEAD': 405,
            'OPTIONS': 405,
            'TRACE': 405,
        }
        self.kwargs = {'user_id': int(user_id)}
        self.expected_response_content = "Employer Contribution"

    def dummy_function_that_represents_a_view(self, user_id):
        # Exact type check (not isinstance) — bools must not pass.
        return type(user_id) is int

    def test_kwarg_type_is_correct(self):
        self.assertTrue(
            self.dummy_function_that_represents_a_view(**self.kwargs))
9046505b1b9ae9bd750b9a1828e9dbbaabe4b2af | 2,741 | py | Python | rhea/models/elink/_easic.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | 1 | 2022-03-16T23:56:09.000Z | 2022-03-16T23:56:09.000Z | rhea/models/elink/_easic.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | null | null | null | rhea/models/elink/_easic.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | null | null | null |
from __future__ import absolute_import
from myhdl import *
from rhea.cores.elink import ELink
from rhea.cores.elink import EMeshPacket
from rhea.models import FIFO
def elink_asic_model(elink):
    """ Model a simple ELink device (something like Epiphany)
    Spawns three myhdl generators: an RX process that deserializes
    13-byte EMesh packets from the link into an input FIFO, a processing
    loop that forwards access packets to an output FIFO, and a TX process
    that serializes processed packets back onto the link.
    :param elink: Interface to the external ELink enabled device
    :return: myhdl generators
    not convertible.
    """
    assert isinstance(elink, ELink)
    # get the tx and rx links based on this logical location
    tx, rx = elink.connect('south')
    gclk = tx.instances()
    pkt_i_fifo = FIFO(depth=128)
    pkt_o_fifo = FIFO(depth=128)
    @instance
    def p_rx_packets():
        """ receive packets and push onto processing FIFO """
        while True:
            yield rx.frame.posedge
            bytes = []
            yield rx.lclk.posedge
            while rx.frame:
                # One byte per link clock while frame is asserted.
                bytes.append(intbv(int(rx.data))[8:])
                if len(bytes) == 13:
                    # A full 13-byte wire packet has been collected.
                    pkt = EMeshPacket()
                    pkt.frombytes(bytes)
                    yield delay(1)
                    pkt_i_fifo.write(pkt)
                    # print("[easic] RX packet {} {}".format(pkt, pkt_i_fifo))
                    # @todo: if FIFO full assert wait
                    bytes = []
                yield rx.lclk.posedge
            # @todo: if len(bytes) != 13 report error - partial packet
    # @todo: simulate EMesh routing
    @instance
    def p_proc_packets():
        """ process packets """
        # idelay models a one-time processing latency per burst.
        idelay = False
        while True:
            pkt = pkt_i_fifo.read()
            if pkt is not None and pkt.access:
                if not idelay:
                    yield delay(17)
                    idelay = True
                pkt_o_fifo.write(pkt)
                # print("[easic] PROC packet {} {}".format(pkt, pkt_o_fifo))
            if pkt_i_fifo.is_empty():
                # Burst done: re-arm the latency and sleep until more input.
                idelay = False
                yield pkt_i_fifo.empty.negedge
    @instance
    def p_tx_packets():
        """ transmit processed packets """
        while True:
            if not pkt_o_fifo.is_empty():
                pkt = pkt_o_fifo.read()
                # print("[easic] TX packet {} {}".format(pkt, pkt_o_fifo))
                # @todo: if len of FIFO > 2, shuffle order
                bytes = pkt.tobytes()
                for bb in bytes:
                    # Drive frame high for the duration of the packet.
                    tx.frame.next = True
                    tx.data.next = bb
                    yield tx.lclk.posedge
                # packet complete clear frame
                tx.frame.next = False
                if pkt_o_fifo.is_empty():
                    yield pkt_o_fifo.empty.negedge
            else:
                yield tx.lclk.posedge
    return gclk, p_rx_packets, p_proc_packets, p_tx_packets
| 31.147727 | 78 | 0.529734 |
3302ae171567e147f697340731ad8a79988028b7 | 10,359 | py | Python | python/dgl/_ffi/ndarray.py | mori97/dgl | 646d1ab186b3280b99267e106da6b0b8ad12a8ba | [
"Apache-2.0"
] | 1 | 2019-01-28T06:36:05.000Z | 2019-01-28T06:36:05.000Z | python/dgl/_ffi/ndarray.py | tbmihailov/dgl | ed1948b5555106dee133cef91ed9ecfd3bd4310d | [
"Apache-2.0"
] | null | null | null | python/dgl/_ffi/ndarray.py | tbmihailov/dgl | ed1948b5555106dee133cef91ed9ecfd3bd4310d | [
"Apache-2.0"
] | 1 | 2022-02-03T09:45:53.000Z | 2022-02-03T09:45:53.000Z | # pylint: disable=invalid-name, unused-import
"""Runtime NDArray api"""
from __future__ import absolute_import
import sys
import ctypes
import numpy as np
from .base import _LIB, check_call, c_array, string_types, _FFI_MODE, c_str
from .runtime_ctypes import DGLType, DGLContext, DGLArray, DGLArrayHandle
from .runtime_ctypes import TypeCode, dgl_shape_index_t
IMPORT_EXCEPT = RuntimeError if _FFI_MODE == "cython" else ImportError
try:
# pylint: disable=wrong-import-position
if _FFI_MODE == "ctypes":
raise ImportError()
if sys.version_info >= (3, 0):
from ._cy3.core import _set_class_ndarray, _reg_extension, _make_array, _from_dlpack
from ._cy3.core import NDArrayBase as _NDArrayBase
else:
from ._cy2.core import _set_class_ndarray, _reg_extension, _make_array, _from_dlpack
from ._cy2.core import NDArrayBase as _NDArrayBase
except IMPORT_EXCEPT:
# pylint: disable=wrong-import-position
from ._ctypes.ndarray import _set_class_ndarray, _reg_extension, _make_array, _from_dlpack
from ._ctypes.ndarray import NDArrayBase as _NDArrayBase
def context(dev_type, dev_id=0):
    """Construct a DGL context with given device type and id.

    Parameters
    ----------
    dev_type: int or str
        The device type mask or name of the device.
    dev_id : int, optional
        The integer device id

    Returns
    -------
    ctx: DGLContext
        The corresponding context.

    Examples
    --------
    Context can be used to create reflection of context by
    string representation of the device type.
    .. code-block:: python
        assert dgl.context("cpu", 1) == dgl.cpu(1)
        assert dgl.context("gpu", 0) == dgl.gpu(0)
        assert dgl.context("cuda", 0) == dgl.gpu(0)
    """
    if isinstance(dev_type, string_types):
        # Keep only the first whitespace-separated token, e.g. "gpu 0" -> "gpu".
        name = dev_type.split()[0]
        if name not in DGLContext.STR2MASK:
            raise ValueError("Unknown device type %s" % name)
        dev_type = DGLContext.STR2MASK[name]
    return DGLContext(dev_type, dev_id)
def numpyasarray(np_data):
    """Return a DGLArray representation of a numpy array.
    Zero-copy: the DGLArray's data pointer aliases ``np_data``'s buffer,
    which is why both the struct and the ctypes ``shape`` array are
    returned — the caller must keep them (and ``np_data``) alive while
    the DGLArray is in use.
    """
    data = np_data
    assert data.flags['C_CONTIGUOUS']
    arr = DGLArray()
    shape = c_array(dgl_shape_index_t, data.shape)
    arr.data = data.ctypes.data_as(ctypes.c_void_p)
    arr.shape = shape
    # C-contiguous layout: strides may be left NULL in the C struct.
    arr.strides = None
    arr.dtype = DGLType(np.dtype(data.dtype).name)
    arr.ndim = data.ndim
    # CPU device
    arr.ctx = context(1, 0)
    return arr, shape
def empty(shape, dtype="float32", ctx=context(1, 0)):
    """Create an empty array given shape and device
    Parameters
    ----------
    shape : tuple of int
        The shape of the array
    dtype : type or str
        The data type of the array.
    ctx : DGLContext
        The context of the array (default: a CPU context created once at
        import time when this ``def`` is evaluated).
    Returns
    -------
    arr : dgl.nd.NDArray
        The array dgl supported.
    """
    # Marshal the Python arguments into ctypes values for the C API.
    shape = c_array(dgl_shape_index_t, shape)
    ndim = ctypes.c_int(len(shape))
    handle = DGLArrayHandle()
    dtype = DGLType(dtype)
    check_call(_LIB.DGLArrayAlloc(
        shape, ndim,
        ctypes.c_int(dtype.type_code),
        ctypes.c_int(dtype.bits),
        ctypes.c_int(dtype.lanes),
        ctx.device_type,
        ctx.device_id,
        ctypes.byref(handle)))
    # False: the handle is owned by the returned array (not borrowed).
    return _make_array(handle, False)
def from_dlpack(dltensor):
    """Produce an array from a DLPack tensor without memory copy.
    Retrieves the underlying DLPack tensor's pointer to create an array from the
    data. Removes the original DLPack tensor's destructor as now the array is
    responsible for destruction.
    Parameters
    ----------
    dltensor : DLPack tensor
        Input DLManagedTensor, can only be consumed once.
    Returns
    -------
    arr: dgl.nd.NDArray
        The array view of the tensor data.
    """
    # Ownership transfer happens in the C layer; the capsule is consumed.
    return _from_dlpack(dltensor)
class NDArrayBase(_NDArrayBase):
    """A simple Device/CPU Array object in runtime.

    Wraps a ``DGLArrayHandle`` (``self.handle``) and layers numpy
    conversion and device copy helpers on top of the C API in ``_LIB``.
    Equality and hashing are identity-based: two arrays compare equal only
    if they share the same underlying C handle.
    """
    @property
    def shape(self):
        """Shape of this array"""
        return tuple(self.handle.contents.shape[i] for i in range(self.handle.contents.ndim))
    @property
    def dtype(self):
        """Type of this array"""
        return str(self.handle.contents.dtype)
    @property
    def ctx(self):
        """context of this array"""
        return self.handle.contents.ctx
    @property
    def context(self):
        """context of this array"""
        return self.ctx
    def __hash__(self):
        # Hash on the handle's pointer value so that hashing matches the
        # identity-based equality implemented by same_as().
        return ctypes.cast(self.handle, ctypes.c_void_p).value
    def __eq__(self, other):
        return self.same_as(other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def same_as(self, other):
        """Check object identity equality
        Parameters
        ----------
        other : object
            The other object to compare to
        Returns
        -------
        same : bool
            Whether other is same as self.
        """
        if not isinstance(other, NDArrayBase):
            return False
        return self.__hash__() == other.__hash__()
    def __setitem__(self, in_slice, value):
        """Set ndarray value"""
        # Only the whole-array slice arr[:] = value is supported.
        if (not isinstance(in_slice, slice) or
                in_slice.start is not None
                or in_slice.stop is not None):
            raise ValueError('Array only support set from numpy array')
        if isinstance(value, NDArrayBase):
            if value.handle is not self.handle:
                value.copyto(self)
        elif isinstance(value, (np.ndarray, np.generic)):
            self.copyfrom(value)
        else:
            raise TypeError('type %s not supported' % str(type(value)))
    def copyfrom(self, source_array):
        """Perform a synchronized copy from the array.
        Parameters
        ----------
        source_array : array_like
            The data source we should like to copy from.
        Returns
        -------
        arr : NDArray
            Reference to self.
        """
        if isinstance(source_array, NDArrayBase):
            source_array.copyto(self)
            return self
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=self.dtype)
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit /
                # KeyboardInterrupt are no longer swallowed; also fixed
                # the missing space in the concatenated message.
                raise TypeError('array must be an array_like data, ' +
                                'type %s is not supported' % str(type(source_array)))
        # Vector dtypes (lanes > 1) are exposed to numpy as a trailing axis.
        t = DGLType(self.dtype)
        shape, dtype = self.shape, self.dtype
        if t.lanes > 1:
            shape = shape + (t.lanes,)
            t.lanes = 1
            dtype = str(t)
        if source_array.shape != shape:
            raise ValueError("array shape do not match the shape of NDArray {0} vs {1}".format(
                source_array.shape, shape))
        source_array = np.ascontiguousarray(source_array, dtype=dtype)
        assert source_array.flags['C_CONTIGUOUS']
        data = source_array.ctypes.data_as(ctypes.c_void_p)
        nbytes = ctypes.c_size_t(source_array.size * source_array.dtype.itemsize)
        check_call(_LIB.DGLArrayCopyFromBytes(self.handle, data, nbytes))
        return self
    def __repr__(self):
        res = "<dgl.NDArray shape={0}, {1}>\n".format(self.shape, self.context)
        res += self.asnumpy().__repr__()
        return res
    def __str__(self):
        return str(self.asnumpy())
    def asnumpy(self):
        """Convert this array to numpy array
        Returns
        -------
        np_arr : numpy.ndarray
            The corresponding numpy array.
        """
        # Mirror copyfrom: expand vector dtypes into a trailing axis.
        t = DGLType(self.dtype)
        shape, dtype = self.shape, self.dtype
        if t.lanes > 1:
            shape = shape + (t.lanes,)
            t.lanes = 1
            dtype = str(t)
        np_arr = np.empty(shape, dtype=dtype)
        assert np_arr.flags['C_CONTIGUOUS']
        data = np_arr.ctypes.data_as(ctypes.c_void_p)
        nbytes = ctypes.c_size_t(np_arr.size * np_arr.dtype.itemsize)
        check_call(_LIB.DGLArrayCopyToBytes(self.handle, data, nbytes))
        return np_arr
    def copyto(self, target):
        """Copy array to target
        Parameters
        ----------
        target : NDArray or DGLContext
            The target array to be copied, must have same shape as this
            array; or a context, in which case a fresh array is allocated
            on that device first.
        Returns
        -------
        target : NDArray
            The target array holding the copy.
        """
        if isinstance(target, DGLContext):
            target = empty(self.shape, self.dtype, target)
        if isinstance(target, NDArrayBase):
            check_call(_LIB.DGLArrayCopyFromTo(
                self.handle, target.handle, None))
        else:
            raise ValueError("Unsupported target type %s" % str(type(target)))
        return target
def free_extension_handle(handle, type_code):
    """Free c++ extension type handle
    Parameters
    ----------
    handle : ctypes.c_void_p
        The handle to the extension type.
    type_code : int
        The type code
    """
    check_call(_LIB.DGLExtTypeFree(handle, ctypes.c_int(type_code)))
def register_extension(cls, fcreate=None):
    """Register an extension class with DGL.
    After the class is registered, the class will be able
    to directly pass as Function argument generated by DGL.
    Parameters
    ----------
    cls : class
        The class object to be registered as extension.
    fcreate : function, optional
        The creation function to create a class object given handle value.
    Note
    ----
    The registered class requires a property ``_dgl_handle`` and a class attribute ``_dgl_tcode``.
    - ```_dgl_handle``` returns an integer representing the address of the handle.
    - ```_dgl_tcode``` gives an integer representing the type code of the class.
    Returns
    -------
    cls : class
        The class being registered.
    Example
    -------
    The following code registers user defined class
    MyTensor to be DLTensor compatible.
    .. code-block:: python
       @dgl.register_extension
       class MyTensor(object):
           _dgl_tcode = dgl.TypeCode.ARRAY_HANDLE
           def __init__(self):
               self.handle = _LIB.NewDLTensor()
           @property
           def _dgl_handle(self):
               return self.handle.value
    """
    # Custom constructors are only allowed for true extension type codes.
    if fcreate and cls._dgl_tcode < TypeCode.EXT_BEGIN:
        raise ValueError("Cannot register create when extension tcode is same as buildin")
    _reg_extension(cls, fcreate)
    return cls
| 30.026087 | 96 | 0.621392 |
b4576c6b83d51be701543a5a125e36249eaf8321 | 8,073 | py | Python | examples/mujoco/train_ppo_batch_gym.py | xinyuewang1/chainerrl | 49425d09cb0749968f4e364e281670e752a46791 | [
"MIT"
] | null | null | null | examples/mujoco/train_ppo_batch_gym.py | xinyuewang1/chainerrl | 49425d09cb0749968f4e364e281670e752a46791 | [
"MIT"
] | null | null | null | examples/mujoco/train_ppo_batch_gym.py | xinyuewang1/chainerrl | 49425d09cb0749968f4e364e281670e752a46791 | [
"MIT"
] | null | null | null | """An example of training PPO against OpenAI Gym Envs.
This script is an example of training a PPO agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_ppo_gym.py --env CartPole-v0
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
def main():
    """Train (or, with --demo, evaluate) a PPO agent on an OpenAI Gym env.
    Supports both discrete and continuous action spaces; everything is
    configured through command-line flags.
    """
    import logging
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--env', type=str, default='Hopper-v2')
    parser.add_argument('--num-envs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                        ' If it does not exist, it will be created.')
    parser.add_argument('--steps', type=int, default=10 ** 6)
    parser.add_argument('--eval-interval', type=int, default=10000)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--standardize-advantages', action='store_true')
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--window-size', type=int, default=100)
    parser.add_argument('--update-interval', type=int, default=2048)
    parser.add_argument('--log-interval', type=int, default=1000)
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--entropy-coef', type=float, default=0.0)
    args = parser.parse_args()
    logging.basicConfig(level=args.logger_level)
    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    def make_env(process_idx, test):
        # Build one (possibly wrapped) env for subprocess process_idx.
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render:
            env = chainerrl.wrappers.Render(env)
        return env
    def make_batch_env(test):
        # One subprocess env per --num-envs.
        return chainerrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test)
             for idx, env in enumerate(range(args.num_envs))])
    # Only for getting timesteps, and obs-action spaces
    sample_env = gym.make(args.env)
    # NOTE(review): env.spec.tags was removed in newer gym releases —
    # confirm the pinned gym version still exposes it.
    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Normalize observations based on their empirical mean and variance
    obs_normalizer = chainerrl.links.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5)
    winit_last = chainer.initializers.LeCunNormal(1e-2)
    # Switch policy types accordingly to action space types
    if isinstance(action_space, gym.spaces.Discrete):
        n_actions = action_space.n
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, n_actions, initialW=winit_last),
            chainerrl.distribution.SoftmaxDistribution,
        )
    elif isinstance(action_space, gym.spaces.Box):
        action_size = action_space.low.size
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )
    else:
        print("""\
This example only supports gym.spaces.Box or gym.spaces.Discrete action spaces.""")  # NOQA
        return
    vf = chainer.Sequential(
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 1),
    )
    # Combine a policy and a value function into a single model
    model = chainerrl.links.Branched(policy, vf)
    opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
    opt.setup(model)
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))
    agent = PPO(model, opt,
                obs_normalizer=obs_normalizer,
                gpu=args.gpu,
                update_interval=args.update_interval,
                minibatch_size=args.batchsize, epochs=args.epochs,
                clip_eps_vf=None, entropy_coef=args.entropy_coef,
                standardize_advantages=args.standardize_advantages,
                )
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation-only mode: no training loop.
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            agent.optimizer.alpha = value
        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter)
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            return_window_size=args.window_size,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=False,
            step_hooks=[
                lr_decay_hook,
            ],
        )
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 37.901408 | 91 | 0.651802 |
1fb5026a87f17398a143b5409f8fb0e3774c88d0 | 269 | py | Python | filter_demo/book/views.py | gaohj/2001django | 0da7227acb37b7cdb3a9595bd96e0e1afd63e760 | [
"Apache-2.0"
] | null | null | null | filter_demo/book/views.py | gaohj/2001django | 0da7227acb37b7cdb3a9595bd96e0e1afd63e760 | [
"Apache-2.0"
] | null | null | null | filter_demo/book/views.py | gaohj/2001django | 0da7227acb37b7cdb3a9595bd96e0e1afd63e760 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from datetime import datetime
# Create your views here.
def index(request):
    """Render ``index.html`` with a fixed timestamp exposed as ``times``."""
    template_context = {'times': datetime(2020, 7, 15, 16, 30, 10)}
    return render(request, 'index.html', context=template_context)
fa2bb51c6bfa85d102bee245b278ded69dd7615b | 1,436 | py | Python | transformation.py | msqz/CarND-Advanced-Lane-Lines | 84c1034ae7fbecbb7b88b53b351cd31dcb98f39a | [
"MIT"
] | null | null | null | transformation.py | msqz/CarND-Advanced-Lane-Lines | 84c1034ae7fbecbb7b88b53b351cd31dcb98f39a | [
"MIT"
] | null | null | null | transformation.py | msqz/CarND-Advanced-Lane-Lines | 84c1034ae7fbecbb7b88b53b351cd31dcb98f39a | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import glob
import helpers
# Perspective-transform anchors (pixel coords in a 1280x720 frame): a
# road-surface trapezoid (`warping_from`) mapped onto a rectangle
# (`warping_to`) for the bird's-eye view.
warping_from = np.float32([[200, 720], [604, 450], [696, 450], [1120, 720]])
warping_to = np.float32([[200, 720], [200, 0], [1120, 0], [1120, 720]])
def calibrate():
    """Compute camera intrinsics from the chessboard images in camera_cal/.

    Detects 9x6 inner-corner chessboards, refines the corner locations to
    sub-pixel accuracy, and runs OpenCV camera calibration over all images.

    Returns
    -------
    (mtx, dist) : camera matrix and distortion coefficients.
    """
    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Ideal 3-D object points for a 9x6 board lying in the z=0 plane.
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    objpoints = []  # 3-D points in world space
    imgpoints = []  # 2-D points in image space
    images = glob.glob('camera_cal/*.jpg')
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        if ret is True:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)
            # Bug fix: append the sub-pixel refined corners. The original
            # appended the coarse `corners`, silently discarding the
            # cornerSubPix refinement computed just above.
            imgpoints.append(corners2)
    # NOTE(review): `gray` is reused from the last loop iteration; this
    # raises NameError if camera_cal/ contains no images — same as before.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    return mtx, dist
def undistort(img, mtx, dist):
    """Undistort *img* with calibration (mtx, dist), crop to the valid ROI,
    and resize the result back to 1280x720.
    NOTE(review): output size is hard-coded to 1280x720 — confirm all
    pipeline inputs share that resolution.
    """
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
        mtx, dist, (w, h), 1, (w, h))
    undistorted = cv2.undistort(img, mtx, dist, None, newcameramtx)
    # Crop to the valid-pixel rectangle reported by getOptimalNewCameraMatrix.
    x, y, w, h = roi
    return cv2.resize(undistorted[y:y+h, x:x+w], (1280, 720))
def warp(img):
    """Perspective-warp *img* to a bird's-eye view using the module-level
    anchor points; returns ``(warped_image, transform_matrix)``."""
    transform = cv2.getPerspectiveTransform(warping_from, warping_to)
    warped = cv2.warpPerspective(img, transform, (1280, 720))
    return warped, transform
| 31.217391 | 78 | 0.610028 |
38f01e171464aac8afc96a6238c3b8708fa1912f | 8,920 | py | Python | Attendance_System_U_FR/main.py | Mr-Parveen/AttendanceSystem_Using_Face_Recognition | 163594b18a31353c9972b6fff237bf8c0acac96f | [
"MIT"
] | null | null | null | Attendance_System_U_FR/main.py | Mr-Parveen/AttendanceSystem_Using_Face_Recognition | 163594b18a31353c9972b6fff237bf8c0acac96f | [
"MIT"
] | null | null | null | Attendance_System_U_FR/main.py | Mr-Parveen/AttendanceSystem_Using_Face_Recognition | 163594b18a31353c9972b6fff237bf8c0acac96f | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import cv2
from PIL import Image,ImageTk
import captureWin
import attendance
root = Tk()
root.title("Attendance System")
root.geometry("550x300") # width x height
root.minsize(550, 300)
root.maxsize(550, 300)
# ================= frame =========================
Manage_frame = Frame(root, bd=5, relief=RIDGE, bg="white")
Manage_frame.place(x=5, y=48, width=350, height=245)
Manage_frame2 = Frame(root, bd=5, relief=RIDGE, bg="white")
Manage_frame2.place(x=357, y=48, width=185, height=245)
# ================= title =========================
title = Label(root, text="Welcome To Attendance System", bd=5,
relief=GROOVE, font=("poppins", 20, "bold"), bg="red", fg="white")
title.pack(side=TOP, fill=X)
# ============================================ Exit ===================================================================================
def exit():  # NOTE: shadows the builtin exit(); name kept for existing bindings
    """Ask for confirmation, then destroy the main window."""
    # askyesno already returns a bool, so test it directly instead of
    # `== True`; the original no-op `else: pass` branch is dropped.
    if messagebox.askyesno(message='Do you want to exit?'):
        root.destroy()
# ============================================ About ===================================================================================
def About(): # builds and shows the "About" dialog window
    root = Tk()  # NOTE(review): second Tk() root; Toplevel is the usual choice
    root.title("About")
    root.geometry("400x100")
    root.resizable(False, False)
    # =================== Labels ================================
    intro_lab = Label(root, text="Welcome To Attendance System")
    intro_lab.place(x=40, y=15)
    intro_lab = Label(root, text="Contact : parveen@example.com")
    intro_lab.place(x=40, y=35)
    intro_lab = Label(root, text="Developed By : Parveen Biswas")
    intro_lab.place(x=40, y=55)
    # =================== Button ===============================
    OK_Exit = Button(root, text="(OK)", command=lambda: root.destroy(), font=(
        "poppins", 10), width=7, bd=2)
    OK_Exit.place(x=290, y=30)
    root.mainloop()
# ============================================ Student detail ===================================================================================
def student(): # This is a constructor
root = Tk()
root.title("Student Details")
root.geometry("1100x650+0+0") # height x width x X-axis x Y-axis
root.minsize(1100, 650)
root.maxsize(1100, 650)
title = Label(root, text="Student Details", bd=5, relief=GROOVE, font=(
"times new roman", 25, "bold"), bg="#c4c4c4", fg="red")
title.pack(side=TOP, fill=X)
# ================== Frame ==================================
Manage_frame = Frame(root, bd=5, relief=RIDGE, bg="#d3ceb6")
Manage_frame.place(x=250, y=50, width=600, height=190)
Manage_frame2 = Frame(root, bd=5, relief=RIDGE, bg="#d3ceb6")
Manage_frame2.place(x=25, y=50, width=220, height=190)
Manage_frame3 = Frame(root, bd=5, relief=RIDGE, bg="#d3ceb6")
Manage_frame3.place(x=855, y=50, width=216, height=190)
Detail_frame = Frame(root, bd=5, relief=RIDGE, bg="#d3ceb6")
Detail_frame.place(x=1, y=245, width=1095, height=400)
# ================== dialog Box =====================================
roll_lbl = Label(Manage_frame, text="Roll no.", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
roll_lbl.place(x=15, y=15)
roll_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
roll_txt.place(x=95, y=15)
name_lbl = Label(Manage_frame, text="Name", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
name_lbl.place(x=15, y=55)
name_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
name_txt.place(x=95, y=55)
brh_lbl = Label(Manage_frame, text="Branch", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
brh_lbl.place(x=15, y=95)
brh_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
brh_txt.place(x=95, y=95)
strm_lbl = Label(Manage_frame, text="Stream", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
strm_lbl.place(x=15, y=135)
strm_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
strm_txt.place(x=95, y=135)
sem_lbl = Label(Manage_frame, text="Semester", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
sem_lbl.place(x=300, y=15)
# combobox for options which is readable only
sem_txt = ttk.Combobox(Manage_frame, font=(
"times new roman", 10, "bold"), state='readonly')
sem_txt['values'] = ("1st", "2nd", "3rd", "4th",
"5th", "6th", "7th", "8th")
sem_txt.place(x=400, y=19)
eml_lbl = Label(Manage_frame, text="Email", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
eml_lbl.place(x=300, y=55)
eml_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
eml_txt.place(x=400, y=55)
phn_lbl = Label(Manage_frame, text="Phone no.", font=(
"times new roman", 15, "bold"), bg="#d3ceb6")
phn_lbl.place(x=300, y=95)
phn_txt = Entry(Manage_frame, font=(
"times new roman", 10, "bold"), bd=5, relief=GROOVE)
phn_txt.place(x=400, y=95)
# ======================= Button ==================================================
uplod_btn = Button(Manage_frame2, text="Upload photo", width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
uplod_btn.place(x=10, y=145)
add_btn = Button(Manage_frame3, text="Add", width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
add_btn.place(x=10, y=5)
updt_btn = Button(Manage_frame3, text="Update", width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
updt_btn.place(x=10, y=40)
dlt_btn = Button(Manage_frame3, text="Delete", width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
dlt_btn.place(x=10, y=75)
abt_btn = Button(Manage_frame3, text="About", command=About, width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
abt_btn.place(x=10, y=110)
ext_btn = Button(Manage_frame3, text="Exit", command=lambda: root.destroy(), width=18, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
ext_btn.place(x=10, y=145)
# ======================= Search Dialog Box ========================================
srh_lbl = Label(Detail_frame, text="Search By",
font=("poppins", 12, "bold"), bg="#d3ceb6")
srh_lbl.place(x=60, y=12)
# combobox for options which is readable only
srh_bx = ttk.Combobox(Detail_frame, font=(
"times new roman", 10, "bold"), state='readonly')
srh_bx['values'] = ("Roll no.", "Name", "Phone no.", "Email")
srh_bx.place(x=160, y=10, width=100, height=27)
srh_txt = Entry(Detail_frame, font=(
"times new roman", 10, "bold"), relief=GROOVE)
srh_txt.place(x=275, y=10, width=180, height=29)
srh_btn = Button(Detail_frame, text="Search", font=(
"poppins", 8, 'bold'), fg="#333331")
srh_btn.place(x=465, y=10, width=80)
sho_btn = Button(Detail_frame, text="Show all",
font=("poppins", 8, 'bold'), fg="#333331")
sho_btn.place(x=550, y=10, width=80)
# ======================= Table Frame ===============================================
tbl_frame = Frame(Detail_frame, bd=3, relief=RIDGE, bg="white")
tbl_frame.place(x=1, y=45, width=1083, height=345)
srol_x = Scrollbar(tbl_frame, orient=HORIZONTAL)
srol_y = Scrollbar(tbl_frame, orient=VERTICAL)
std_tbl = ttk.Treeview(tbl_frame, column=("roll", "name", "branch", "stream", "semester",
"email", "phone"), xscrollcommand=srol_x.set, yscrollcommand=srol_y.set)
srol_x.pack(side=BOTTOM, fill=X)
srol_y.pack(side=RIGHT, fill=Y)
srol_x.config(command=std_tbl.xview)
srol_y.config(command=std_tbl.yview)
root.mainloop()
# ================= Button ========================
tk_Att_btn = Button(Manage_frame2, text="Take Attendance", command= attendance.Attendance, width=14, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
tk_Att_btn.place(x=10, y=20)
up_Pt_btn = Button(Manage_frame2, text="Upload Photo", command=captureWin.UploadPhoto, width=14, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
up_Pt_btn.place(x=10, y=60)
st_Dt_btn = Button(Manage_frame2, text="Student Details", command=student, width=14, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
st_Dt_btn.place(x=10, y=100)
abt_btn = Button(Manage_frame2, text="About", command=About, width=14, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
abt_btn.place(x=10, y=140)
ext_btn = Button(Manage_frame2, text="Exit", command=exit, width=14, font=(
"poppins", 10, 'bold'), fg="#333331", height=1)
ext_btn.place(x=10, y=180)
root.mainloop()
| 39.122807 | 145 | 0.561323 |
537d1a2f169f0ea0bbc0f199b1751b1dcbaa964a | 8,159 | py | Python | docs/conf.py | alixedi/django_popcorn | fbe643d7f0edae723a9ca587d8e336d3be2425e5 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | alixedi/django_popcorn | fbe643d7f0edae723a9ca587d8e336d3be2425e5 | [
"BSD-3-Clause"
] | 1 | 2016-03-13T19:34:35.000Z | 2016-03-13T19:34:35.000Z | docs/conf.py | alixedi/django_popcorn | fbe643d7f0edae723a9ca587d8e336d3be2425e5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import django_popcorn
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django_popcorn'
copyright = u'2013, Ali Zaidi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_popcorn.__version__
# The full version, including alpha/beta/rc tags.
release = django_popcorn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django_popcorndoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django_popcorn.tex', u'django_popcorn Documentation',
u'Ali Zaidi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django_popcorn', u'django_popcorn Documentation',
[u'Ali Zaidi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django_popcorn', u'django_popcorn Documentation',
u'Ali Zaidi', 'django_popcorn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | 32.122047 | 80 | 0.719328 |
b1c44555be6d8c8ab786f1ee0b3f0f7723aeafad | 16,945 | py | Python | msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/vendored_sdks/teams/aio/operations/_teams_schedule_time_cards_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/vendored_sdks/teams/aio/operations/_teams_schedule_time_cards_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/vendored_sdks/teams/aio/operations/_teams_schedule_time_cards_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TeamsScheduleTimeCardsOperations:
"""TeamsScheduleTimeCardsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~teams.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def clock_out(
self,
team_id: str,
time_card_id: str,
body: "models.Paths1Hehk7XTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphClockoutPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> "models.MicrosoftGraphTimeCard":
"""Invoke action clockOut.
Invoke action clockOut.
:param team_id: key: id of team.
:type team_id: str
:param time_card_id: key: id of timeCard.
:type time_card_id: str
:param body: Action parameters.
:type body: ~teams.models.Paths1Hehk7XTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphClockoutPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTimeCard, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTimeCard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTimeCard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.clock_out.metadata['url'] # type: ignore
path_format_arguments = {
'team-id': self._serialize.url("team_id", team_id, 'str'),
'timeCard-id': self._serialize.url("time_card_id", time_card_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Hehk7XTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphClockoutPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTimeCard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clock_out.metadata = {'url': '/teams/{team-id}/schedule/timeCards/{timeCard-id}/microsoft.graph.clockOut'} # type: ignore
async def confirm(
self,
team_id: str,
time_card_id: str,
**kwargs
) -> "models.MicrosoftGraphTimeCard":
"""Invoke action confirm.
Invoke action confirm.
:param team_id: key: id of team.
:type team_id: str
:param time_card_id: key: id of timeCard.
:type time_card_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTimeCard, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTimeCard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTimeCard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.confirm.metadata['url'] # type: ignore
path_format_arguments = {
'team-id': self._serialize.url("team_id", team_id, 'str'),
'timeCard-id': self._serialize.url("time_card_id", time_card_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTimeCard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
confirm.metadata = {'url': '/teams/{team-id}/schedule/timeCards/{timeCard-id}/microsoft.graph.confirm'} # type: ignore
async def end_break(
self,
team_id: str,
time_card_id: str,
body: "models.Paths2PwcywTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphEndbreakPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> "models.MicrosoftGraphTimeCard":
"""Invoke action endBreak.
Invoke action endBreak.
:param team_id: key: id of team.
:type team_id: str
:param time_card_id: key: id of timeCard.
:type time_card_id: str
:param body: Action parameters.
:type body: ~teams.models.Paths2PwcywTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphEndbreakPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTimeCard, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTimeCard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTimeCard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.end_break.metadata['url'] # type: ignore
path_format_arguments = {
'team-id': self._serialize.url("team_id", team_id, 'str'),
'timeCard-id': self._serialize.url("time_card_id", time_card_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths2PwcywTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphEndbreakPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTimeCard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
end_break.metadata = {'url': '/teams/{team-id}/schedule/timeCards/{timeCard-id}/microsoft.graph.endBreak'} # type: ignore
async def start_break(
self,
team_id: str,
time_card_id: str,
body: "models.Paths1Ckmy1BTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphStartbreakPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> "models.MicrosoftGraphTimeCard":
"""Invoke action startBreak.
Invoke action startBreak.
:param team_id: key: id of team.
:type team_id: str
:param time_card_id: key: id of timeCard.
:type time_card_id: str
:param body: Action parameters.
:type body: ~teams.models.Paths1Ckmy1BTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphStartbreakPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTimeCard, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTimeCard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTimeCard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.start_break.metadata['url'] # type: ignore
path_format_arguments = {
'team-id': self._serialize.url("team_id", team_id, 'str'),
'timeCard-id': self._serialize.url("time_card_id", time_card_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Ckmy1BTeamsTeamIdScheduleTimecardsTimecardIdMicrosoftGraphStartbreakPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTimeCard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
start_break.metadata = {'url': '/teams/{team-id}/schedule/timeCards/{timeCard-id}/microsoft.graph.startBreak'} # type: ignore
async def clock_in(
self,
team_id: str,
body: "models.Paths1CnzyehTeamsTeamIdScheduleTimecardsMicrosoftGraphClockinPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> "models.MicrosoftGraphTimeCard":
"""Invoke action clockIn.
Invoke action clockIn.
:param team_id: key: id of team.
:type team_id: str
:param body: Action parameters.
:type body: ~teams.models.Paths1CnzyehTeamsTeamIdScheduleTimecardsMicrosoftGraphClockinPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTimeCard, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTimeCard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTimeCard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.clock_in.metadata['url'] # type: ignore
path_format_arguments = {
'team-id': self._serialize.url("team_id", team_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1CnzyehTeamsTeamIdScheduleTimecardsMicrosoftGraphClockinPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTimeCard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clock_in.metadata = {'url': '/teams/{team-id}/schedule/timeCards/microsoft.graph.clockIn'} # type: ignore
| 47.069444 | 170 | 0.683919 |
c360d9571622baff91384539e189b59f51e786e5 | 773 | py | Python | tests/gis_tests/rasterapp/models.py | JBKahn/django | 32265361279b3316f5bce8efa71f2049409461e3 | [
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2018-01-29T14:16:02.000Z | 2019-02-05T21:33:05.000Z | tests/gis_tests/rasterapp/models.py | djk2/django | 6b00af50146335485d8414c42efec7d8dd5397fc | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-05-06T15:32:21.000Z | 2019-05-06T15:32:21.000Z | tests/gis_tests/rasterapp/models.py | djk2/django | 6b00af50146335485d8414c42efec7d8dd5397fc | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2019-12-05T17:24:36.000Z | 2021-11-22T21:21:32.000Z | from django.contrib.gis.db import models
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
class RasterModel(models.Model):
rast = models.RasterField('A Verbose Raster Name', null=True, srid=4326, spatial_index=True, blank=True)
rastprojected = models.RasterField('A Projected Raster Table', srid=3086, null=True)
geom = models.PointField(null=True)
class Meta:
required_db_features = ['supports_raster']
def __str__(self):
return str(self.id)
    class RasterRelatedModel(models.Model):
        """Companion model holding a foreign key to RasterModel (for relation lookups)."""
        rastermodel = models.ForeignKey(RasterModel, models.CASCADE)
        class Meta:
            # Mirrors RasterModel: only meaningful on raster-capable backends.
            required_db_features = ['supports_raster']
        def __str__(self):
            return str(self.id)
| 32.208333 | 112 | 0.667529 |
5625371b05d7d39012fd2d61213679faab7413ba | 1,229 | py | Python | common/data/achievements.py | ggoshanov/vas3k.club | cfcb6375823d9f811d22c8a8e018604868d211e1 | [
"MIT"
] | null | null | null | common/data/achievements.py | ggoshanov/vas3k.club | cfcb6375823d9f811d22c8a8e018604868d211e1 | [
"MIT"
] | null | null | null | common/data/achievements.py | ggoshanov/vas3k.club | cfcb6375823d9f811d22c8a8e018604868d211e1 | [
"MIT"
] | null | null | null | ACHIEVEMENTS = [
("old", {
"name": "Олды тут",
"description": "Участник Клуба с момента его основания. "
"Входил в первую тысячу активных пользователей. "
"Видел, как Си-лучи мерцают во тьме близ врат Тангейзера. "
"Штурмовые корабли в огне на подступах к Ориону и вот это всё.",
"image": "/static/images/badges/year-2.png",
"style": "color: #FFF; background-color: #65c3ba;",
}),
("investor", {
"name": "Инвестор",
"description": "Человек, вложивший $1000 и более на развитие Клуба. "
"Учитываются все ежемесячные платежи, внутренние покупки и переводы.",
"image": "",
"style": "",
}),
("contributor", {
"name": "Контрибьютор",
"description": "Коммитил в код Клуба на гитхабе. ",
"image": "",
"style": "",
}),
("moderator", {
"name": "Модератор",
"description": "Активный модератор Клуба. Может редактировать, удалять и банить. "
"Наличие этого бейджика подтверждает, что человек действительно назначен администрацией.",
"image": "",
"style": "",
}),
]
| 38.40625 | 113 | 0.534581 |
09fe24ba3c75fd33acfbde96d46af0ca01859db4 | 4,696 | py | Python | src/bin/hub.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | 1 | 2021-05-09T04:51:28.000Z | 2021-05-09T04:51:28.000Z | src/bin/hub.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | src/bin/hub.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# MyChem.info hub entry point: configures logging, builds the asyncio job
# manager, wires the dump/upload/build/diff/sync/index managers, and finally
# exposes them as commands through an SSH-accessible hub shell.
# NOTE: statement order matters — config must be applied to biothings before
# the hub modules below are imported.
import asyncio, asyncssh, sys
import concurrent.futures
from functools import partial
from collections import OrderedDict
import config, biothings
biothings.config_for_app(config)
import logging
# shut some mouths...
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("boto").setLevel(logging.ERROR)
logging.info("Hub DB backend: %s" % biothings.config.HUB_DB_BACKEND)
logging.info("Hub database: %s" % biothings.config.DATA_HUB_DB_DATABASE)
from biothings.utils.manager import JobManager
loop = asyncio.get_event_loop()
# CPU-bound hub jobs run in the process pool; the thread pool handles I/O tasks.
process_queue = concurrent.futures.ProcessPoolExecutor(max_workers=config.HUB_MAX_WORKERS)
thread_queue = concurrent.futures.ThreadPoolExecutor()
loop.set_default_executor(process_queue)
# NOTE(review): max_mem is computed here but never used below — JobManager is
# given config.HUB_MAX_MEM_USAGE directly. Confirm whether this is dead code.
max_mem = type(config.HUB_MAX_MEM_USAGE) == int and config.HUB_MAX_MEM_USAGE * 1024**3 or config.HUB_MAX_MEM_USAGE
job_manager = JobManager(loop,num_workers=config.HUB_MAX_WORKERS,
                      max_memory_usage=config.HUB_MAX_MEM_USAGE)
# Hub modules must be imported after biothings.config_for_app() above.
import hub.dataload
import biothings.hub.dataload.uploader as uploader
import biothings.hub.dataload.dumper as dumper
import biothings.hub.databuild.builder as builder
import biothings.hub.databuild.differ as differ
import biothings.hub.databuild.syncer as syncer
import biothings.hub.dataindex.indexer as indexer
from hub.databuild.builder import MyChemDataBuilder
from hub.dataindex.indexer import DrugIndexer
# will check every 10 seconds for sources to upload
upload_manager = uploader.UploaderManager(poll_schedule = '* * * * * */10', job_manager=job_manager)
upload_manager.register_sources(hub.dataload.__sources_dict__)
upload_manager.poll()
dump_manager = dumper.DumperManager(job_manager=job_manager)
dump_manager.register_sources(hub.dataload.__sources_dict__)
dump_manager.schedule_all()
build_manager = builder.BuilderManager(builder_class=MyChemDataBuilder,job_manager=job_manager)
build_manager.configure()
differ_manager = differ.DifferManager(job_manager=job_manager)
differ_manager.configure()
syncer_manager = syncer.SyncerManager(job_manager=job_manager)
syncer_manager.configure()
# Indexer targets the test ES cluster by default.
pindexer = partial(DrugIndexer,es_host=config.ES_TEST_HOST)
index_manager = indexer.IndexerManager(pindexer=pindexer,
                                       job_manager=job_manager)
index_manager.configure()
from biothings.utils.hub import schedule, pending, done
# COMMANDS maps hub-shell command names to the callables they invoke.
COMMANDS = OrderedDict()
# dump commands
COMMANDS["dump"] = dump_manager.dump_src
COMMANDS["dump_all"] = dump_manager.dump_all
# upload commands
COMMANDS["upload"] = upload_manager.upload_src
COMMANDS["upload_all"] = upload_manager.upload_all
# building/merging
COMMANDS["merge"] = partial(build_manager.merge,"drug")
COMMANDS["es_sync_test"] = partial(syncer_manager.sync,"es",target_backend=config.ES_TEST)
COMMANDS["es_sync_prod"] = partial(syncer_manager.sync,"es",target_backend=config.ES_PROD)
COMMANDS["es_test"] = config.ES_TEST
COMMANDS["es_prod"] = config.ES_PROD
# diff
COMMANDS["diff"] = partial(differ_manager.diff,"jsondiff-selfcontained")
COMMANDS["publish_diff"] = partial(differ_manager.publish_diff,config.S3_APP_FOLDER)
COMMANDS["report"] = differ_manager.diff_report
COMMANDS["release_note"] = differ_manager.release_note
# indexing commands
COMMANDS["index"] = index_manager.index
COMMANDS["snapshot"] = index_manager.snapshot
COMMANDS["publish_snapshot"] = partial(index_manager.publish_snapshot,config.S3_APP_FOLDER)
# admin/advanced
# EXTRA_NS exposes the managers and job-queue internals inside the hub shell
# for interactive debugging.
EXTRA_NS = {
    "dm" : dump_manager,
    "um" : upload_manager,
    "bm" : build_manager,
    "sm" : syncer_manager,
    "dim" : differ_manager,
    "im" : index_manager,
    "jm" : job_manager,
    ## admin/advanced
    #"loop" : loop,
    "q" : job_manager.process_queue,
    "t" : job_manager.thread_queue,
    "g": globals(),
    "l":loop,
    "j":job_manager,
    "sch" : partial(schedule,loop),
    "top" : job_manager.top,
    "pending" : pending,
    "done" : done,
}
passwords = {
    'guest': '', # guest account with no password
}
from biothings.utils.hub import start_server
server = start_server(loop,"MyChem.info hub",passwords=passwords,
                      port=config.SSH_HUB_PORT,commands=COMMANDS,extra_ns=EXTRA_NS)
try:
    loop.run_until_complete(server)
except (OSError, asyncssh.Error) as exc:
    sys.exit('Error starting server: ' + str(exc))
# Block forever serving hub-shell sessions.
loop.run_forever()
36ae57a64eb5154a2059dce63886f36eabb33e35 | 6,395 | py | Python | ruts/basic_stats.py | SergeyShk/ruTS | 6af376ab606ed539a37f0c87c833ec4820e30fec | [
"MIT"
] | 72 | 2019-12-22T09:45:33.000Z | 2022-03-29T11:39:57.000Z | ruts/basic_stats.py | SergeyShk/ruTS | 6af376ab606ed539a37f0c87c833ec4820e30fec | [
"MIT"
] | 6 | 2020-01-09T13:37:28.000Z | 2022-03-12T01:06:01.000Z | ruts/basic_stats.py | SergeyShk/ruTS | 6af376ab606ed539a37f0c87c833ec4820e30fec | [
"MIT"
] | 16 | 2019-12-29T13:01:37.000Z | 2021-08-30T12:34:37.000Z | from typing import Dict, Union
from collections import Counter
from spacy.tokens import Doc
from .constants import BASIC_STATS_DESC, COMPLEX_SYL_FACTOR, PUNCTUATIONS, RU_LETTERS, SPACES
from .extractors import SentsExtractor, WordsExtractor
from .utils import count_syllables
class BasicStats(object):
    """
    Class for computing basic statistics of a text.

    Usage example:
        >>> from ruts import BasicStats
        >>> text = "Существуют три вида лжи: ложь, наглая ложь и статистика"
        >>> bs = BasicStats(text)
        >>> bs.get_stats()
        {'c_letters': {1: 1, 3: 2, 4: 3, 6: 1, 10: 2},
        'c_syllables': {1: 5, 2: 1, 3: 1, 4: 2},
        'n_chars': 55,
        'n_complex_words': 2,
        'n_letters': 45,
        'n_long_words': 3,
        'n_monosyllable_words': 5,
        'n_polysyllable_words': 4,
        'n_punctuations': 2,
        'n_sents': 1,
        'n_simple_words': 7,
        'n_spaces': 8,
        'n_syllables': 18,
        'n_unique_words': 8,
        'n_words': 9}

    Args:
        source (str|Doc): data source (plain string or spaCy Doc)
        sents_extractor (SentsExtractor): tool for extracting sentences
        words_extractor (WordsExtractor): tool for extracting words
        normalize (bool): whether to also compute normalized statistics

    Attributes:
        c_letters (dict[int, int]): distribution of words by letter count
        c_syllables (dict[int, int]): distribution of words by syllable count
        n_sents (int): number of sentences
        n_words (int): number of words
        n_unique_words (int): number of unique words
        n_long_words (int): number of long words (6+ letters)
        n_complex_words (int): number of complex words
        n_simple_words (int): number of simple words
        n_monosyllable_words (int): number of one-syllable words
        n_polysyllable_words (int): number of multi-syllable words
        n_chars (int): number of characters
        n_letters (int): number of letters
        n_spaces (int): number of whitespace characters
        n_syllables (int): number of syllables
        n_punctuations (int): number of punctuation marks
        p_unique_words (float): normalized number of unique words
        p_long_words (float): normalized number of long words
        p_complex_words (float): normalized number of complex words
        p_simple_words (float): normalized number of simple words
        p_monosyllable_words (float): normalized number of one-syllable words
        p_polysyllable_words (float): normalized number of multi-syllable words
        p_letters (float): normalized number of letters
        p_spaces (float): normalized number of whitespace characters
        p_punctuations (float): normalized number of punctuation marks

    Methods:
        get_stats: return the computed text statistics
        print_stats: print the computed statistics with their descriptions

    Raises:
        TypeError: if the source is neither a string nor a Doc object
        ValueError: if the source contains no words
    """
    def __init__(
        self,
        source: Union[str, Doc],
        sents_extractor: SentsExtractor = None,
        words_extractor: WordsExtractor = None,
        normalize: bool = False,
    ):
        # A spaCy Doc already carries sentence/token segmentation; a plain
        # string is segmented via the (default or supplied) extractors.
        if isinstance(source, Doc):
            text = source.text
            sents = source.sents
            words = tuple(word.text for word in source)
        elif isinstance(source, str):
            text = source
            if not sents_extractor:
                sents_extractor = SentsExtractor()
            sents = sents_extractor.extract(text)
            if not words_extractor:
                words_extractor = WordsExtractor()
            words = words_extractor.extract(text)
        else:
            raise TypeError("Некорректный источник данных")
        if not words:
            raise ValueError("В источнике данных отсутствуют слова")
        letters_per_word = tuple(len(word) for word in words)
        syllables_per_word = tuple(count_syllables(word) for word in words)
        self.c_letters = dict(sorted(Counter(letters_per_word).items()))
        self.c_syllables = dict(sorted(Counter(syllables_per_word).items()))
        self.n_sents = sum(1 for sent in sents)
        self.n_words = len(words)
        self.n_unique_words = len({word.lower() for word in words})
        self.n_long_words = sum(1 for cpw in letters_per_word if cpw >= 6)
        self.n_complex_words = sum(1 for spw in syllables_per_word if spw >= COMPLEX_SYL_FACTOR)
        self.n_simple_words = sum(1 for spw in syllables_per_word if COMPLEX_SYL_FACTOR > spw > 0)
        self.n_monosyllable_words = self.c_syllables.get(1, 0)
        # Polysyllabic = everything except one-syllable and zero-syllable tokens.
        self.n_polysyllable_words = (
            self.n_words - self.c_syllables.get(1, 0) - self.c_syllables.get(0, 0)
        )
        # Newlines are excluded from the character count.
        self.n_chars = len(text.replace("\n", ""))
        self.n_letters = sum((1 for char in text if char in RU_LETTERS))
        self.n_spaces = sum((1 for char in text if char in SPACES))
        self.n_syllables = sum(syllables_per_word)
        self.n_punctuations = sum((1 for char in text if char in PUNCTUATIONS))
        # The p_* attributes exist only when normalize=True.
        if normalize:
            self.p_unique_words = self.n_unique_words / self.n_words
            self.p_long_words = self.n_long_words / self.n_words
            self.p_complex_words = self.n_complex_words / self.n_words
            self.p_simple_words = self.n_simple_words / self.n_words
            self.p_monosyllable_words = self.n_monosyllable_words / self.n_words
            self.p_polysyllable_words = self.n_polysyllable_words / self.n_words
            self.p_letters = self.n_letters / self.n_chars
            self.p_spaces = self.n_spaces / self.n_chars
            self.p_punctuations = self.n_punctuations / self.n_chars
    def get_stats(self) -> Dict[str, int]:
        """
        Return the computed text statistics.

        Returns:
            dict[str, int]: mapping of statistic name to its value
        """
        return vars(self)
    def print_stats(self):
        """Print the computed statistics with their descriptions."""
        print(f"{'Статистика':^20}|{'Значение':^10}")
        print("-" * 30)
        for stat, value in BASIC_STATS_DESC.items():
            print(f"{value:20}|{self.get_stats().get(stat):^10}")
| 43.209459 | 98 | 0.65864 |
a2b8739980d159d38f603ef3c71335eb35d60bde | 14,547 | py | Python | mqbench/deploy/deploy_onnx_qnn.py | a1trl9/MQBench | 279c5a9e6e48778fe2913ba708b53ab9c2b0520c | [
"Apache-2.0"
] | 51 | 2021-08-20T01:28:50.000Z | 2021-09-22T05:34:39.000Z | mqbench/deploy/deploy_onnx_qnn.py | a1trl9/MQBench | 279c5a9e6e48778fe2913ba708b53ab9c2b0520c | [
"Apache-2.0"
] | 7 | 2021-08-24T09:46:31.000Z | 2021-09-22T05:43:06.000Z | mqbench/deploy/deploy_onnx_qnn.py | a1trl9/MQBench | 279c5a9e6e48778fe2913ba708b53ab9c2b0520c | [
"Apache-2.0"
] | 6 | 2021-08-20T09:18:48.000Z | 2021-09-19T04:35:13.000Z | import onnx
import numpy as np
from mqbench.utils.logger import logger
from .common import ONNXGraph
FAKE_QUANTIZE_OP = ['FakeQuantizeLearnablePerchannelAffine', 'FixedPerChannelAffine', 'FakeQuantizeDSQPerchannel',
'LearnablePerTensorAffine', 'FixedPerTensorAffine', 'FakeQuantizeDSQPertensor']
class ONNXQNNPass(object):
    """Rewrites a fake-quantized ONNX graph into QLinear* (QNN-style) operators.

    The pass quantizes weights/biases in place, replaces float ops with their
    QLinear counterparts, converts remaining FakeQuantize nodes to
    QuantizeLinear/DequantizeLinear, and saves the result via ``run()``.
    """
    def __init__(self, onnx_model_path):
        self.onnx_model = ONNXGraph(onnx_model_path)
    @property
    def qlinear_op_type(self):
        # Op types whose inputs/outputs are already quantized tensors.
        return ['QuantizeLinear', 'QLinearConv', 'QLinearAdd', 'QLinearGemm', 'QLinearGlobalAveragePool',
                'QLinearAveragePool', 'QLinearConcat']
    @staticmethod
    def attribute_to_kwarg(attribute):
        '''
        Convert attribute to kwarg format for use with onnx.helper.make_node.
            :parameter attribute: attribute in AttributeProto format.
            :return: attribute in {key: value} format.
        '''
        if (attribute.type == 0):
            raise ValueError('attribute {} does not have type specified.'.format(attribute.name))
        # Based on attribute type definitions from AttributeProto
        # definition in https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
        if (attribute.type == 1):
            value = attribute.f
        elif (attribute.type == 2):
            value = attribute.i
        elif (attribute.type == 3):
            value = attribute.s
        elif (attribute.type == 4):
            value = attribute.t
        elif (attribute.type == 5):
            value = attribute.g
        elif (attribute.type == 6):
            value = attribute.floats
        elif (attribute.type == 7):
            value = attribute.ints
        elif (attribute.type == 8):
            value = attribute.strings
        elif (attribute.type == 9):
            value = attribute.tensors
        elif (attribute.type == 10):
            value = attribute.graphs
        else:
            raise ValueError('attribute {} has unsupported type {}.'.format(attribute.name, attribute.type))
        return {attribute.name: value}
    def quantize_weight(self, weight_name, scale_name, zero_point_name):
        """Quantize a float weight initializer to uint8 using its scale/zero-point."""
        weight = self.onnx_model.get_initializer(weight_name)
        scale = self.onnx_model.get_initializer(scale_name)
        zero_point = self.onnx_model.get_initializer(zero_point_name)
        return ((weight / scale).round() + zero_point).astype(np.uint8)
    def quantize_bias(self, bias, x_scale, w_scale):
        """Quantize a float bias to int32 with scale = x_scale * w_scale (zero point 0)."""
        x_scale = self.onnx_model.get_initializer(x_scale)
        w_scale = self.onnx_model.get_initializer(w_scale)
        bias = self.onnx_model.get_initializer(bias)
        return (bias / (x_scale * w_scale)).astype(np.int32)
    @property
    def node_without_qparams(self):
        # Ops that pass quantization params through unchanged; it is safe to
        # look past them when searching for the output FakeQuantize node.
        return ['Flatten']
    def replace_conv_gemm(self, node, idx, is_conv):
        """Replace a Conv/Gemm node with QLinearConv/QLinearGemm, quantizing its weight (and bias)."""
        # Input scale
        qlinear_conv_inputs = []
        input_fake_quant_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert input_fake_quant_node.op_type in FAKE_QUANTIZE_OP
        x_scale, x_zero_point = input_fake_quant_node.input[1], input_fake_quant_node.input[2]
        # Output scale
        qlinear_conv_output = node.output
        y_scale, y_zero_point = self.get_node_output_qparams(node)
        # Weight scale
        weight_fake_quant_node = self.onnx_model.get_tensor_producer(node.input[1])
        w_scale, w_zero_point = weight_fake_quant_node.input[1], weight_fake_quant_node.input[2]
        weight_name = weight_fake_quant_node.input[0]
        W = self.quantize_weight(weight_name, w_scale, w_zero_point)
        self.onnx_model.set_initializer(weight_name, W)
        qlinear_conv_inputs.extend([node.input[0], x_scale, x_zero_point,
                                    weight_name, w_scale, w_zero_point,
                                    y_scale, y_zero_point])
        # Bias
        if len(node.input) == 3:
            bias_name = node.input[2]
            B = self.quantize_bias(bias_name, x_scale, w_scale)
            self.onnx_model.set_initializer(bias_name, B)
            qlinear_conv_inputs.append(bias_name)
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        node_type = "QLinearConv" if is_conv else "QLinearGemm"
        qlinear_conv_node = onnx.helper.make_node(node_type,
                                                  qlinear_conv_inputs,
                                                  qlinear_conv_output,
                                                  node.name + '_quantized',
                                                  **kwargs)
        self.onnx_model.remove_node_purely(node)
        # The weight's FakeQuantize node is folded away (weight is now stored quantized).
        self.onnx_model.remove_node_purely(weight_fake_quant_node)
        self.onnx_model.insert_node_purely(qlinear_conv_node, idx)
        self.onnx_model.topologize_graph()
    def replace_add_to_qlinearadd(self, node, idx):
        """Replace an Add node with com.microsoft QLinearAdd."""
        # First input
        qlinear_add_input = []
        qlinear_add_output = node.output
        first_input_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert first_input_node.op_type in FAKE_QUANTIZE_OP
        first_input_quantized = first_input_node.output[0]
        first_scale = first_input_node.input[1]
        first_zero_point = first_input_node.input[2]
        # Second input
        second_input_node = self.onnx_model.get_tensor_producer(node.input[1])
        assert second_input_node.op_type in FAKE_QUANTIZE_OP
        second_input_quantized = second_input_node.output[0]
        second_scale = second_input_node.input[1]
        second_zero_point = second_input_node.input[2]
        # Output
        output_scale, output_zero_point = self.get_node_output_qparams(node)
        qlinear_add_input.extend([first_input_quantized, first_scale, first_zero_point,
                                  second_input_quantized, second_scale, second_zero_point,
                                  output_scale, output_zero_point])
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        # QLinearAdd lives in the com.microsoft contrib domain.
        qlinear_add_node = onnx.helper.make_node("QLinearAdd",
                                                 qlinear_add_input,
                                                 qlinear_add_output,
                                                 node.name + '_quantized',
                                                 domain='com.microsoft',
                                                 **kwargs)
        self.onnx_model.insert_node_purely(qlinear_add_node, idx)
        self.onnx_model.remove_node_purely(node)
        self.onnx_model.topologize_graph()
    def replace_pool_to_qlinearpool(self, node, idx, is_global):
        """Replace (Global)AveragePool with its QLinear contrib counterpart."""
        qlinear_pool_input = []
        prev_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert prev_node.op_type in FAKE_QUANTIZE_OP
        x_scale, x_zero_point = prev_node.input[1], prev_node.input[2]
        y_scale, y_zero_point = self.get_node_output_qparams(node)
        qlinear_pool_input.extend([node.input[0], x_scale, x_zero_point,
                                   y_scale, y_zero_point])
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        qlinear_add_output = node.output
        node_type = "QLinearGlobalAveragePool" if is_global else "QLinearAveragePool"
        qlinear_pool_node = onnx.helper.make_node(node_type,
                                                  qlinear_pool_input,
                                                  qlinear_add_output,
                                                  node.name + '_quantized',
                                                  domain='com.microsoft',
                                                  **kwargs)
        self.onnx_model.insert_node_purely(qlinear_pool_node, idx)
        self.onnx_model.remove_node_purely(node)
        self.onnx_model.topologize_graph()
    def get_node_output_qparams(self, node):
        """Walk forward from ``node`` to the next FakeQuantize and return its (scale, zero_point) names."""
        fake_quantize_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
        while fake_quantize_node.op_type not in FAKE_QUANTIZE_OP:
            # Only qparam-transparent ops (see node_without_qparams) may sit in between.
            assert fake_quantize_node.op_type in self.node_without_qparams
            fake_quantize_node = self.onnx_model.get_tensor_consumer(fake_quantize_node.output[0])[0]
        return fake_quantize_node.input[1], fake_quantize_node.input[2]
    def replace_op_pass(self):
        """Replace supported float ops with QLinear equivalents (Gemm/Concat/LeakyRelu pending)."""
        # Replace Conv / Gemm / Add / AvgPool / Concat / LeakyRelu.
        for idx, node in enumerate(self.onnx_model.graph.node):
            if node.op_type == 'Conv':
                self.replace_conv_gemm(node, idx, is_conv=True)
            if node.op_type == 'Gemm':
                pass
                # onnxruntime and tvm is not supported yet.
                # self.replace_conv_gemm(node, idx, is_conv=False)
            if node.op_type == 'Add':
                self.replace_add_to_qlinearadd(node, idx)
            if node.op_type == 'GlobalAveragePool':
                self.replace_pool_to_qlinearpool(node, idx, is_global=True)
            if node.op_type == 'AveragePool':
                self.replace_pool_to_qlinearpool(node, idx, is_global=False)
            # TODO
            if node.op_type == 'Concat':
                pass
            if node.op_type == 'LeakyRelu':
                pass
    def replace_qlinear_layer_pass(self):
        """Eliminate or convert leftover FakeQuantize nodes.

        Between two QLinear ops the node is dropped entirely; at a QLinear->float
        boundary it becomes DequantizeLinear; otherwise QuantizeLinear.
        """
        # Replace FakeQuantize
        for node in self.onnx_model.graph.node:
            if node.op_type in FAKE_QUANTIZE_OP:
                # The producer/consumer may be the graph-boundary sentinel
                # strings 'INPUT_TOKEN' / 'OUTPUT_TOKEN' rather than nodes.
                prev_node = self.onnx_model.get_tensor_producer(node.input[0])
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                if prev_node != 'INPUT_TOKEN' and prev_node.op_type in self.qlinear_op_type and \
                        next_node != 'OUTPUT_TOKEN' and next_node.op_type in self.qlinear_op_type:
                    # Quantized on both sides: drop the node and rewire consumers.
                    self.onnx_model.remove_node_purely(node)
                    for _next_node in self.onnx_model.get_tensor_consumer(node.output[0]):
                        assert _next_node.op_type in self.qlinear_op_type
                        for idx, _input_name in enumerate(_next_node.input):
                            if _input_name == node.output[0]:
                                _next_node.input[idx] = node.input[0]
                    self.onnx_model.topologize_graph()
                elif prev_node != 'INPUT_TOKEN' and prev_node.op_type in self.qlinear_op_type:
                    dequantize_linear_node = onnx.helper.make_node("DequantizeLinear",
                                                                   node.input[0:3],
                                                                   node.output,
                                                                   node.name + '_dequantized')
                    self.onnx_model.insert_node_purely(dequantize_linear_node)
                    self.onnx_model.remove_node_purely(node)
                    self.onnx_model.topologize_graph()
                else:
                    quantize_linear_node = onnx.helper.make_node("QuantizeLinear",
                                                                 node.input[0:3],
                                                                 node.output,
                                                                 node.name + '_quantized')
                    self.onnx_model.insert_node_purely(quantize_linear_node)
                    self.onnx_model.remove_node_purely(node)
                    self.onnx_model.topologize_graph()
    def merge_relu_pass(self):
        """Fold Relu/Clip(0,6) into the following FakeQuantize by adjusting its qparams."""
        for node in self.onnx_model.graph.node:
            if node.op_type == 'Relu':
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                assert next_node.op_type in FAKE_QUANTIZE_OP
                # Input idx2 is zero point.
                self.onnx_model.set_initializer(next_node.input[2], np.array([0], dtype=np.uint8), raw=False)
                self.onnx_model.remove_node_purely(node)
                next_node.input[0] = node.input[0]
            if node.op_type == 'Clip':
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                assert next_node.op_type in FAKE_QUANTIZE_OP
                # Input idx2 is zero point.
                # Cap the scale so the uint8 range covers at most [0, 6] (ReLU6).
                scale = self.onnx_model.get_initializer(next_node.input[1])
                scale = min(scale, 6.0 / 255)
                self.onnx_model.set_initializer(next_node.input[1], np.array([scale], dtype=np.float32), raw=False)
                self.onnx_model.set_initializer(next_node.input[2], np.array([0], dtype=np.uint8), raw=False)
                self.onnx_model.remove_node_purely(node)
                next_node.input[0] = node.input[0]
        self.onnx_model.topologize_graph()
    def format_qlinear_dtype_pass(self):
        """Normalize all FakeQuantize qparams: float32 scales and uint8 zero points shifted by qmin."""
        for node in self.onnx_model.graph.node:
            if node.op_type in FAKE_QUANTIZE_OP:
                scale, zero_point, qmin, qmax = node.input[1], node.input[2], node.input[3], node.input[4]
                qmin = self.onnx_model.get_constant(qmin)
                qmax = self.onnx_model.get_constant(qmax)
                assert qmax - qmin == 2 ** 8 - 1, "Only 8 bit quantization support deploy to QNN."
                scale_proto = self.onnx_model.initializer[scale][0]
                if scale_proto.raw_data != b'' and scale_proto.dims[0] == 1:
                    scale_data = self.onnx_model.get_initializer(scale)
                    self.onnx_model.set_initializer(scale, scale_data.astype(np.float32), raw=False)
                zero_point_proto = self.onnx_model.initializer[zero_point][0]
                zero_point_data = self.onnx_model.get_initializer(zero_point)
                # Align sym and asym scheme.
                zero_point_data = (zero_point_data - qmin).reshape((1,))
                self.onnx_model.set_initializer(zero_point, zero_point_data.astype(np.uint8), raw=False)
    def run(self, model_name):
        """Execute all passes in order, validate the result, and save ``<model_name>.onnx``."""
        self.format_qlinear_dtype_pass()
        self.merge_relu_pass()
        self.replace_op_pass()
        self.replace_qlinear_layer_pass()
        self.onnx_model.optimize_model()
        # QLinearAdd/QLinearPool come from the com.microsoft opset.
        self.onnx_model.set_opset_version('com.microsoft', 1)
        try:
            onnx.checker.check_model(self.onnx_model.model)
        except onnx.checker.ValidationError as e:
            # Validation failure is logged but does not abort saving.
            logger.critical('The model is invalid: %s' % e)
        self.onnx_model.save_onnx_model('{}.onnx'.format(model_name))
| 52.139785 | 115 | 0.599161 |
12c2816996e93c4bc2aa4cb16a98fa51ab3f217f | 1,348 | py | Python | openstack_dashboard/test/unit/test_themes.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/test/unit/test_themes.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 26 | 2015-02-23T16:37:31.000Z | 2020-07-02T08:37:41.000Z | openstack_dashboard/test/unit/test_themes.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.test.utils import override_settings
from openstack_dashboard.templatetags import themes
from openstack_dashboard.test import helpers as test
class SelectableThemeTest(test.TestCase):
    """Tests for the SELECTABLE_THEMES setting and the themes() template tag."""
    def test_selectable_theme_defaults(self):
        selectable = settings.SELECTABLE_THEMES
        available = settings.AVAILABLE_THEMES
        # NOTE(e0ne): verify that by default 'selectable' is the same as the
        # 'available' list
        self.assertEqual(selectable, available)
    @override_settings(SELECTABLE_THEMES=[
        ('default', 'Default', 'themes/default'),
    ])
    def test_selectable_override(self):
        # With SELECTABLE_THEMES overridden to a subset, the template tag's
        # result must diverge from the full AVAILABLE_THEMES list.
        selectable = themes.themes()
        available = themes.settings.AVAILABLE_THEMES
        self.assertNotEqual(selectable, available)
| 38.514286 | 75 | 0.746291 |
baabc7da85158a96ed765e4e329451ce818145cd | 5,551 | py | Python | objectModel/Python/cdm/storage/remote.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/cdm/storage/remote.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | 3 | 2021-05-11T23:57:12.000Z | 2021-08-04T05:03:05.000Z | objectModel/Python/cdm/storage/remote.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# ----------------------------------------------------------------------
import datetime
import json
from typing import Dict, List, Optional
import uuid
from cdm.storage.network import NetworkAdapter
from cdm.utilities.network.cdm_http_client import CdmHttpClient
from .base import StorageAdapterBase
class RemoteAdapter(NetworkAdapter, StorageAdapterBase):
    """Remote file system storage adapter.

    Maps corpus paths of the form ``/<host-key>/<path>`` to remote URLs.
    Each registered host gets a stable key (either caller-supplied or a
    generated UUID) kept in ``_sources`` / ``_sources_by_id``.
    """
    def __init__(self, hosts: Optional[Dict[str, str]] = None) -> None:
        super().__init__()
        self.location_hint = None  # type: Optional[str]
        # --- internal ---
        self._hosts = {}  # type: Dict[str, str]
        # full host URL -> host key
        self._sources = {}  # type: Dict[str, str]
        # host key -> {'protocol': ..., 'host': ...}
        self._sources_by_id = {}  # type: Dict[str, Dict[str, str]]
        self._type = 'remote'
        self._http_client = CdmHttpClient()  # type: CdmHttpClient
        if hosts:
            # Assigning through the property registers each host (see setter).
            self.hosts = hosts
    @property
    def hosts(self) -> Dict[str, str]:
        return self._hosts
    @hosts.setter
    def hosts(self, hosts: Dict[str, str]) -> None:
        self._hosts = hosts
        for key, value in hosts.items():
            self._fetch_or_register_host_info(value, key)
    def can_read(self) -> bool:
        return True
    def can_write(self) -> bool:
        # Read-only adapter; write_async raises NotImplementedError.
        return False
    async def read_async(self, corpus_path: str) -> str:
        """Fetch the document at the given corpus path over HTTP(S)."""
        url = self.create_adapter_path(corpus_path)
        request = self._set_up_cdm_request(url, {'User-Agent': 'CDM'}, 'GET')
        return await super()._read(request)
    async def write_async(self, corpus_path: str, data: str) -> None:
        raise NotImplementedError()
    def create_adapter_path(self, corpus_path: str) -> str:
        """Translate ``/<host-key>/<path>`` back into a full remote URL."""
        if not corpus_path:
            return None
        host_key_end = corpus_path.find('/', 1)
        host_key = corpus_path[1:host_key_end]
        if host_key_end == -1 or host_key not in self._sources_by_id:
            raise ValueError('Host ID not identified by remote adapter. Make sure to use create_corpus_path to get the corpus path.')
        protocol = self._sources_by_id[host_key]['protocol']
        host = self._sources_by_id[host_key]['host']
        path = corpus_path[host_key_end:]
        return protocol + '://' + host + path
    def create_corpus_path(self, adapter_path: str) -> Optional[str]:
        """Translate a full URL into ``/<host-key>/<path>``, registering the host if new."""
        protocol_index = adapter_path.find('://')
        if protocol_index == -1:
            return None
        path_index = adapter_path.find('/', protocol_index + 3)
        path = adapter_path[path_index:] if path_index != -1 else ''
        host_info = self._fetch_or_register_host_info(adapter_path)
        return '/{}{}'.format(host_info['key'], path)
    def clear_cache(self) -> None:
        self._sources = {}
        self._sources_by_id = {}
    async def compute_last_modified_time_async(self, adapter_path: str) -> Optional[datetime.datetime]:
        # NOTE(review): placeholder — always reports "now" instead of querying
        # the remote resource, so callers will always consider it modified.
        return datetime.datetime.now()
    async def fetch_all_files_async(self, folder_corpus_path: str) -> List[str]:
        # TODO: implement
        return None
    def fetch_config(self) -> str:
        """Serialize the adapter configuration (type, hosts, network options) to JSON."""
        result_config = {'type': self._type}
        config_object = {}
        # Go through the hosts dictionary and build a dictionary for each item.
        hosts_array = [{key: value} for key, value in self.hosts.items()]
        config_object['hosts'] = hosts_array
        # Try constructing network configs.
        config_object.update(self.fetch_network_config())
        if self.location_hint:
            config_object['locationHint'] = self.location_hint
        result_config['config'] = config_object
        return json.dumps(result_config)
    def update_config(self, config: str) -> None:
        """Apply a JSON configuration (the inverse of fetch_config)."""
        if not config:
            raise Exception('Remote adapter needs a config.')
        self.update_network_config(config)
        config_json = json.loads(config)
        if config_json.get('locationHint'):
            self.location_hint = config_json['locationHint']
        hosts = config_json['hosts']
        # Create a temporary dictionary.
        hosts_dict = {}
        # Iterate through all of the items in the hosts array.
        for host in hosts:
            # Get the property's key and value and save it to the dictionary.
            for key, value in host.items():
                hosts_dict[key] = value
        # Assign the temporary dictionary to the hosts dictionary.
        self.hosts = hosts_dict
    def _fetch_or_register_host_info(self, adapter_path: str, key: Optional[str] = None) -> Dict[str, str]:
        """Return (and register on first sight) the key/protocol/host for a URL."""
        protocol_index = adapter_path.find('://')
        if protocol_index == -1:
            return None
        path_index = adapter_path.find('/', protocol_index + 3)
        host_index = path_index if path_index != -1 else len(adapter_path)
        protocol = adapter_path[0: protocol_index]
        host = adapter_path[protocol_index + 3: host_index]
        full_host = adapter_path[0: host_index]
        # Register when unseen, or re-key when an explicit key differs.
        if not self._sources.get(full_host) or (key is not None and self._sources[full_host] != key):
            guid = key if key else str(uuid.uuid4())
            self._sources[full_host] = guid
            self._sources_by_id[guid] = {
                'protocol': protocol,
                'host': host
            }
        return {
            'key': self._sources[full_host],
            'protocol': protocol,
            'host': host
        }
| 32.461988 | 133 | 0.61016 |
751bd8b2decc6f89023a92c52110591541f1da1a | 18,149 | py | Python | cipher/JPEGImageCipher.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | null | null | null | cipher/JPEGImageCipher.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | 3 | 2015-05-19T08:43:46.000Z | 2015-06-10T17:55:28.000Z | cipher/JPEGImageCipher.py | JaySon-Huang/SecertPhotos | e741cc26c19a5b249d45cc70959ac6817196cb8a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#encoding=utf-8
'''
定义一个类似`接口`的存在
'''
import math
from pyjpegtbx.constants import DCTSIZE2
from .utils import exgcd, multiplicative_inver, pow_mod
class Base_JPEGImageCipher(object):
    """Interface-like base class for JPEG image ciphers.

    Concrete ciphers must override every method; each one here only raises
    NotImplementedError.
    """
    def encrypt(self, image):
        '''Return an encrypted JPEGImage object.
        '''
        raise NotImplementedError
    def decrypt(self, image):
        '''Return a decrypted JPEGImage object.
        '''
        raise NotImplementedError
    # NOTE(review): method name has a typo ("encrtpt"); kept as-is because
    # renaming would break existing callers.
    def encrtptAndEmbData(self, image, data=b'Attack at dawn!'):
        '''Return a JPEGImage object with the data embedded and then encrypted.
        '''
        raise NotImplementedError
    def embData(self, image, data):
        '''Return a JPEGImage object with the data embedded.
        '''
        raise NotImplementedError
    def extractData(self, image):
        '''Return the data extracted from the image.
        '''
        raise NotImplementedError
    def decryptAndExtractData(self, image):
        '''Return the decrypted image together with the extracted data.
        '''
        raise NotImplementedError
class FixedLogisticShuffeler(object):
    """Deterministic shuffler driven by the logistic map (r = 4).

    The chaotic iteration ``x -> 4*x*(1-x)`` is passed through
    ``2/pi * asin(sqrt(x))`` to flatten its distribution, and the resulting
    stream drives a tail-to-head permutation.  The same seed always yields
    the same permutation, so an identical shuffler can reproduce (and hence
    invert) the ordering.
    """

    def __init__(self, seed):
        # seed must lie in the open interval (0, 1), excluding 0.5.
        self.seed = seed

    def next(self):
        """Advance the logistic map one step and return a value in [0, 1)."""
        self.seed = 4 * self.seed * (1 - self.seed)
        return 2 / math.pi * math.asin(math.sqrt(self.seed))

    def shuffle(self, lst):
        """Permute ``lst`` in place, swapping each tail element with an earlier one."""
        for k in range(len(lst) - 1, 0, -1):
            ind = int(k * self.next())
            lst[k], lst[ind] = lst[ind], lst[k]
class JPEGImageCipher0(object):
    '''
    Generalized Arnold transform cipher; defaults to the classic Arnold
    (cat) map:
    default :(a, b)   (1, 1)
            (c, d) = (1, 2)

    Each 8x8 DCT block is scrambled with the Arnold map and the block
    order of each component is shuffled with a chaotic (logistic map)
    sequence.  Message bits are embedded via histogram shifting around
    the per-coefficient peak value.
    '''
    # Number of bits reserved at the front of the payload for the
    # message-length header, and the resulting maximum length (in bits).
    MAX_NBITS_MESSAGE_LENGTH = 16
    MAX_MESSAGE_LENGTH = (1 << MAX_NBITS_MESSAGE_LENGTH)-1

    def __init__(self, seed=0.362, abcd=(1, 1, 1, 2), sqrtN=8):
        '''
        seed: chaotic shuffler seed in (0, 1), excluding 0.5
        abcd: 4-int-tuple of Arnold map coefficients
        sqrtN: sqrt of N (the DCT block edge length, normally 8)
        '''
        super().__init__()
        self.a, self.b, self.c, self.d = abcd
        self.sqrtN = sqrtN
        # The map is invertible on Z_N x Z_N only when gcd(ad-bc, N) == 1.
        gcd, _, _ = exgcd(self.a*self.d - self.b*self.c, sqrtN*sqrtN)
        if gcd != 1:
            raise ValueError("Must satisfy gcd(ad-bc, N)=1")
        self.shuffler = FixedLogisticShuffeler(seed)

    def encrypt(self, image):
        '''Return an encrypted copy of ``image``.

        Scrambles every block of each of the 3 components, then shuffles
        the block order within each component.
        '''
        ec_image = image.copy()
        for com in range(3):
            for i, block in enumerate(ec_image.data[com]):
                block = self.scrambledBlock(block)
                ec_image.data[com][i] = block
            ec_image.data[com] = self.shuffledComponemt(ec_image.data[com])
        return ec_image

    def scrambledBlock(self, block):
        '''Scramble one sqrtN x sqrtN block with the (a b; c d) Arnold map.'''
        res = [0] * (self.sqrtN * self.sqrtN)
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = (self.a * x + self.b * y) % self.sqrtN
                yy = (self.c * x + self.d * y) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    def shuffledComponemt(self, comp):
        '''Permute a component's block order with the chaotic shuffler.'''
        length = len(comp)
        ptrlst = [_ for _ in range(length)]
        self.shuffler.shuffle(ptrlst)
        ncomp = [None] * length
        for i, block in enumerate(comp):
            ncomp[i] = comp[ptrlst[i]]
        return ncomp

    def decrypt(self, image):
        '''Return a decrypted copy of ``image`` (inverse of encrypt).'''
        dc_image = image.copy()
        for com in range(3):
            for i, block in enumerate(dc_image.data[com]):
                block = self.unscrambledBlock(block)
                dc_image.data[com][i] = block
            dc_image.data[com] = self.unshuffledComponemt(dc_image.data[com])
        return dc_image

    def unscrambledBlock(self, block):
        '''Invert scrambledBlock via the modular inverse of ad-bc.'''
        res = [0] * (self.sqrtN * self.sqrtN)
        inver = multiplicative_inver(
            self.a*self.d-self.b*self.c, self.sqrtN
        )
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = inver*(self.d * x - self.b * y) % self.sqrtN
                yy = inver*(-self.c * x + self.a * y) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    def unshuffledComponemt(self, comp):
        '''Invert shuffledComponemt (same seed reproduces the permutation).'''
        length = len(comp)
        ptrlst = [_ for _ in range(length)]
        self.shuffler.shuffle(ptrlst)
        ncomp = [None] * length
        for i, block in enumerate(comp):
            ncomp[ptrlst[i]] = comp[i]
        return ncomp

    def encrtptAndEmbData(self, image, data=b'Attack at dawn!'):
        '''Encrypt ``image`` and embed ``data`` into the encrypted result.'''
        image = self.encrypt(image)
        self.embData(image, data)
        return image

    def embData(self, image, data):
        '''Embed ``data`` into component 0 via histogram shifting.

        The first MAX_NBITS_MESSAGE_LENGTH bits carry the payload length
        (in bits), so at most MAX_MESSAGE_LENGTH bits can be embedded.
        '''
        length = len(data) * 8
        assert length < self.MAX_MESSAGE_LENGTH, \
            "嵌入数据量太大: %d(MAX: %d)" % (length, self.MAX_MESSAGE_LENGTH)
        pos_infos = self.__shiftData(0, image, length)
        bs = BitInputStream(data)
        pos_infos_index = 0
        # Write the 16-bit length header first, LSB first: a 0-bit moves
        # the peak coefficient to peak-1, a 1-bit to peak+1.
        for i in range(self.MAX_NBITS_MESSAGE_LENGTH):
            bit = 1 if (length & (0x01 << i)) > 0 else 0
            _index, coef_index, val_index, ori = pos_infos[pos_infos_index]
            if bit == 0:
                image.data[_index][coef_index][val_index] = ori - 1
            elif bit == 1:
                image.data[_index][coef_index][val_index] = ori + 1
            pos_infos_index += 1
        # Then the payload bits themselves.
        for bit in bs.read():
            _index, coef_index, val_index, ori = pos_infos[pos_infos_index]
            if bit == 0:
                image.data[_index][coef_index][val_index] = ori-1
            elif bit == 1:
                image.data[_index][coef_index][val_index] = ori+1
            pos_infos_index += 1

    def __shiftData(self, cindex, image, need):
        '''Shift component ``cindex``'s histograms to free the peak+-1 bins.

        Returns a list of (component_index, block_index, coef_index, peak)
        tuples marking the positions where one bit may be embedded.
        '''
        _id = image.comp_infos[cindex]['component_id']
        _index = image.comp_infos[cindex]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        pos_infos = []
        nalloc = 0
        for val_index in range(1, DCTSIZE2):
            for coef_index, coef_block in enumerate(image.data[_index]):
                topVal, topNum = hist.top(val_index)
                # TODO: embedding across multiple slots is not implemented,
                # which limits capacity; the 50% margin is also debatable.
                # Bug fix: the assert message previously supplied only one
                # argument for two %d placeholders, raising TypeError
                # instead of the intended AssertionError.
                assert int(topNum*0.5) > need, \
                    "嵌入数据量太大: %d(MAX: %d)" % (need, int(topNum*0.5))
                val = coef_block[val_index]
                if val < topVal:
                    coef_block[val_index] -= 1
                elif val > topVal:
                    coef_block[val_index] += 1
                else:  # peak position: record an embeddable slot
                    pos_infos.append((_index, coef_index, val_index, topVal))
                    nalloc += 1
        return pos_infos

    def extractData(self, image):
        '''Extract the embedded payload bytes from component 0.'''
        _id = image.comp_infos[0]['component_id']
        _index = image.comp_infos[0]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        bout = BitOutputStream()
        isGettingMsg = False
        try:
            for val_index in range(1, DCTSIZE2):
                for coef_index, coef_block in enumerate(image.data[_index]):
                    topVal, _ = hist.top(val_index)
                    val = coef_block[val_index]
                    if val == topVal - 1:
                        bout.write(0)
                    elif val == topVal + 1:
                        bout.write(1)
                    # NOTE(review): the early-exit check below only runs
                    # while isGettingMsg is False, so once the length is
                    # read the scan never stops early.  Extraction appears
                    # to still work because shifted coefficients no longer
                    # equal peak+-1 -- confirm before restructuring.
                    if not isGettingMsg:
                        if len(bout) == 16:
                            # First MAX_NBITS_MESSAGE_LENGTH bits hold the
                            # payload length (in bits).
                            emb_message_length = bout.getInt(
                                nbit=self.MAX_NBITS_MESSAGE_LENGTH
                            )
                            isGettingMsg = True
                        elif len(bout) == emb_message_length:
                            # Full payload collected; abort both loops.
                            raise Exception
        except Exception:
            pass
        msg = bytearray(bout._bytes)
        return msg

    def clearData(self, image):
        '''Undo the histogram shift, restoring original coefficients.'''
        _id = image.comp_infos[0]['component_id']
        _index = image.comp_infos[0]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        hasGetLength = False
        bout = BitOutputStream()
        for val_index in range(1, DCTSIZE2):
            for coef_block in image.data[_index]:
                topVal, _ = hist.top(val_index)
                val = coef_block[val_index]
                if val == topVal - 1:
                    bout.write(0)
                elif val == topVal + 1:
                    bout.write(1)
                if val < topVal:
                    coef_block[val_index] += 1
                elif val > topVal:
                    coef_block[val_index] -= 1
                if not hasGetLength:
                    if len(bout) == 16:  # first 16 bits store the length
                        emb_message_length = bout.getInt(
                            nbit=self.MAX_NBITS_MESSAGE_LENGTH
                        )
                        hasGetLength = True
                # TODO: embedding across multiple slots is not handled
                # if hasGetLength and emb_message_length < :
            break

    def decryptAndExtractData(self, image):
        '''Extract the payload, clear the embedding, then decrypt.'''
        bdata = self.extractData(image)
        self.clearData(image)
        image = self.decrypt(image)
        return image, bdata
class JPEGImageCipher1(JPEGImageCipher0):
    """Arnold-map cipher with an added polynomial offset f(x) = k*x^p + r
    applied to the transformed y coordinate."""

    def __init__(self, seed=0.362):
        super().__init__(seed)
        # Coefficients of the polynomial offset function.
        self.k = 24
        self.p = 2
        self.r = 300

    def f(self, x):
        """Polynomial offset added to the y coordinate."""
        return self.k * (x ** self.p) + self.r

    def scrambledBlock(self, block):
        """Scramble one block with the offset Arnold map."""
        n = self.sqrtN
        out = [0] * (n * n)
        for row in range(n):
            for col in range(n):
                nrow = (self.a * row + self.b * col) % n
                ncol = (self.c * row + self.d * col + self.f(nrow)) % n
                out[nrow * n + ncol] = block[row * n + col]
        return out

    def unscrambledBlock(self, block):
        """Invert scrambledBlock via the modular inverse of ad-bc."""
        n = self.sqrtN
        out = [0] * (n * n)
        inv = int(multiplicative_inver(self.a * self.d - self.b * self.c, n))
        for row in range(n):
            offset = self.f(row)
            for col in range(n):
                nrow = inv * (self.d * row - self.b * (col - offset)) % n
                ncol = inv * (-self.c * row + self.a * (col - offset)) % n
                out[nrow * n + ncol] = block[row * n + col]
        return out
class JPEGImageCipher2(JPEGImageCipher1):
    """Variant of JPEGImageCipher1 whose offset reduces x**p modulo N
    before scaling, keeping intermediate values small."""

    def f(self, x, mod):
        """Polynomial offset with x**p reduced modulo ``mod``."""
        # return (self.k * pow_mod(x, self.p, mod) + self.r) % mod
        return (self.k * ((x**self.p) % mod) + self.r) % mod

    def scrambledBlock(self, block):
        """Scramble one block with the modular-offset Arnold map."""
        n = self.sqrtN
        out = [0] * (n * n)
        for row in range(n):
            for col in range(n):
                nrow = (self.a * row + self.b * col) % n
                ncol = (self.c * row + self.d * col + self.f(nrow, n)) % n
                out[nrow * n + ncol] = block[row * n + col]
        return out

    def unscrambledBlock(self, block):
        """Invert scrambledBlock via the modular inverse of ad-bc."""
        n = self.sqrtN
        out = [0] * (n * n)
        inv = int(multiplicative_inver(self.a * self.d - self.b * self.c, n))
        for row in range(n):
            offset = self.f(row, n)
            for col in range(n):
                nrow = inv * (self.d * row - self.b * (col - offset)) % n
                ncol = inv * (-self.c * row + self.a * (col - offset)) % n
                out[nrow * n + ncol] = block[row * n + col]
        return out
class ColorSpaceHistorgram(object):
    """Per-coefficient value histograms for one image component.

    ``slots[i]`` maps each value seen at DCT position ``i`` to its count.
    min/max and peak lookups are computed lazily and cached.
    """

    def __init__(self, cid, component_datas):
        # ``cid`` is accepted for interface compatibility but unused.
        self.slots = [None] * DCTSIZE2
        for coef_block in component_datas:
            for pos, coef in enumerate(coef_block):
                if not self.slots[pos]:
                    self.slots[pos] = {}
                self.slots[pos][coef] = self.slots[pos].get(coef, 0) + 1

    def min_max(self, slot_index=None):
        """Return all (min, max) pairs, or one pair if slot_index given."""
        if not hasattr(self, '_min_max_vals'):
            self._min_max_vals = tuple(
                (min(slot.keys()), max(slot.keys())) for slot in self.slots
            )
        if slot_index is None:
            return self._min_max_vals
        return self._min_max_vals[slot_index]

    def top(self, slot_index=None):
        """Return all (value, count) peaks, or one peak if slot_index given.

        Ties on count resolve to the last maximal entry of the stable sort
        (i.e. the latest-inserted among equals).
        """
        if not hasattr(self, '_topVals'):
            tops = []
            for slot in self.slots:
                ordered = sorted(slot.items(), key=lambda pair: pair[1])
                tops.append(ordered[-1])
            self._topVals = tuple(tops)
        if slot_index is None:
            return self._topVals
        return self._topVals[slot_index]

    def at(self, slot_index):
        """Return one slot's (value, count) pairs sorted by value."""
        return sorted(self.slots[slot_index].items())

    def __str__(self):
        lines = []
        for pos, slot in enumerate(self.slots):
            minVal = min(slot.keys())
            maxVal = max(slot.keys())
            topPos, topVal = sorted(slot.items(), key=lambda pair: pair[1])[-1]
            lines.append(
                '''`%2d`: { [top @%2d:%5d] range: %4d ~ %4d }'''
                % (pos, topPos, topVal, minVal, maxVal)
            )
        return '\n'.join(lines)

    def __repr__(self):
        return self.__str__()
class BitInputStream(object):
    """Iterate over the bits of a byte string, least significant bit first."""

    def __init__(self, _bytes):
        self._bytes = _bytes

    def read(self):
        """Yield every bit (as int 0/1) of every byte, LSB first."""
        for byte in self._bytes:
            for shift in range(8):
                yield (byte >> shift) & 0x1

    def __len__(self):
        return 8 * len(self._bytes)
class BitOutputStream(object):
    """Accumulate single bits (LSB first) into a growing byte list."""

    def __init__(self):
        self._bytes = []      # completed bytes
        self._curByte = 0     # byte currently being filled
        self._curShift = 0    # number of bits already in _curByte

    def write(self, bit):
        """Append one bit; flush the working byte every 8 bits."""
        self._curByte |= bit << self._curShift
        self._curShift += 1
        if self._curShift == 8:
            self._bytes.append(self._curByte)
            self._curByte = 0
            self._curShift = 0

    def hexdump(self):
        """Return the completed bytes as a lowercase hex string."""
        return ''.join('%02x' % byte for byte in self._bytes)

    def __len__(self):
        return 8 * len(self._bytes) + self._curShift

    def getInt(self, nbit=32):
        """Pop the first nbit//8 completed bytes, decoded little-endian."""
        nbytes = nbit // 8
        value = 0
        for byte in reversed(self._bytes[:nbytes]):
            value = (value << 8) + byte
        self._bytes = self._bytes[nbytes:]
        return value
def encdec(img, cls):
    """Instantiate cipher class ``cls`` and run one encryption pass on ``img``.

    Used only for timing experiments; the encrypted result is discarded.
    """
    cls().encrypt(img)
def main():
    """Demo / scratch entry point.

    Runs the steganography round-trip at the bottom; earlier experiments
    (chaotic sequences, timing, histograms, bit streams) are kept as
    commented-out sections.
    """
    ## Basic image encrypt/decrypt round-trip case
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage.open('../sos.jpg')
    # cipher = JPEGImageCipher2()
    # encImg = cipher.encrypt(img)
    # encImg.save('lfs_enc.jpg')
    # cipher = JPEGImageCipher2()
    # decImg = cipher.decrypt(encImg)
    # decImg.save('lfs_dec.jpg')
    # rg = FixedLogisticShuffeler(0.500001)
    ## Output of the chaotic sequence
    # for _ in range(100):
    #     print(rg.next())
    ## Scramble and restore using the chaotic sequence
    # length = 100
    # target = [_ for _ in range(length)]
    # enc = [0] * length
    # dec = [0] * length
    # ptrlst = [_ for _ in range(length)]
    # print('ori', target)
    # rg.shuffle(ptrlst)
    # print('ptr', ptrlst)
    # for x in range(length):
    #     enc[x] = target[ptrlst[x]]
    # print('enc', enc)
    # for x in range(length):
    #     dec[ptrlst[x]] = enc[x]
    # print('dec', dec)
    ## Timing comparison of the three image ciphers
    # import time
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage('sos.jpg')
    # clses = [JPEGImageCipher0, JPEGImageCipher1, JPEGImageCipher2]
    # for cls in clses:
    #     beg = time.time()
    #     encdec(img, cls)
    #     end = time.time()
    #     print("Time for %s:%f" % (cls, end - beg))
    ## Timing comparison of fast pow_mod vs the `**` operator
    # import time
    # run_round = 100000
    # for i in range(2):
    #     beg = time.time()
    #     if i == 0:
    #         for x in range(run_round):
    #             p = (x**20) % 1007
    #             print(p)
    #     elif i == 1:
    #         for x in range(run_round):
    #             p = pow_mod(x, 20, 1007)
    #             print(p)
    #     end = time.time()
    #     print("Time :%f" % (end - beg))
    ## Tests of the histogram helper functions
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage.open('../lfs.jpg')
    # historgrams = []
    # for comp_info in img.comp_infos:
    #     _id = comp_info['component_id']
    #     _index = comp_info['component_index']
    #     historgrams.append(
    #         ColorSpaceHistorgram(
    #             _id, img.data[_index]
    #         )
    #     )
    # import IPython
    # IPython.embed()
    # print(historgrams[0].top())
    # print(historgrams[0].min_max())
    # print(str(historgrams[0]))
    # print(str(historgrams[0].at(0)))
    ## Bit stream tests
    # bs = BitInputStream(b'\xff\x01\x30')
    # for i, bit in enumerate(bs.read()):
    #     print(bit, end='')
    #     if i % 8 == 7:
    #         print()
    ## Image steganography demo
    from pyjpegtbx import JPEGImage
    img = JPEGImage.open('../sos.jpg')
    cipher = JPEGImageCipher0()
    encImg = cipher.encrtptAndEmbData(img, '冰菓如茶'.encode('utf-8'))
    encImg.save('lfs_enc.jpg')
    cipher = JPEGImageCipher0()
    decImg, data = cipher.decryptAndExtractData(encImg)
    decImg.save('lfs_dec.jpg')
    print(data.decode('utf-8'))
# Script entry point: run the steganography demo in main().
if __name__ == '__main__':
    main()
| 32.878623 | 84 | 0.53193 |
be3a96e89314ba50b8a40027faac834b4eadddf3 | 1,330 | py | Python | tutorials/warp_affine.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | 51 | 2019-10-11T18:47:30.000Z | 2021-05-03T06:42:37.000Z | tutorials/warp_affine.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tutorials/warp_affine.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2019-10-11T19:59:05.000Z | 2020-07-10T02:28:52.000Z | """
Rotate image using warp affine transform
========================================
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import kornia
# read the image with OpenCV
img: np.ndarray = cv2.imread('./data/bennett_aden.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# convert to torch tensor
data: torch.tensor = kornia.image_to_tensor(img, keepdim=False) # BxCxHxW
# create transformation (rotation)
alpha: float = 45.0 # in degrees
angle: torch.tensor = torch.ones(1) * alpha
# define the rotation center
center: torch.tensor = torch.ones(1, 2)
center[..., 0] = data.shape[3] / 2 # x
center[..., 1] = data.shape[2] / 2 # y
# define the scale factor
scale: torch.tensor = torch.ones(1, 2)
# compute the transformation matrix
M: torch.tensor = kornia.get_rotation_matrix2d(center, angle, scale)
# apply the transformation to original image
_, _, h, w = data.shape
data_warped: torch.tensor = kornia.warp_affine(data.float(), M, dsize=(h, w))
# convert back to numpy
img_warped: np.ndarray = kornia.tensor_to_image(data_warped.byte()[0])
# create the plot
fig, axs = plt.subplots(1, 2, figsize=(16, 10))
axs = axs.ravel()
axs[0].axis('off')
axs[0].set_title('image source')
axs[0].imshow(img)
axs[1].axis('off')
axs[1].set_title('image warped')
axs[1].imshow(img_warped)
| 24.181818 | 77 | 0.692481 |
791f01ba40835e5cfd107af8e2ec492cccffcf27 | 25,934 | py | Python | src/zenml/steps/base_step.py | SKRohit/zenml | ba84f56d8fc6043cdcb59832ab47bc8261e25d12 | [
"Apache-2.0"
] | null | null | null | src/zenml/steps/base_step.py | SKRohit/zenml | ba84f56d8fc6043cdcb59832ab47bc8261e25d12 | [
"Apache-2.0"
] | null | null | null | src/zenml/steps/base_step.py | SKRohit/zenml | ba84f56d8fc6043cdcb59832ab47bc8261e25d12 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import hashlib
import inspect
import json
import random
from abc import abstractmethod
from typing import (
Any,
ClassVar,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from tfx.types.channel import Channel
from zenml.artifacts.base_artifact import BaseArtifact
from zenml.exceptions import MissingStepParameterError, StepInterfaceError
from zenml.logger import get_logger
from zenml.materializers.base_materializer import BaseMaterializer
from zenml.materializers.default_materializer_registry import (
default_materializer_registry,
)
from zenml.steps.base_step_config import BaseStepConfig
from zenml.steps.step_context import StepContext
from zenml.steps.step_output import Output
from zenml.steps.utils import (
INSTANCE_CONFIGURATION,
INTERNAL_EXECUTION_PARAMETER_PREFIX,
PARAM_ENABLE_CACHE,
PARAM_PIPELINE_PARAMETER_NAME,
SINGLE_RETURN_OUT_NAME,
STEP_INNER_FUNC_NAME,
_ZenMLSimpleComponent,
generate_component_class,
)
logger = get_logger(__name__)
class BaseStepMeta(type):
    """Metaclass for `BaseStep`.
    Checks whether everything passed in:
    * Has a matching materializer.
    * Is a subclass of the Config class

    At class-creation time this inspects the step function's signature and
    populates INPUT_SIGNATURE / OUTPUT_SIGNATURE as well as the config and
    context parameter metadata on the new class.
    """

    def __new__(
        mcs, name: str, bases: Tuple[Type[Any], ...], dct: Dict[str, Any]
    ) -> "BaseStepMeta":
        """Set up a new class with a qualified spec."""
        # Ensure the spec dicts exist even if the class body didn't set them.
        dct.setdefault("PARAM_SPEC", {})
        dct.setdefault("INPUT_SPEC", {})
        dct.setdefault("OUTPUT_SPEC", {})
        cls = cast(Type["BaseStep"], super().__new__(mcs, name, bases, dct))
        # Fresh (non-inherited) signature containers for every subclass.
        cls.INPUT_SIGNATURE = {}
        cls.OUTPUT_SIGNATURE = {}
        cls.CONFIG_PARAMETER_NAME = None
        cls.CONFIG_CLASS = None
        cls.CONTEXT_PARAMETER_NAME = None
        # Get the signature of the step function
        step_function_signature = inspect.getfullargspec(
            getattr(cls, STEP_INNER_FUNC_NAME)
        )
        if bases:
            # We're not creating the abstract `BaseStep` class
            # but a concrete implementation. Make sure the step function
            # signature does not contain variable *args or **kwargs
            variable_arguments = None
            if step_function_signature.varargs:
                variable_arguments = f"*{step_function_signature.varargs}"
            elif step_function_signature.varkw:
                variable_arguments = f"**{step_function_signature.varkw}"
            if variable_arguments:
                raise StepInterfaceError(
                    f"Unable to create step '{name}' with variable arguments "
                    f"'{variable_arguments}'. Please make sure your step "
                    f"functions are defined with a fixed amount of arguments."
                )
            step_function_args = (
                step_function_signature.args + step_function_signature.kwonlyargs
            )
            # Remove 'self' from the signature if it exists
            if step_function_args and step_function_args[0] == "self":
                step_function_args.pop(0)
            # Verify the input arguments of the step function
            for arg in step_function_args:
                arg_type = step_function_signature.annotations.get(arg, None)
                if not arg_type:
                    raise StepInterfaceError(
                        f"Missing type annotation for argument '{arg}' when "
                        f"trying to create step '{name}'. Please make sure to "
                        f"include type annotations for all your step inputs "
                        f"and outputs."
                    )
                if issubclass(arg_type, BaseStepConfig):
                    # Raise an error if we already found a config in the signature
                    if cls.CONFIG_CLASS is not None:
                        raise StepInterfaceError(
                            f"Found multiple configuration arguments "
                            f"('{cls.CONFIG_PARAMETER_NAME}' and '{arg}') when "
                            f"trying to create step '{name}'. Please make sure to "
                            f"only have one `BaseStepConfig` subclass as input "
                            f"argument for a step."
                        )
                    cls.CONFIG_PARAMETER_NAME = arg
                    cls.CONFIG_CLASS = arg_type
                elif issubclass(arg_type, StepContext):
                    if cls.CONTEXT_PARAMETER_NAME is not None:
                        raise StepInterfaceError(
                            f"Found multiple context arguments "
                            f"('{cls.CONTEXT_PARAMETER_NAME}' and '{arg}') when "
                            f"trying to create step '{name}'. Please make sure to "
                            f"only have one `StepContext` as input "
                            f"argument for a step."
                        )
                    cls.CONTEXT_PARAMETER_NAME = arg
                else:
                    # Can't do any check for existing materializers right now
                    # as they might get be defined later, so we simply store the
                    # argument name and type for later use.
                    cls.INPUT_SIGNATURE.update({arg: arg_type})
            # Parse the returns of the step function
            return_type = step_function_signature.annotations.get("return", None)
            if return_type is not None:
                if isinstance(return_type, Output):
                    cls.OUTPUT_SIGNATURE = dict(return_type.items())
                else:
                    cls.OUTPUT_SIGNATURE[SINGLE_RETURN_OUT_NAME] = return_type
            # Raise an exception if input and output names of a step overlap as
            # tfx requires them to be unique
            # TODO [ENG-155]: Can we prefix inputs and outputs to avoid this
            #  restriction?
            shared_input_output_keys = set(cls.INPUT_SIGNATURE).intersection(
                set(cls.OUTPUT_SIGNATURE)
            )
            if shared_input_output_keys:
                raise StepInterfaceError(
                    f"There is an overlap in the input and output names of "
                    f"step '{name}': {shared_input_output_keys}. Please make "
                    f"sure that your input and output names are distinct."
                )
        return cls
T = TypeVar("T", bound="BaseStep")
class BaseStep(metaclass=BaseStepMeta):
"""Abstract base class for all ZenML steps.
Attributes:
step_name: The name of this step.
pipeline_parameter_name: The name of the pipeline parameter for which
this step was passed as an argument.
enable_cache: A boolean indicating if caching is enabled for this step.
requires_context: A boolean indicating if this step requires a
`StepContext` object during execution.
"""
# TODO [ENG-156]: Ensure these are ordered
INPUT_SIGNATURE: ClassVar[Dict[str, Type[Any]]] = None # type: ignore[assignment] # noqa
OUTPUT_SIGNATURE: ClassVar[Dict[str, Type[Any]]] = None # type: ignore[assignment] # noqa
CONFIG_PARAMETER_NAME: ClassVar[Optional[str]] = None
CONFIG_CLASS: ClassVar[Optional[Type[BaseStepConfig]]] = None
CONTEXT_PARAMETER_NAME: ClassVar[Optional[str]] = None
PARAM_SPEC: Dict[str, Any] = {}
INPUT_SPEC: Dict[str, Type[BaseArtifact]] = {}
OUTPUT_SPEC: Dict[str, Type[BaseArtifact]] = {}
INSTANCE_CONFIGURATION: Dict[str, Any] = {}
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize the step and verify its (at most one) config argument.

        Args:
            *args: At most one positional `BaseStepConfig` instance.
            **kwargs: Instance options and/or the config passed by keyword.
        """
        self.step_name = self.__class__.__name__
        self.pipeline_parameter_name: Optional[str] = None
        # Merge per-instance options stored under INSTANCE_CONFIGURATION.
        kwargs.update(getattr(self, INSTANCE_CONFIGURATION))
        # Caching defaults to enabled unless explicitly disabled.
        self.enable_cache = kwargs.pop(PARAM_ENABLE_CACHE, True)
        self.requires_context = bool(self.CONTEXT_PARAMETER_NAME)
        self._explicit_materializers: Dict[str, Type[BaseMaterializer]] = {}
        self._component: Optional[_ZenMLSimpleComponent] = None
        self._verify_arguments(*args, **kwargs)
    @abstractmethod
    def entrypoint(self, *args: Any, **kwargs: Any) -> Any:
        """Abstract method for core step logic.

        Concrete steps implement their computation here; inputs and outputs
        are described by INPUT_SIGNATURE / OUTPUT_SIGNATURE.
        """
    def get_materializers(
        self, ensure_complete: bool = False
    ) -> Dict[str, Type[BaseMaterializer]]:
        """Returns available materializers for the outputs of this step.
        Args:
            ensure_complete: If set to `True`, this method will raise a
                `StepInterfaceError` if no materializer can be found for an
                output.
        Returns:
            A dictionary mapping output names to `BaseMaterializer` subclasses.
            If no explicit materializer was set using
            `step.with_return_materializers(...)`, this checks the
            default materializer registry to find a materializer for the
            type of the output. If no materializer is registered, the
            output of this method will not contain an entry for this output.
        Raises:
            StepInterfaceError: (Only if `ensure_complete` is set to `True`)
                If an output does not have an explicit materializer assigned
                to it and we there is no default materializer registered for
                the output type.
        """
        # NOTE(review): `materializers` aliases self._explicit_materializers,
        # so registry lookups below are cached on the instance in place --
        # confirm this mutation is intended.
        materializers = self._explicit_materializers
        for output_name, output_type in self.OUTPUT_SIGNATURE.items():
            if output_name in materializers:
                # Materializer for this output was set explicitly
                pass
            elif default_materializer_registry.is_registered(output_type):
                materializer = default_materializer_registry[output_type]
                materializers[output_name] = materializer
            else:
                if ensure_complete:
                    raise StepInterfaceError(
                        f"Unable to find materializer for output "
                        f"'{output_name}' of type `{output_type}` in step "
                        f"'{self.step_name}'. Please make sure to either "
                        f"explicitly set a materializer for step outputs "
                        f"using `step.with_return_materializers(...)` or "
                        f"registering a default materializer for specific "
                        f"types by subclassing `BaseMaterializer` and setting "
                        f"its `ASSOCIATED_TYPES` class variable."
                    )
        return materializers
    @property
    def _internal_execution_parameters(self) -> Dict[str, Any]:
        """ZenML internal execution parameters for this step.

        When caching is enabled, hashes of the step function and materializer
        sources are included so code changes invalidate the cache; when
        disabled, a random value forces re-execution every run.
        """
        parameters = {
            PARAM_PIPELINE_PARAMETER_NAME: self.pipeline_parameter_name
        }
        if self.enable_cache:
            # Caching is enabled so we compute a hash of the step function code
            # and materializers to catch changes in the step behavior
            def _get_hashed_source(value: Any) -> str:
                """Returns a hash of the objects source code."""
                source_code = inspect.getsource(value)
                return hashlib.sha256(source_code.encode("utf-8")).hexdigest()
            source_fn = getattr(self, STEP_INNER_FUNC_NAME)
            parameters["step_source"] = _get_hashed_source(source_fn)
            for name, materializer in self.get_materializers().items():
                key = f"{name}_materializer_source"
                parameters[key] = _get_hashed_source(materializer)
        else:
            # Add a random string to the execution properties to disable caching
            random_string = f"{random.getrandbits(128):032x}"
            parameters["disable_cache"] = random_string
        # Prefix all keys to mark them as ZenML-internal parameters.
        return {
            INTERNAL_EXECUTION_PARAMETER_PREFIX + key: value
            for key, value in parameters.items()
        }
    def _verify_arguments(self, *args: Any, **kwargs: Any) -> None:
        """Verifies the initialization args and kwargs of this step.
        This method makes sure that there is only a config object passed at
        initialization and that it was passed using the correct name and
        type specified in the step declaration.
        If the correct config object was found, additionally saves the
        config parameters to `self.PARAM_SPEC`.
        Args:
            *args: The args passed to the init method of this step.
            **kwargs: The kwargs passed to the init method of this step.
        Raises:
            StepInterfaceError: If there are too many arguments or arguments
                with a wrong name/type.
        """
        # Only steps declaring a config class accept (exactly one) argument.
        maximum_arg_count = 1 if self.CONFIG_CLASS else 0
        arg_count = len(args) + len(kwargs)
        if arg_count > maximum_arg_count:
            raise StepInterfaceError(
                f"Too many arguments ({arg_count}, expected: "
                f"{maximum_arg_count}) passed when creating a "
                f"'{self.step_name}' step."
            )
        if self.CONFIG_PARAMETER_NAME and self.CONFIG_CLASS:
            if args:
                config = args[0]
            elif kwargs:
                key, config = kwargs.popitem()
                if key != self.CONFIG_PARAMETER_NAME:
                    raise StepInterfaceError(
                        f"Unknown keyword argument '{key}' when creating a "
                        f"'{self.step_name}' step, only expected a single "
                        f"argument with key '{self.CONFIG_PARAMETER_NAME}'."
                    )
            else:
                # This step requires configuration parameters but no config
                # object was passed as an argument. The parameters might be
                # set via default values in the config class or in a
                # configuration file, so we continue for now and verify
                # that all parameters are set before running the step
                return
            if not isinstance(config, self.CONFIG_CLASS):
                raise StepInterfaceError(
                    f"`{config}` object passed when creating a "
                    f"'{self.step_name}' step is not a "
                    f"`{self.CONFIG_CLASS.__name__}` instance."
                )
            self.PARAM_SPEC = config.dict()
    def _update_and_verify_parameter_spec(self) -> None:
        """Verifies and prepares the config parameters for running this step.
        When the step requires config parameters, this method:
        - checks if config parameters were set via a config object or file
        - tries to set missing config parameters from default values of the
          config class
        Raises:
            MissingStepParameterError: If no value could be found for one or
                more config parameters.
            StepInterfaceError: If a config parameter value couldn't be
                serialized to json.
        """
        if self.CONFIG_CLASS:
            # we need to store a value for all config keys inside the
            # metadata store to make sure caching works as expected
            missing_keys = []
            for name, field in self.CONFIG_CLASS.__fields__.items():
                if name in self.PARAM_SPEC:
                    # a value for this parameter has been set already
                    continue
                if field.required:
                    # this field has no default value set and therefore needs
                    # to be passed via an initialized config object
                    missing_keys.append(name)
                else:
                    # use default value from the pydantic config class
                    self.PARAM_SPEC[name] = field.default
            if missing_keys:
                raise MissingStepParameterError(
                    self.step_name, missing_keys, self.CONFIG_CLASS
                )
    def _prepare_input_artifacts(
        self, *artifacts: Channel, **kw_artifacts: Channel
    ) -> Dict[str, Channel]:
        """Verifies and prepares the input artifacts for running this step.
        Args:
            *artifacts: Positional input artifacts passed to
                the __call__ method.
            **kw_artifacts: Keyword input artifacts passed to
                the __call__ method.
        Returns:
            Dictionary containing both the positional and keyword input
            artifacts.
        Raises:
            StepInterfaceError: If there are too many or too few artifacts.
        """
        input_artifact_keys = list(self.INPUT_SIGNATURE.keys())
        if len(artifacts) > len(input_artifact_keys):
            raise StepInterfaceError(
                f"Too many input artifacts for step '{self.step_name}'. "
                f"This step expects {len(input_artifact_keys)} artifact(s) "
                f"but got {len(artifacts) + len(kw_artifacts)}."
            )
        combined_artifacts = {}
        # Positional artifacts map to input keys in declaration order.
        for i, artifact in enumerate(artifacts):
            if not isinstance(artifact, Channel):
                raise StepInterfaceError(
                    f"Wrong argument type (`{type(artifact)}`) for positional "
                    f"argument {i} of step '{self.step_name}'. Only outputs "
                    f"from previous steps can be used as arguments when "
                    f"connecting steps."
                )
            key = input_artifact_keys[i]
            combined_artifacts[key] = artifact
        # Keyword artifacts must not collide with positional ones.
        for key, artifact in kw_artifacts.items():
            if key in combined_artifacts:
                # an artifact for this key was already set by
                # the positional input artifacts
                raise StepInterfaceError(
                    f"Unexpected keyword argument '{key}' for step "
                    f"'{self.step_name}'. An artifact for this key was "
                    f"already passed as a positional argument."
                )
            if not isinstance(artifact, Channel):
                raise StepInterfaceError(
                    f"Wrong argument type (`{type(artifact)}`) for argument "
                    f"'{key}' of step '{self.step_name}'. Only outputs from "
                    f"previous steps can be used as arguments when "
                    f"connecting steps."
                )
            combined_artifacts[key] = artifact
        # check if there are any missing or unexpected artifacts
        expected_artifacts = set(self.INPUT_SIGNATURE.keys())
        actual_artifacts = set(combined_artifacts.keys())
        missing_artifacts = expected_artifacts - actual_artifacts
        unexpected_artifacts = actual_artifacts - expected_artifacts
        if missing_artifacts:
            raise StepInterfaceError(
                f"Missing input artifact(s) for step "
                f"'{self.step_name}': {missing_artifacts}."
            )
        if unexpected_artifacts:
            raise StepInterfaceError(
                f"Unexpected input artifact(s) for step "
                f"'{self.step_name}': {unexpected_artifacts}. This step "
                f"only requires the following artifacts: {expected_artifacts}."
            )
        return combined_artifacts
    def __call__(
        self, *artifacts: Channel, **kw_artifacts: Channel
    ) -> Union[Channel, List[Channel]]:
        """Generates a component when called.

        Validates config parameters and input/output specs, builds the TFX
        component for this step, and returns its output channel(s).
        """
        # TODO [ENG-157]: replaces Channels with ZenML class (BaseArtifact?)
        self._update_and_verify_parameter_spec()
        # Make sure that the input/output artifact types exist in the signature
        if not all(k in self.OUTPUT_SIGNATURE for k in self.OUTPUT_SPEC):
            raise StepInterfaceError(
                "Failed to create the step. The predefined artifact types "
                "for the input does not match the input signature."
            )
        # Prepare the input artifacts and spec
        input_artifacts = self._prepare_input_artifacts(
            *artifacts, **kw_artifacts
        )
        self.INPUT_SPEC = {
            arg_name: artifact_type.type  # type:ignore[misc]
            for arg_name, artifact_type in input_artifacts.items()
        }
        # make sure we have registered materializers for each output
        materializers = self.get_materializers(ensure_complete=True)
        # Prepare the output artifacts and spec
        from zenml.artifacts.type_registery import type_registry
        for key, value in self.OUTPUT_SIGNATURE.items():
            verified_types = type_registry.get_artifact_type(value)
            if key not in self.OUTPUT_SPEC:
                # Default to the first registered artifact type.
                self.OUTPUT_SPEC[key] = verified_types[0]
            else:
                if self.OUTPUT_SPEC[key] not in verified_types:
                    raise StepInterfaceError(
                        f"Type {key} can not be interpreted as a "
                        f"{self.OUTPUT_SPEC[key]}"
                    )
        execution_parameters = {
            **self.PARAM_SPEC,
            **self._internal_execution_parameters,
        }
        # Convert execution parameter values to strings
        try:
            execution_parameters = {
                k: json.dumps(v) for k, v in execution_parameters.items()
            }
        except TypeError as e:
            raise StepInterfaceError(
                f"Failed to serialize execution parameters for step "
                f"'{self.step_name}'. Please make sure to only use "
                f"json serializable parameter values."
            ) from e
        source_fn = getattr(self, STEP_INNER_FUNC_NAME)
        component_class = generate_component_class(
            step_name=self.step_name,
            step_module=self.__module__,
            input_spec=self.INPUT_SPEC,
            output_spec=self.OUTPUT_SPEC,
            execution_parameter_names=set(execution_parameters),
            step_function=source_fn,
            materializers=materializers,
        )
        self._component = component_class(
            **input_artifacts, **execution_parameters
        )
        # Resolve the returns in the right order.
        returns = [self.component.outputs[key] for key in self.OUTPUT_SPEC]
        # If its one return we just return the one channel not as a list
        if len(returns) == 1:
            return returns[0]
        else:
            return returns
@property
def component(self) -> _ZenMLSimpleComponent:
"""Returns a TFX component."""
if not self._component:
raise StepInterfaceError(
"Trying to access the step component "
"before creating it via calling the step."
)
return self._component
def with_return_materializers(
self: T,
materializers: Union[
Type[BaseMaterializer], Dict[str, Type[BaseMaterializer]]
],
) -> T:
"""Register materializers for step outputs.
If a single materializer is passed, it will be used for all step
outputs. Otherwise, the dictionary keys specify the output names
for which the materializers will be used.
Args:
materializers: The materializers for the outputs of this step.
Returns:
The object that this method was called on.
Raises:
StepInterfaceError: If a materializer is not a `BaseMaterializer`
subclass or a materializer for a non-existent output is given.
"""
def _is_materializer_class(value: Any) -> bool:
"""Checks whether the given object is a `BaseMaterializer`
subclass."""
is_class = isinstance(value, type)
return is_class and issubclass(value, BaseMaterializer)
if isinstance(materializers, dict):
allowed_output_names = set(self.OUTPUT_SIGNATURE)
for output_name, materializer in materializers.items():
if output_name not in allowed_output_names:
raise StepInterfaceError(
f"Got unexpected materializers for non-existent "
f"output '{output_name}' in step '{self.step_name}'. "
f"Only materializers for the outputs "
f"{allowed_output_names} of this step can"
f" be registered."
)
if not _is_materializer_class(materializer):
raise StepInterfaceError(
f"Got unexpected object `{materializer}` as "
f"materializer for output '{output_name}' of step "
f"'{self.step_name}'. Only `BaseMaterializer` "
f"subclasses are allowed."
)
self._explicit_materializers[output_name] = materializer
elif _is_materializer_class(materializers):
# Set the materializer for all outputs of this step
self._explicit_materializers = {
key: materializers for key in self.OUTPUT_SIGNATURE
}
else:
raise StepInterfaceError(
f"Got unexpected object `{materializers}` as output "
f"materializer for step '{self.step_name}'. Only "
f"`BaseMaterializer` subclasses or dictionaries mapping "
f"output names to `BaseMaterializer` subclasses are allowed "
f"as input when specifying return materializers."
)
return self
| 41.099842 | 94 | 0.601643 |
246b064170d49ac3529272d00f14f17e74083dbc | 3,818 | py | Python | faceNet/faceNet.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | 13 | 2019-12-23T00:16:54.000Z | 2022-01-27T07:03:02.000Z | faceNet/faceNet.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | null | null | null | faceNet/faceNet.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | 4 | 2020-12-19T05:28:29.000Z | 2022-01-30T05:36:26.000Z | #--------------------------
#Date: 19/12/2019
#Place: Turin, PIC4SeR
#Author: Fra, Vitto
#Project: faceAssistant
#---------------------------
##################################################################
# Networks adapted from https://github.com/davidsandberg/facenet #
##################################################################
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # avoid annoying logs
import tensorflow as tf
import time
import faceNet.detectFace as detectFace
import numpy as np
import cv2
class faceNet():
    """Face detector and embedder backed by two frozen TensorFlow graphs.

    `model_bb` is the face-detection graph, `model_emb` the embedding
    graph; each is imported into its own `tf.Graph` with its own session.
    """

    def __init__(self, model_bb='bin/bb/frozen_graph', model_emb='bin/emb/frozen_graph', conf_thr=0.7,
                 fps_bb=False, fps_emb=False, verbose=True):
        """Load both frozen graphs and open one session per graph.

        Args:
            model_bb: Path to the frozen face-detection graph.
            model_emb: Path to the frozen embedding graph.
            conf_thr: Detection confidence threshold (stored for callers).
            fps_bb: If True, print timing for each detection call.
            fps_emb: If True, print timing for each embedding call.
            verbose: If True, log progress while importing the graphs.
        """
        self.graph_bb = tf.Graph()
        self.graph_emb = tf.Graph()
        self.import_graph(model_bb, model_emb, verbose)
        self.sess_bb = tf.compat.v1.Session(graph=self.graph_bb)
        self.sess_emb = tf.compat.v1.Session(graph=self.graph_emb)
        self.conf_thr = conf_thr
        self.fps_bb = fps_bb
        self.fps_emb = fps_emb

    @staticmethod
    def _load_frozen_graph(graph, model_path):
        """Parse a frozen GraphDef file and import it into `graph`."""
        with graph.as_default():
            with tf.io.gfile.GFile(model_path, 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')

    def import_graph(self, model_bb, model_emb, verbose):
        """Import the detection and embedding graphs from disk."""
        if verbose:
            print("[faceNet] Importing bounding boxes graph.")
        self._load_frozen_graph(self.graph_bb, model_bb)
        if verbose:
            print("[faceNet] Importing embeddings graph.")
        self._load_frozen_graph(self.graph_emb, model_emb)
        if verbose:
            print("[faceNet] Done.\n")

    def get_face_locations(self, frame, minsize=20, factor=0.709,
                           threshold=None):
        """Detect faces in `frame`.

        Args:
            frame: Image array to run detection on.
            minsize: Minimum face size in pixels.
            factor: Image pyramid scaling factor.
            threshold: Per-stage detection thresholds; defaults to
                [0.6, 0.7, 0.7]. (Fixed: previously a mutable default
                argument shared between calls.)

        Returns:
            (boxes, landmarks) where landmarks is transposed so each row
            belongs to one detected face.
        """
        if threshold is None:
            threshold = [0.6, 0.7, 0.7]
        if self.fps_bb:
            start = time.time()
        boxes, landmarks = detectFace.detect_face(frame, minsize,
                                                  threshold, factor, self.sess_bb)
        if self.fps_bb:
            delta = time.time() - start
            print('[faceNet] Bounding boxes: time:', delta, 'fsp:', 1 / delta)
        return boxes, np.transpose(landmarks)

    def get_embeddings(self, faces):
        """Run the embedding graph on resized (160x160), prewhitened faces."""
        faces = np.array([detectFace.prewhiten(cv2.resize(face, (160, 160)))
                          for face in faces])
        if self.fps_emb:
            start = time.time()
        emb = self.sess_emb.run('embeddings:0',
                                feed_dict={'input:0': faces,
                                           'phase_train:0': False})
        if self.fps_emb:
            delta = time.time() - start
            print('[faceNet] Embeddings: time:', delta, 'fsp:', 1 / delta)
        return emb

    def get_faces(self, frame, boxes, margin=60):
        """Crop each detected box out of `frame`, padded by `margin` pixels
        and clamped to the image borders."""
        faces = []
        for (left, top, right, bottom, conf) in boxes:
            left = int(np.maximum(left - margin / 2, 0))
            top = int(np.maximum(top - margin / 2, 0))
            right = int(np.minimum(right + margin / 2, frame.shape[1]))
            bottom = int(np.minimum(bottom + margin / 2, frame.shape[0]))
            faces.append(frame[top:bottom, left:right])
        return faces

    def compare_faces(self, storage_emb, emb, distance_thr=0.6, verbose=False):
        """Match `emb` against stored embeddings by Euclidean distance.

        Returns a boolean array: True where the distance is below
        `distance_thr`.
        """
        dist = np.sqrt(np.sum(np.square(np.subtract(storage_emb, emb)), axis=-1))
        if verbose:
            print("[faceNet] Distances:", dist)
        return dist < distance_thr
| 37.067961 | 99 | 0.537454 |
0b921a0907113e5872238f4ed66061b3edb2b9c7 | 14,254 | py | Python | aleph/views/serializers.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | 2 | 2021-10-31T01:04:47.000Z | 2021-11-08T09:43:29.000Z | aleph/views/serializers.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | 319 | 2021-04-30T01:09:57.000Z | 2022-03-30T01:15:19.000Z | aleph/views/serializers.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | null | null | null | import logging
from pprint import pprint, pformat # noqa
from flask import request
from pantomime.types import PDF, CSV
from banal import ensure_list
from followthemoney import model
from followthemoney.types import registry
from followthemoney.helpers import entity_filename
from aleph.core import url_for
from aleph.logic import resolver
from aleph.logic.entities import check_write_entity, transliterate_values
from aleph.logic.util import collection_url, entity_url, archive_url
from aleph.model import Role, Collection, Document, Entity, Events
from aleph.model import Alert, EntitySet, EntitySetItem, Export
from aleph.views.util import jsonify, clean_object
log = logging.getLogger(__name__)
class Serializer(object):
    """Base class for API serializers.

    A serializer turns model objects and search hits into plain dicts.
    The two-phase protocol first queues referenced objects (`collect`),
    loads them in a single batch (`resolver.resolve`) and only then
    renders the final output.
    """

    def __init__(self, nested=False):
        self.nested = nested

    def collect(self, obj):
        """Queue related objects for batch resolution (no-op by default)."""
        pass

    def _serialize(self, obj):
        """Hook for subclasses to transform the dict; identity by default."""
        return obj

    def _serialize_common(self, obj):
        """Apply defaults shared by all serializers, then clean the dict."""
        pk = obj.pop("id", None)
        if pk is not None:
            obj["id"] = str(pk)
        obj.pop("_index", None)
        obj["writeable"] = False
        obj["links"] = {}
        return clean_object(self._serialize(obj))

    def queue(self, clazz, key, schema=None):
        """Queue a referenced object, unless rendering a nested object."""
        if self.nested:
            return
        resolver.queue(request, clazz, key, schema=schema)

    def resolve(self, clazz, key, serializer=None):
        """Fetch a previously queued object, optionally serialized nested."""
        data = resolver.get(request, clazz, key)
        if data is None or serializer is None:
            return data
        return serializer(nested=True).serialize(data)

    def serialize(self, obj):
        """Serialize a single object, resolving its references first."""
        data = self._to_dict(obj)
        if data is None:
            return None
        self.collect(data)
        resolver.resolve(request)
        return self._serialize_common(data)

    def serialize_many(self, objs):
        """Serialize a list of objects, resolving queued references once."""
        prepared = []
        for item in ensure_list(objs):
            data = self._to_dict(item)
            if data is None:
                continue
            self.collect(data)
            prepared.append(data)
        resolver.resolve(request)
        rendered = []
        for data in prepared:
            out = self._serialize_common(data)
            if out is not None:
                rendered.append(out)
        return rendered

    def _to_dict(self, obj):
        """Coerce models (`to_dict`) and namedtuples (`_asdict`) to dicts."""
        for converter in ("to_dict", "_asdict"):
            if hasattr(obj, converter):
                obj = getattr(obj, converter)()
        return obj

    @classmethod
    def jsonify(cls, obj, **kwargs):
        return jsonify(cls().serialize(obj), **kwargs)

    @classmethod
    def jsonify_result(cls, result, extra=None, **kwargs):
        data = result.to_dict(serializer=cls)
        if extra is not None:
            data.update(extra)
        return jsonify(data, **kwargs)
class RoleSerializer(Serializer):
    """Serializer for roles; strips private fields for other users."""

    # Hidden unless the requester can write this role (and never nested).
    _PRIVATE = (
        "has_password", "is_admin", "is_muted", "is_tester", "is_blocked",
        "api_key", "email", "locale", "created_at", "updated_at",
    )
    # Only meaningful for user roles, removed from groups/systems.
    _USER_ONLY = ("api_key", "email", "locale", "password")

    def _serialize(self, obj):
        obj["links"] = {"self": url_for("roles_api.view", id=obj.get("id"))}
        obj["writeable"] = request.authz.can_write_role(obj.get("id"))
        obj["shallow"] = obj.get("shallow", True)
        if self.nested or not obj["writeable"]:
            for field in self._PRIVATE:
                obj.pop(field, None)
        if obj["type"] != Role.USER:
            for field in self._USER_ONLY:
                obj.pop(field, None)
        return obj
class AlertSerializer(Serializer):
    """Serializer for saved-search alerts."""

    def _serialize(self, obj):
        owner_id = obj.pop("role_id", None)
        obj["links"] = {"self": url_for("alerts_api.view", alert_id=obj.get("id"))}
        obj["writeable"] = request.authz.can_write_role(owner_id)
        return obj
class CollectionSerializer(Serializer):
    """Serializer for collections; resolves creator and team roles."""

    def collect(self, obj):
        self.queue(Role, obj.get("creator_id"))
        for member_id in ensure_list(obj.get("team_id")):
            if request.authz.can_read_role(member_id):
                self.queue(Role, member_id)

    def _serialize(self, obj):
        pk = obj.get("id")
        # Secret collections get authz-signed export links.
        authz = request.authz if obj.get("secret") else None
        obj["links"] = {
            "self": url_for("collections_api.view", collection_id=pk),
            "xref_export": url_for("xref_api.export", collection_id=pk, _authz=authz),
            "reconcile": url_for("reconcile_api.reconcile", collection_id=pk),
            "ui": collection_url(pk),
        }
        obj["shallow"] = obj.get("shallow", True)
        obj["writeable"] = request.authz.can(pk, request.authz.WRITE)
        obj["creator"] = self.resolve(
            Role, obj.pop("creator_id", None), RoleSerializer
        )
        team = []
        for member_id in ensure_list(obj.pop("team_id", [])):
            if request.authz.can_read_role(member_id):
                team.append(self.resolve(Role, member_id, RoleSerializer))
        obj["team"] = team
        return obj
class PermissionSerializer(Serializer):
    """Serializer for collection permission grants."""

    def collect(self, obj):
        self.queue(Role, obj.get("role_id"))

    def _serialize(self, obj):
        obj.pop("collection_id", None)
        grantee_id = obj.pop("role_id", None)
        # NOTE(review): "writeable" reflects *read* access to the role
        # (original author flagged this with "wat") — confirm intent.
        obj["writeable"] = request.authz.can_read_role(grantee_id)
        obj["role"] = self.resolve(Role, grantee_id, RoleSerializer)
        return obj
class EntitySerializer(Serializer):
    """Serializer for FollowTheMoney entities (documents included).

    Resolves entity-typed property values into shallow nested entities,
    resolves the owning collection and creating role, and attaches
    archive download links for document entities.
    """

    def collect(self, obj):
        self.queue(Collection, obj.get("collection_id"))
        self.queue(Role, obj.get("role_id"))
        schema = model.get(obj.get("schema"))
        if schema is None or self.nested:
            return
        # Queue all entity-typed property values for batch resolution.
        properties = obj.get("properties", {})
        for name, values in properties.items():
            prop = schema.get(name)
            if prop is None or prop.type != registry.entity:
                continue
            for value in ensure_list(values):
                self.queue(Entity, value, schema=prop.range)

    def _serialize(self, obj):
        proxy = model.get_proxy(dict(obj))
        properties = {}
        for prop, value in proxy.itervalues():
            properties.setdefault(prop.name, [])
            if prop.type == registry.entity and not self.nested:
                # Replace entity IDs with shallow serialized entities.
                entity = self.resolve(Entity, value, EntitySerializer)
                if entity is not None:
                    entity["shallow"] = True
                    value = entity
            if value is not None:
                properties[prop.name].append(value)
        obj["properties"] = properties
        links = {
            "self": url_for("entities_api.view", entity_id=proxy.id),
            "expand": url_for("entities_api.expand", entity_id=proxy.id),
            "tags": url_for("entities_api.tags", entity_id=proxy.id),
            "ui": entity_url(proxy.id),
        }
        if proxy.schema.is_a(Document.SCHEMA):
            # Documents may expose archive links for the source file and
            # derived PDF/CSV representations, keyed by content hash.
            content_hash = proxy.first("contentHash", quiet=True)
            if content_hash:
                name = entity_filename(proxy)
                mime = proxy.first("mimeType", quiet=True)
                links["file"] = archive_url(
                    content_hash, file_name=name, mime_type=mime
                )
            pdf_hash = proxy.first("pdfHash", quiet=True)
            if pdf_hash:
                name = entity_filename(proxy, extension="pdf")
                links["pdf"] = archive_url(pdf_hash, file_name=name, mime_type=PDF)
            csv_hash = proxy.first("csvHash", quiet=True)
            if csv_hash:
                name = entity_filename(proxy, extension="csv")
                links["csv"] = archive_url(csv_hash, file_name=name, mime_type=CSV)
        collection = obj.get("collection") or {}
        coll_id = obj.pop("collection_id", collection.get("id"))
        # This is a last resort catcher for entities nested in other
        # entities that get resolved without regard for authz.
        if not request.authz.can(coll_id, request.authz.READ):
            return None
        obj["collection"] = self.resolve(Collection, coll_id, CollectionSerializer)
        role_id = obj.pop("role_id", None)
        obj["role"] = self.resolve(Role, role_id, RoleSerializer)
        obj["links"] = links
        obj["latinized"] = transliterate_values(proxy)
        obj["writeable"] = check_write_entity(obj, request.authz)
        obj["shallow"] = obj.get("shallow", True)
        # Phasing out multi-values here (2021-01):
        obj["created_at"] = min(ensure_list(obj.get("created_at")), default=None)
        obj["updated_at"] = max(ensure_list(obj.get("updated_at")), default=None)
        return obj
class XrefSerializer(Serializer):
    """Serializer for cross-reference matches between two entities."""

    def collect(self, obj):
        matchable = tuple(s.matchable for s in model)
        self.queue(Entity, obj.get("entity_id"), matchable)
        self.queue(Entity, obj.get("match_id"), matchable)
        self.queue(Collection, obj.get("collection_id"))
        self.queue(Collection, obj.pop("match_collection_id"))

    def _serialize(self, obj):
        obj["entity"] = self.resolve(Entity, obj.pop("entity_id"), EntitySerializer)
        obj["match"] = self.resolve(Entity, obj.pop("match_id"), EntitySerializer)
        obj["writeable"] = request.authz.can(
            obj.get("collection_id"), request.authz.WRITE
        )
        if obj["entity"] and obj["match"]:
            return obj
        # Drop matches where either side is not visible (implicit None).
        return None
class SimilarSerializer(Serializer):
    """Serializer for 'similar entities' results."""

    def collect(self, obj):
        EntitySerializer().collect(obj.get("entity", {}))

    def _serialize(self, obj):
        obj["entity"] = EntitySerializer().serialize(obj.get("entity", {}))
        obj["writeable"] = request.authz.can(
            obj.pop("collection_id"), request.authz.WRITE
        )
        return obj
class ExportSerializer(Serializer):
    """Serializer for export jobs; exposes a download link while available."""

    def _serialize(self, obj):
        downloadable = obj.get("content_hash") and not obj.get("deleted")
        if downloadable:
            obj["links"] = {
                "download": archive_url(
                    obj.get("content_hash"),
                    file_name=obj.get("file_name"),
                    mime_type=obj.get("mime_type"),
                )
            }
        return obj
class EntitySetSerializer(Serializer):
    """Serializer for entity sets (lists, diagrams, profiles)."""

    def collect(self, obj):
        self.queue(Collection, obj.get("collection_id"))
        self.queue(Role, obj.get("role_id"))

    def _serialize(self, obj):
        coll_id = obj.pop("collection_id", None)
        obj["shallow"] = obj.get("shallow", True)
        obj["writeable"] = request.authz.can(coll_id, request.authz.WRITE)
        obj["collection"] = self.resolve(Collection, coll_id, CollectionSerializer)
        obj["role"] = self.resolve(Role, obj.get("role_id", None), RoleSerializer)
        return obj
class EntitySetItemSerializer(Serializer):
    """Serializer for single entity-set membership entries."""

    def collect(self, obj):
        self.queue(Collection, obj.get("collection_id"))
        self.queue(Entity, obj.get("entity_id"))

    def _serialize(self, obj):
        coll_id = obj.pop("collection_id", None)
        if not request.authz.can(coll_id, request.authz.READ):
            # Should never come into effect:
            return None
        obj["entity"] = self.resolve(
            Entity, obj.pop("entity_id", None), EntitySerializer
        )
        obj["collection"] = self.resolve(Collection, coll_id, CollectionSerializer)
        obj["writeable"] = request.authz.can(
            obj.get("entityset_collection_id"), request.authz.WRITE
        )
        return obj
class ProfileSerializer(Serializer):
    """Serializer for entity profiles (a merged view over many entities)."""

    def collect(self, obj):
        self.queue(Collection, obj.get("collection_id"))

    def _serialize(self, obj):
        collection_id = obj.pop("collection_id", None)
        obj["writeable"] = request.authz.can(collection_id, request.authz.WRITE)
        obj["shallow"] = obj.get("shallow", True)
        obj["collection"] = self.resolve(
            Collection, collection_id, CollectionSerializer
        )
        # `merged` arrives as an entity proxy; render it to a plain dict
        # and add transliterations for display.
        proxy = obj.pop("merged")
        data = proxy.to_dict()
        data["latinized"] = transliterate_values(proxy)
        obj["merged"] = data
        # Flatten the profile items down to the IDs of their entities.
        items = obj.pop("items", [])
        entities = [i.get("entity") for i in items]
        obj["entities"] = [e.get("id") for e in entities if e is not None]
        obj.pop("proxies", None)
        return obj
class NotificationSerializer(Serializer):
    """Serializer for event notifications.

    Each event declares typed parameters; the matching serializer from
    SERIALIZERS resolves and renders every parameter object.
    """

    # Maps notification parameter classes to their serializers.
    SERIALIZERS = {
        Alert: AlertSerializer,
        Entity: EntitySerializer,
        Collection: CollectionSerializer,
        EntitySet: EntitySetSerializer,
        EntitySetItem: EntitySetItemSerializer,
        Role: RoleSerializer,
        Export: ExportSerializer,
    }

    def collect(self, obj):
        self.queue(Role, obj.get("actor_id"))
        event = Events.get(obj.get("event"))
        if event is not None:
            # Queue every declared event parameter for batch resolution.
            for name, clazz in event.params.items():
                key = obj.get("params", {}).get(name)
                self.queue(clazz, key, Entity.THING)

    def _serialize(self, obj):
        event = Events.get(obj.get("event"))
        if event is None:
            # Unknown event type: drop the notification entirely.
            return None
        params = {"actor": self.resolve(Role, obj.get("actor_id"), RoleSerializer)}
        for name, clazz in event.params.items():
            key = obj.get("params", {}).get(name)
            serializer = self.SERIALIZERS.get(clazz)
            params[name] = self.resolve(clazz, key, serializer)
        obj["params"] = params
        obj["event"] = event.to_dict()
        return obj
class MappingSerializer(Serializer):
    """Serializer for table-to-entities mapping configurations."""

    def collect(self, obj):
        self.queue(EntitySet, obj.get("entityset_id"))
        self.queue(Entity, obj.get("table_id"))

    def _serialize(self, obj):
        obj["links"] = {}
        obj["entityset"] = self.resolve(
            EntitySet, obj.pop("entityset_id", None), EntitySetSerializer
        )
        obj["table"] = self.resolve(
            Entity, obj.get("table_id", None), EntitySerializer
        )
        return obj
| 37.21671 | 88 | 0.605374 |
1e5ba2c5098841a772e25dfbd0d351c8f914b4c4 | 1,005 | py | Python | data_augmentation_scripts/remove_apply_exif.py | team8/outdoor-blind-navigation | 78b99fe1a193161c4b809990c71940fbed719785 | [
"MIT"
] | 6 | 2021-05-18T16:46:04.000Z | 2022-01-08T02:49:27.000Z | data_augmentation_scripts/remove_apply_exif.py | aoberai/outdoor-blind-navigation | 78b99fe1a193161c4b809990c71940fbed719785 | [
"MIT"
] | 15 | 2021-05-19T23:40:50.000Z | 2021-07-06T08:24:40.000Z | data_augmentation_scripts/remove_apply_exif.py | aoberai/outdoor-blind-navigation | 78b99fe1a193161c4b809990c71940fbed719785 | [
"MIT"
] | 4 | 2021-05-26T02:10:43.000Z | 2021-09-04T17:59:51.000Z | import os
import PIL
import PIL.Image
import PIL.ExifTags
import cv2
import numpy as np
folders = ["Left of Sidewalk", "Right of Sidewalk", "Middle of Sidewalk"]
def rotate(img_name):
    """Apply the EXIF Orientation tag to an image file in place.

    Reads `img_name`, rotates the pixel data according to the EXIF
    orientation (if present) and overwrites the file via OpenCV, which
    drops the EXIF metadata.

    Fixes: previously crashed with TypeError when the image carried no
    EXIF block (`_getexif()` returns None) and with KeyError when the
    'Orientation' tag was missing.
    """
    img = PIL.Image.open(img_name)
    raw_exif = img._getexif()
    if raw_exif is None:
        # No EXIF data at all: nothing to apply.
        return
    exif = {
        PIL.ExifTags.TAGS[k]: v
        for k, v in raw_exif.items()
        if k in PIL.ExifTags.TAGS
    }
    orientation = exif.get('Orientation')
    img = np.array(img.convert('RGB'))
    if orientation == 3:
        img = cv2.rotate(img, cv2.ROTATE_180)
    elif orientation == 6:
        img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
    elif orientation == 8:
        img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
    # PIL delivers RGB; OpenCV writes BGR.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(img_name, img)
# Walk each class folder and normalise the orientation of every JPEG.
for directory in folders:
    for entry in os.listdir(directory):
        if entry.endswith((".JPG", ".jpg")):
            full_path = os.path.join(directory, entry)
            print(full_path)
            rotate(full_path)
384204c18ea4b11a5ed221a7130e8330330d9683 | 1,872 | py | Python | ecom/models.py | mohammedrafathali/maxfashions | 3afe0c4a4530d839a347434cffc7509e422e197a | [
"MIT"
] | null | null | null | ecom/models.py | mohammedrafathali/maxfashions | 3afe0c4a4530d839a347434cffc7509e422e197a | [
"MIT"
] | null | null | null | ecom/models.py | mohammedrafathali/maxfashions | 3afe0c4a4530d839a347434cffc7509e422e197a | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
    """Shop customer profile, linked one-to-one to a Django auth user."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profile_pic = models.ImageField(
        upload_to='profile_pic/CustomerProfilePic/', null=True, blank=True)
    address = models.CharField(max_length=40)
    mobile = models.CharField(max_length=20, null=False)

    @property
    def get_name(self):
        """Full name assembled from the linked user's first and last name."""
        return "{0} {1}".format(self.user.first_name, self.user.last_name)

    @property
    def get_id(self):
        """Primary key of the linked auth user."""
        return self.user.id

    def __str__(self):
        return self.user.first_name
class Product(models.Model):
    # Catalogue item offered in the shop.
    name = models.CharField(max_length=40)
    product_image = models.ImageField(
        upload_to='product_image/', null=True, blank=True)
    price = models.PositiveIntegerField()  # integer price, no decimals
    description = models.CharField(max_length=40)

    def __str__(self):
        return self.name
class Orders(models.Model):
    # Order lifecycle states used for the `status` choices below.
    STATUS = (
        ('Pending', 'Pending'),
        ('Order Confirmed', 'Order Confirmed'),
        ('Out for Delivery', 'Out for Delivery'),
        ('Delivered', 'Delivered'),
    )
    # NOTE(review): email/address/mobile are stored on the order itself
    # rather than read from the customer — presumably to freeze shipping
    # details at order time; confirm with the checkout flow.
    customer = models.ForeignKey(
        'Customer', on_delete=models.CASCADE, null=True)
    product = models.ForeignKey('Product', on_delete=models.CASCADE, null=True)
    email = models.CharField(max_length=50, null=True)
    address = models.CharField(max_length=500, null=True)
    mobile = models.CharField(max_length=20, null=True)
    order_date = models.DateField(auto_now_add=True, null=True)
    status = models.CharField(max_length=50, null=True, choices=STATUS)
class Feedback(models.Model):
    # Free-form site feedback; not linked to a user account.
    name = models.CharField(max_length=40)
    feedback = models.CharField(max_length=500)
    date = models.DateField(auto_now_add=True, null=True)

    def __str__(self):
        return self.name
6ae15d9fbedc619a66d6609a9a3bdf80270996b9 | 490 | py | Python | smnsr/__init__.py | rciszek/SMNSR | b151d78e93b16bca1372eb36798f5921920ef1ce | [
"MIT"
] | null | null | null | smnsr/__init__.py | rciszek/SMNSR | b151d78e93b16bca1372eb36798f5921920ef1ce | [
"MIT"
] | null | null | null | smnsr/__init__.py | rciszek/SMNSR | b151d78e93b16bca1372eb36798f5921920ef1ce | [
"MIT"
] | null | null | null | import logging.config
import yaml
import os
# Resolve the logging configuration that ships next to this module;
# os.path.join avoids manual separator concatenation (and a doubled
# separator when the directory already ends with one).
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logging.yaml")
env_key = "LOG_CFG"
# An environment variable may override the bundled configuration file.
value = os.getenv(env_key, None)
if value:
    path = value
if os.path.exists(path):
    with open(path, "rt") as f:
        config = yaml.safe_load(f.read())
    logging.config.dictConfig(config)
else:
    print("logging.yaml does not exist")
    logging.basicConfig(level=logging.DEBUG)
from .cross_validate import perform_cv, parse_args
| 23.333333 | 81 | 0.712245 |
456d77d4080f0accbb78911ae4ce72c7078e8400 | 674 | py | Python | test_project/hotels/migrations/0004_auto_20171228_0314.py | squallcs12/django-modelduplication | 85aeb9768287cc9b0b768a4710c93dc8c6be4e28 | [
"MIT"
] | null | null | null | test_project/hotels/migrations/0004_auto_20171228_0314.py | squallcs12/django-modelduplication | 85aeb9768287cc9b0b768a4710c93dc8c6be4e28 | [
"MIT"
] | 3 | 2020-02-11T21:29:29.000Z | 2021-06-10T17:28:10.000Z | test_project/hotels/migrations/0004_auto_20171228_0314.py | squallcs12/django-modelduplication | 85aeb9768287cc9b0b768a4710c93dc8c6be4e28 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2017-12-28 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the Feature model and adds
    # a many-to-many `features` relation on Hotel.

    dependencies = [
        ('hotels', '0003_booking_roomitem'),
    ]

    operations = [
        migrations.CreateModel(
            name='Feature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.AddField(
            model_name='hotel',
            name='features',
            field=models.ManyToManyField(to='hotels.Feature'),
        ),
    ]
77bd4b2d5fc7a9c09844a604b925aa9c9cff9497 | 18,215 | py | Python | src/zeit/content/volume/volume.py | ZeitOnline/zeit.content.volume | f5aec3e0720be5ecfbee79f4b9ce785c11ff714d | [
"BSD-3-Clause"
] | 1 | 2019-05-16T19:17:45.000Z | 2019-05-16T19:17:45.000Z | src/zeit/content/volume/volume.py | ZeitOnline/zeit.content.volume | f5aec3e0720be5ecfbee79f4b9ce785c11ff714d | [
"BSD-3-Clause"
] | 12 | 2016-08-18T08:03:03.000Z | 2019-02-27T13:57:19.000Z | src/zeit/content/volume/volume.py | ZeitOnline/zeit.content.volume | f5aec3e0720be5ecfbee79f4b9ce785c11ff714d | [
"BSD-3-Clause"
] | null | null | null | import datetime
import itertools
import logging
import requests
from zeit.cms.i18n import MessageFactory as _
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.dav
import zeit.cms.content.xmlsupport
import zeit.cms.interfaces
import zeit.cms.type
import zeit.content.cp.interfaces
import zeit.content.volume.interfaces
import zeit.content.portraitbox.interfaces
import zeit.content.infobox.interfaces
import zeit.edit.interfaces
import zeit.retresco.interfaces
import zeit.retresco.search
import zeit.workflow.dependency
import zope.interface
import zope.lifecycleevent
import zope.schema
log = logging.getLogger()
UNIQUEID_PREFIX = zeit.cms.interfaces.ID_NAMESPACE[:-1]
class Volume(zeit.cms.content.xmlsupport.XMLContentBase):
    """XML-backed content type for a single print volume (issue).

    Note: this module is Python 2 code (``unicode``, ``iter().next()``,
    list-returning ``filter``) — keep that in mind when editing.
    """

    zope.interface.implements(
        zeit.content.volume.interfaces.IVolume,
        zeit.cms.interfaces.IAsset)

    default_template = u"""\
<volume xmlns:py="http://codespeak.net/lxml/objectify/pytype">
<head/>
<body/>
<covers/>
</volume>
"""

    # year / volume / digital publish date are stored as DAV properties.
    zeit.cms.content.dav.mapProperties(
        zeit.content.volume.interfaces.IVolume,
        zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
        ('date_digital_published', 'year', 'volume'))

    _product_id = zeit.cms.content.dav.DAVProperty(
        zope.schema.TextLine(),
        zeit.workflow.interfaces.WORKFLOW_NS,
        'product-id')

    # Asset types that must be published together with an article.
    assets_to_publish = [zeit.content.portraitbox.interfaces.IPortraitbox,
                         zeit.content.infobox.interfaces.IInfobox
                         ]

    @property
    def product(self):
        """The product value matching the stored product id, or None."""
        source = zeit.content.volume.interfaces.IVolume['product'].source(self)
        for value in source:
            if value.id == self._product_id:
                return value

    @product.setter
    def product(self, value):
        # NOTE(review): when value is None, `value.id` on the next line
        # raises AttributeError before the None guard below can run —
        # confirm the setter is never called with None.
        if self._product_id == value.id:
            return
        self._product_id = value.id if value is not None else None

    _teaserText = zeit.cms.content.dav.DAVProperty(
        zeit.content.volume.interfaces.IVolume['teaserText'],
        zeit.cms.interfaces.DOCUMENT_SCHEMA_NS, 'teaserText')

    @property
    def teaserText(self):
        """Teaser text; falls back to the configured default template."""
        text = self._teaserText
        if text is None:
            config = zope.app.appsetup.product.getProductConfiguration(
                'zeit.content.volume')
            text = config['default-teaser-text'].decode('utf-8')
        return self.fill_template(text)

    @teaserText.setter
    def teaserText(self, value):
        self._teaserText = value

    @property
    def teaserSupertitle(self):  # For display in CP-editor
        return self.fill_template('Ausgabe {name}/{year}')

    def fill_template(self, text):
        """Interpolate {year} and {name} placeholders for this volume."""
        return self._fill_template(self, text)

    @staticmethod
    def _fill_template(context, text):
        # `name` is the volume number, zero-padded to two digits.
        return text.format(
            year=context.year,
            name=str(context.volume).rjust(2, '0'))

    @property
    def _all_products(self):
        """The main product plus its dependent products."""
        return [self.product] + self.product.dependent_products

    @property
    def previous(self):
        """The volume digitally published before this one, or None."""
        return self._find_in_order(None, self.date_digital_published, 'desc')

    @property
    def next(self):
        """The volume digitally published after this one, or None."""
        return self._find_in_order(self.date_digital_published, None, 'asc')

    def _find_in_order(self, start, end, sort):
        # Exactly one of start/end must be given (an open-ended range);
        # py2 `filter` returns a list here.
        if len(filter(None, [start, end])) != 1:
            return None
        # Since `sort` is passed in accordingly, and we exclude ourselves,
        # the first result (if any) is always the one we want.
        query = {'query': {'bool': {'filter': [
            {'term': {'doc_type': VolumeType.type}},
            {'term': {'payload.workflow.product-id': self.product.id}},
            {'range': {'payload.document.date_digital_published':
                       zeit.retresco.search.date_range(start, end)}},
        ], 'must_not': [
            {'term': {'url': self.uniqueId.replace(UNIQUEID_PREFIX, '')}}
        ]}}}
        return Volume._find_via_elastic(
            query, 'payload.document.date_digital_published:' + sort)

    @staticmethod
    def published_days_ago(days_ago):
        """Find a published volume whose digital publish date falls on the
        day `days_ago` days in the past."""
        query = {'query': {'bool': {'filter': [
            {'term': {'doc_type': VolumeType.type}},
            {'term': {'payload.workflow.published': True}},
            {'range': {'payload.document.date_digital_published': {
                'gte': 'now-%dd/d' % (days_ago + 1),
                'lt': 'now-%dd/d' % days_ago,
            }}}
        ]}}}
        return Volume._find_via_elastic(
            query, 'payload.workflow.date_last_published:desc')

    @staticmethod
    def _find_via_elastic(query, sort_order):
        """Run `query` against elasticsearch and resolve the top hit to
        CMS content (or None)."""
        es = zope.component.getUtility(zeit.retresco.interfaces.IElasticsearch)
        result = es.search(query, sort_order, rows=1)
        if not result:
            return None
        return zeit.cms.interfaces.ICMSContent(
            UNIQUEID_PREFIX + iter(result).next()['url'], None)

    def get_cover(self, cover_id, product_id=None, use_fallback=True):
        """Return the cover image group stored for cover_id/product_id.

        With use_fallback=True, missing product-specific covers fall back
        to the main product's cover.
        """
        if product_id is None and use_fallback:
            product_id = self.product.id
        if product_id and product_id not in \
                [prod.id for prod in self._all_products]:
            log.warning('%s is not a valid product id for %s' % (
                product_id, self))
            return None
        path = '//covers/cover[@id="{}" and @product_id="{}"]' \
            .format(cover_id, product_id)
        node = self.xml.xpath(path)
        uniqueId = node[0].get('href') if node else None
        if uniqueId:
            return zeit.cms.interfaces.ICMSContent(uniqueId, None)
        if use_fallback:
            # Fall back to the main product (which must be self.product,
            # since we respond only to ids out of self._all_products)
            # Recursive call of this function with the main product ID
            return self.get_cover(
                cover_id, self.product.id, use_fallback=False)

    def set_cover(self, cover_id, product_id, imagegroup):
        """Store (or clear, when imagegroup is None) a cover reference in
        the volume XML and mark the persistent object as changed."""
        if not self._is_valid_cover_id_and_product_id(cover_id, product_id):
            raise ValueError("Cover id {} or product id {} are not "
                             "valid.".format(cover_id, product_id))
        path = '//covers/cover[@id="{}" and @product_id="{}"]' \
            .format(cover_id, product_id)
        node = self.xml.xpath(path)
        if node:
            # Remove the existing cover entry before (re)adding it.
            self.xml.covers.remove(node[0])
        if imagegroup is not None:
            node = lxml.objectify.E.cover(id=cover_id,
                                          product_id=product_id,
                                          href=imagegroup.uniqueId)
            lxml.objectify.deannotate(node[0], cleanup_namespaces=True)
            self.xml.covers.append(node)
        # Bypass our own __setattr__ to flag ZODB persistence directly.
        super(Volume, self).__setattr__('_p_changed', True)

    def _is_valid_cover_id_and_product_id(self, cover_id, product_id):
        """True if cover_id is a known cover slot and product_id belongs
        to this volume's product family."""
        cover_ids = list(zeit.content.volume.interfaces.VOLUME_COVER_SOURCE(
            self))
        product_ids = [prod.id for prod in self._all_products]
        return cover_id in cover_ids and product_id in product_ids

    def all_content_via_search(self, additional_query_constraints=None):
        """
        Get all content for this volume via elasticsearch.
        Additional query clauses, if passed, are ANDed into the filter.
        """
        if not additional_query_constraints:
            additional_query_constraints = []
        elastic = zope.component.getUtility(zeit.find.interfaces.ICMSSearch)
        query = [
            {'term': {'payload.document.year': self.year}},
            {'term': {'payload.document.volume': self.volume}},
            {'bool': {'should': [
                {'term': {'payload.workflow.product-id': x.id}}
                for x in self._all_products]}},
        ]
        result = elastic.search({'query': {'bool': {
            'filter': query + additional_query_constraints,
            'must_not': [
                {'term': {'url': self.uniqueId.replace(UNIQUEID_PREFIX, '')}}
            ]}}}, rows=1000)
        # We assume a maximum content amount per usual production print volume
        assert result.hits < 250
        content = []
        for item in result:
            item = zeit.cms.interfaces.ICMSContent(
                UNIQUEID_PREFIX + item['url'], None)
            if item is not None:
                content.append(item)
        return content

    def change_contents_access(
            self, access_from, access_to, published=True,
            exclude_performing_articles=True, dry_run=False):
        """Bulk-change the `access` setting of this volume's content from
        `access_from` to `access_to`; returns the affected objects.

        With dry_run=True only the candidate list is returned.
        """
        constraints = [{'term': {'payload.document.access': access_from}}]
        if exclude_performing_articles:
            try:
                # NOTE(review): helper defined elsewhere in this module;
                # it appears to return ES-style relative URLs — confirm.
                to_filter = _find_performing_articles_via_webtrekk(self)
            except Exception:
                log.error("Error while retrieving data from webtrekk api",
                          exc_info=True)
                return []
            log.info("Not changing access for %s " % to_filter)
            filter_constraint = {
                'bool': {'must_not': {'terms': {'url': to_filter}}}}
            constraints.append(filter_constraint)
        if published:
            constraints.append({'term': {'payload.workflow.published': True}})
        cnts = self.all_content_via_search(constraints)
        if dry_run:
            return cnts
        for cnt in cnts:
            try:
                with zeit.cms.checkout.helper.checked_out(cnt) as co:
                    co.access = unicode(access_to)
                    zope.lifecycleevent.modified(
                        co, zope.lifecycleevent.Attributes(
                            zeit.cms.content.interfaces.ICommonMetadata,
                            'access')
                    )
            except Exception:
                # Best-effort: skip single failures, keep processing.
                log.error("Couldn't change access for {}. Skipping "
                          "it.".format(cnt.uniqueId))
        return cnts

    def content_with_references_for_publishing(self):
        """Collect this volume's unpublished, urgent articles plus the
        unpublished assets they reference, plus the volume itself."""
        additional_constraints = [
            {'term': {
                'doc_type': zeit.content.article.article.ArticleType.type}},
            {'term': {'payload.workflow.published': False}},
            {'term': {'payload.workflow.urgent': True}},
        ]
        articles_to_publish = self.all_content_via_search(
            additional_query_constraints=additional_constraints)
        # Flatten the list of lists and remove duplicates
        articles_with_references = list(set(itertools.chain.from_iterable(
            [self._with_references(article) for article in
             articles_to_publish])))
        articles_with_references.append(self)
        return articles_with_references

    def _with_references(self, article):
        """
        :param article: CMSContent
        :return: [referenced_content1, ..., article]
        """
        # XXX Using zeit.cms.relation.IReferences would make sense here as
        # well but due to some license issues with images referenced by
        # articles we have to be careful what we want to publish
        with_dependencies = [
            content for content in zeit.edit.interfaces.IElementReferences(
                article, []) if self._needs_publishing(content)
        ]
        with_dependencies.append(article)
        return with_dependencies

    def _needs_publishing(self, content):
        # Dont publish content which is already published
        if zeit.cms.workflow.interfaces.IPublishInfo(content).published:
            return False
        # content has to provide one of interfaces defined above
        return any([interface.providedBy(content) for interface
                    in self.assets_to_publish])
class VolumeType(zeit.cms.type.XMLContentTypeDeclaration):
    """CMS type declaration that registers Volume as an XML content type."""

    factory = Volume
    interface = zeit.content.volume.interfaces.IVolume
    title = _('Volume')
    type = 'volume'
class VolumeMetadata(grok.Adapter):
    """Since ICenterPage inherits from ICommonMetadata, we need to ensure
    that adapting a volume to ICommonMetadata returns fields from the volume,
    and not the CP.
    """

    grok.context(zeit.content.volume.interfaces.IVolume)
    grok.implements(zeit.cms.content.interfaces.ICommonMetadata)

    # Sentinel distinguishing "attribute absent on the volume" from a
    # stored value that happens to be None.
    missing = object()

    def __getattr__(self, name):
        """Proxy attribute access to the volume; fall back to the
        interface field's default for ICommonMetadata names the volume
        does not carry."""
        value = getattr(self.context, name, self.missing)
        if value is self.missing:
            field = zeit.cms.content.interfaces.ICommonMetadata.get(name, None)
            if field is None:
                # Not an ICommonMetadata field either: raise a proper
                # AttributeError instead of crashing with the misleading
                # "'NoneType' object has no attribute 'default'".
                raise AttributeError(name)
            return field.default
        return value
@grok.adapter(zeit.content.volume.interfaces.IVolume)
@grok.implementer(zeit.cms.workflow.interfaces.IPublishPriority)
def publish_priority_volume(context):
    """Adapt IVolume to a publish priority; always PRIORITY_LOW."""
    # XXX Kludgy. The JS-based "do-publish-all" uses the context's priority to
    # retrieve the task queue where it looks up the job id, and
    # publish_multiple runs with PRIORITY_LOW (which makes sense). To connect
    # these two, we set IVolume to low, even though that's not really
    # warranted, semantically speaking.
    return zeit.cms.workflow.interfaces.PRIORITY_LOW
class CoverDependency(zeit.workflow.dependency.DependencyBase):
    """When a volume is published, its covers are published along with it."""

    grok.context(zeit.content.volume.interfaces.IVolume)
    grok.name('zeit.content.volume.cover')

    retract_dependencies = True

    def get_dependencies(self):
        """Return every existing cover for each product / cover-type pair."""
        cover_names = zeit.content.volume.interfaces.VOLUME_COVER_SOURCE(
            self.context)
        found = []
        for product in self.context._all_products:
            for name in cover_names:
                candidate = self.context.get_cover(
                    name, product_id=product.id, use_fallback=False)
                if candidate:
                    found.append(candidate)
        return found
@grok.adapter(zeit.cms.content.interfaces.ICommonMetadata)
@grok.implementer(zeit.content.volume.interfaces.IVolume)
def retrieve_volume_using_info_from_metadata(context):
    """Resolve the Volume matching *context*'s year/volume/product metadata.

    Falls back to the product's related main product when the product
    itself carries no volume/location configuration. Returns None when the
    metadata is incomplete or no volume object can be resolved.
    """
    if (context.year is None or context.volume is None or
            context.product is None):
        return None
    unique_id = None
    if context.product.volume and context.product.location:
        unique_id = Volume._fill_template(context, context.product.location)
    else:
        # Try the main product this product relates to instead.
        main_product = zeit.content.volume.interfaces.PRODUCT_SOURCE(
            context).find(context.product.relates_to)
        if main_product and main_product.volume and main_product.location:
            unique_id = Volume._fill_template(context, main_product.location)
    # unique_id may still be None; ICMSContent then yields the default None.
    return zeit.cms.interfaces.ICMSContent(unique_id, None)
@grok.adapter(zeit.content.volume.interfaces.IVolume)
@grok.implementer(zeit.content.cp.interfaces.ICenterPage)
def retrieve_corresponding_centerpage(context):
    """Resolve the centerpage configured for this volume's product.

    Falls back to the related main product's centerpage when the product
    itself has none. Returns None when nothing resolvable is configured or
    the resolved object is not a centerpage.
    """
    if context.product is None:
        return None
    unique_id = None
    # BUG FIX: previously this tested ``context.product.location`` but then
    # filled the template from ``context.product.centerpage`` -- a product
    # with a location but no centerpage crashed, and one with a centerpage
    # but no location was wrongly skipped. Test the attribute we use,
    # mirroring retrieve_volume_using_info_from_metadata.
    if context.product.centerpage:
        unique_id = context.fill_template(context.product.centerpage)
    else:
        main_product = zeit.content.volume.interfaces.PRODUCT_SOURCE(
            context).find(context.product.relates_to)
        if main_product and main_product.centerpage:
            unique_id = context.fill_template(main_product.centerpage)
    cp = zeit.cms.interfaces.ICMSContent(unique_id, None)
    if not zeit.content.cp.interfaces.ICenterPage.providedBy(cp):
        return None
    return cp
def _find_performing_articles_via_webtrekk(volume):
    """Query the Webtrekk analytics API for well-performing paid articles
    of *volume* and return their site-relative URLs as a list.

    "Performing" means: within three weeks after the volume's first
    release, the article reached the configured minimum conversion rate or
    minimum order count. Only use this where runtime performance is not a
    concern (remote API call).
    """
    api_date_format = '%Y-%m-%d %H:%M:%S'
    cr_metric_name = u'CR Bestellungen Abo (Artikelbasis)'
    order_metric_name = u'Anzahl Bestellungen \u2013\xa0Zplus (Seitenbasis)'
    config = zope.app.appsetup.product.getProductConfiguration(
        'zeit.content.volume')
    info = zeit.cms.workflow.interfaces.IPublishInfo(volume)
    # Analysis window: three weeks from first release of the volume.
    start = info.date_first_released
    stop = start + datetime.timedelta(weeks=3)
    # XXX Unfortunately the webtrekk api doesn't allow filtering for custom
    # metrics, so we have to filter our results here.
    body = {'version': '1.1',
            'method': 'getAnalysisData',
            'params': {
                'login': config['access-control-webtrekk-username'],
                'pass': config['access-control-webtrekk-password'],
                'customerId': config['access-control-webtrekk-customerid'],
                'language': 'de',
                'analysisConfig': {
                    "analysisFilter": {'filterRules': [
                        # Only paid articles
                        {'objectTitle': 'Wall - Status', 'comparator': '=',
                         'filter': 'paid', 'scope': 'page'},
                    ]},
                    'metrics': [
                        {'sortOrder': 'desc', 'title': order_metric_name},
                        {'sortOrder': 'desc', 'title': cr_metric_name}
                    ],
                    'analysisObjects': [{'title': 'Seiten'}],
                    'startTime':
                        start.strftime(api_date_format),
                    'stopTime':
                        stop.strftime(api_date_format),
                    'rowLimit': 1000,
                    "hideFooters": 1}}}
    access_control_config = (
        zeit.content.volume.interfaces.ACCESS_CONTROL_CONFIG)
    resp = requests.post(config['access-control-webtrekk-url'],
                         timeout=int(
                             config['access-control-webtrekk-timeout']),
                         json=body)
    result = resp.json()
    if result.get('error'):
        raise Exception('Webtrekk API reported an error %s' %
                        result.get('error'))
    data = result['result']['analysisData']
    urls = set()
    # Each row is (page title, order count, conversion rate).
    for page, order, cr in data:
        url = page.split('zeit.de/')[1]
        # Keep only pages belonging to this volume that cross either
        # performance threshold.
        if (volume.fill_template('{year}/{name}') in url) and \
                (float(cr) >= access_control_config.min_cr or
                 int(order) >= access_control_config.min_orders):
            urls.add('/' + url)
    return list(urls)
| 39.857768 | 79 | 0.618062 |
3a80cc52a1f3c6aa313bb6f670687b7a0c1c4108 | 812 | py | Python | packages/machine-p2020rdb-pca/example/p2020-util.py | macdaliot/echronos | 00a4e7d90143d58d47b548e10f0a1f3b2976db8d | [
"MIT"
] | null | null | null | packages/machine-p2020rdb-pca/example/p2020-util.py | macdaliot/echronos | 00a4e7d90143d58d47b548e10f0a1f3b2976db8d | [
"MIT"
] | null | null | null | packages/machine-p2020rdb-pca/example/p2020-util.py | macdaliot/echronos | 00a4e7d90143d58d47b548e10f0a1f3b2976db8d | [
"MIT"
] | null | null | null | #
# eChronos Real-Time Operating System
# Copyright (c) 2017, Commonwealth Scientific and Industrial Research
# Organisation (CSIRO) ABN 41 687 119 230.
#
# All rights reserved. CSIRO is willing to grant you a licence to the eChronos
# real-time operating system under the terms of the CSIRO_BSD_MIT license. See
# the file "LICENSE_CSIRO_BSD_MIT.txt" for details.
#
# @TAG(CSIRO_BSD_MIT)
#
from prj import Module
class P2020UtilModule(Module):
    """prj build module contributing the P2020 utility header and source.

    The ``ccsrbar`` schema entry lets projects override the configuration,
    control and status register base address.
    """
    # U-Boot on the P2020RDB-PCA inits the CCSRBAR to 0xffe00000
    xml_schema = """
<schema>
<entry name="ccsrbar" type="int" default="0xffe00000" />
</schema>"""
    files = [
        {'input': 'p2020-util.h', 'render': True},
        {'input': 'p2020-util.c', 'render': True, 'type': 'c'},
    ]


# Module instance picked up by the prj tool.
module = P2020UtilModule()  # pylint: disable=invalid-name
| 28 | 78 | 0.692118 |
7f391b82fd2afdec5888673437e5be45b2edea83 | 3,540 | py | Python | fandogh_cli/volume_commands.py | MXareie/fandogh-cli | 89b5a8b276d19ec1b32f74c6b32ba1ec6bcde4e0 | [
"MIT"
] | 2 | 2020-05-03T10:31:12.000Z | 2021-07-27T09:38:47.000Z | fandogh_cli/volume_commands.py | MXareie/fandogh-cli | 89b5a8b276d19ec1b32f74c6b32ba1ec6bcde4e0 | [
"MIT"
] | null | null | null | fandogh_cli/volume_commands.py | MXareie/fandogh-cli | 89b5a8b276d19ec1b32f74c6b32ba1ec6bcde4e0 | [
"MIT"
] | null | null | null | import click
from fandogh_cli.utils import format_text, TextStyle
from .presenter import present
from .base_commands import FandoghCommand
from .fandogh_client import create_volume_claim, delete_volume_claim, list_volumes
'''
This class is for volume commands
all cli commands related to volume CRUD and etc
are written here
method list:
- volume: parent
- create_volume
- delete volume
- volume_list
'''
'''
Volume parent command
This command should be called before any sub commands
'''
# Parent command group: all volume subcommands hang off this.
@click.group('volume')
def volume():
    """Volume management commands"""
'''
Fandogh user calls this cli command
in order to create a new volume.
it will show the resulting value in table format
command name:
- add
options:
. --name or -n : this option is required and will be used as the volume name
. --capacity or -c: this option is required and will be used as the volume size
. --detach or -d: using this command user will imply that whether the request
should be executed in background or not. default value is False.
'''
@click.command('add', help='Add new volume', cls=FandoghCommand)
@click.option('--name', '-n', help='Name of the volume', prompt='Volume Name')
@click.option('--capacity', '-c', help='Volume capacity', prompt='Storage Capacity')
@click.option('--detach', '-d', help='Execute request in background', default=False, is_flag=True)
def create_volume(name, capacity, detach):
    """Create a volume claim; unless --detach is given, report success and
    render the new volume as a table."""
    click.echo('Creating volume may take some times, please wait...')
    data = create_volume_claim(name, capacity)
    if detach:
        # Background mode: result deliberately not rendered.
        return
    click.echo('volume \'{}\' was built successfully and is ready to attach'.format(data.get('name')))
    click.echo(present(
        lambda: [data],
        renderer='table',
        headers=['Name', 'Status', 'Mounted To', 'Volume', 'Capacity', 'Creation Date'],
        columns=['name', 'status', 'mounted_to', 'volume', 'capacity', 'age']))
'''
Fandogh user calls this cli command
in order to delete an existing volume.
command name:
- delete
options:
. --name or -n: this option is required and will be used as volume name
'''
@click.command('delete', help='Delete specific volume', cls=FandoghCommand)
@click.option('--name', '-n', help='Name of the volume', prompt='Volume Name')
def delete_volume(name):
    """Delete a volume claim after an explicit confirmation prompt."""
    warning = format_text(
        'If you proceed all your data will be deleted, do you want to continue?',
        TextStyle.WARNING)
    if not click.confirm(warning):
        return
    click.echo('Volume delete may take some times, please wait...')
    click.echo(delete_volume_claim(name))
'''
Fandogh user calls this cli command
in order to get the list of volumes available
in her/his namespace
command:
- volume_list
options:
None required
'''
@click.command('list', help='Volume list', cls=FandoghCommand)
def volume_list():
    """List the volumes of the current namespace as a table."""
    table = present(
        lambda: list_volumes(),
        renderer='table',
        headers=['Name', 'Status', 'Mounted To', 'Volume', 'Capacity', 'Creation Date'],
        columns=['name', 'status', 'mounted_to', 'volume', 'capacity', 'age'])
    click.echo(table if table else 'You have no volumes in your namespace!')
# Register the subcommands on the `volume` group.
volume.add_command(create_volume)
volume.add_command(delete_volume)
volume.add_command(volume_list)
| 27.874016 | 106 | 0.651977 |
f1d5b79160b584c1438915b8792954710dd763f6 | 9,057 | py | Python | old/Python/simulate/simulate_original.py | conelul/Computer-Science-2021 | d5ad20643a293bb7e0e24259bb2ef43192ccb002 | [
"MIT"
] | null | null | null | old/Python/simulate/simulate_original.py | conelul/Computer-Science-2021 | d5ad20643a293bb7e0e24259bb2ef43192ccb002 | [
"MIT"
] | null | null | null | old/Python/simulate/simulate_original.py | conelul/Computer-Science-2021 | d5ad20643a293bb7e0e24259bb2ef43192ccb002 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Simulate (a Simon clone)
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, sys, time, os, pygame
from pygame.locals import *
FPS = 60  # frames per second cap for the main loop
WINDOWWIDTH = 1080
WINDOWHEIGHT = 720
FLASHSPEED = 500 # in milliseconds (appears unused in this file -- TODO confirm)
FLASHDELAY = 200 # in milliseconds, pause between pattern flashes
BUTTONSIZE = 300  # side length of each square button, in pixels
BUTTONGAPSIZE = 20  # gap between adjacent buttons, in pixels
TIMEOUT = 4 # seconds before game over if no button is pushed.

CURRENT_DIR = os.path.dirname(__file__) # Working dir, NOT IN ORIGINAL FILE

#               R    G    B
WHITE        = (255, 255, 255)
BLACK        = (  0,   0,   0)
BRIGHTRED    = (255,   0,   0)
RED          = (155,   0,   0)
BRIGHTGREEN  = (  0, 255,   0)
GREEN        = (  0, 155,   0)
BRIGHTBLUE   = (  0,   0, 255)
BLUE         = (  0,   0, 155)
BRIGHTYELLOW = (255, 255,   0)
YELLOW       = (155, 155,   0)
DARKGRAY     = ( 40,  40,  40)
# Mutable module-level state; reassigned by changeBackgroundAnimation().
bgColor = BLACK

# Margins that center the 2x2 button grid in the window.
XMARGIN = int((WINDOWWIDTH - (2 * BUTTONSIZE) - BUTTONGAPSIZE) / 2)
YMARGIN = int((WINDOWHEIGHT - (2 * BUTTONSIZE) - BUTTONGAPSIZE) / 2)

# Rect objects for each of the four buttons
YELLOWRECT = pygame.Rect(XMARGIN, YMARGIN, BUTTONSIZE, BUTTONSIZE)
BLUERECT   = pygame.Rect(XMARGIN + BUTTONSIZE + BUTTONGAPSIZE, YMARGIN, BUTTONSIZE, BUTTONSIZE)
REDRECT    = pygame.Rect(XMARGIN, YMARGIN + BUTTONSIZE + BUTTONGAPSIZE, BUTTONSIZE, BUTTONSIZE)
GREENRECT  = pygame.Rect(XMARGIN + BUTTONSIZE + BUTTONGAPSIZE, YMARGIN + BUTTONSIZE + BUTTONGAPSIZE, BUTTONSIZE, BUTTONSIZE)
def main():
    """Run the Simulate (Simon clone) game: set up pygame, then loop
    forever alternating between playing the pattern and reading the
    player's replay of it."""
    global FPSCLOCK, DISPLAYSURF, BASICFONT, BEEP1, BEEP2, BEEP3, BEEP4

    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Simulate')

    BASICFONT = pygame.font.Font('freesansbold.ttf', 16)

    # Static instruction line at the bottom of the window.
    infoSurf = BASICFONT.render('Match the pattern by clicking on the button or using the Q, W, A, S keys.', 1, DARKGRAY)
    infoRect = infoSurf.get_rect()
    infoRect.topleft = (10, WINDOWHEIGHT - 25)

    # load the sound files
    BEEP1 = pygame.mixer.Sound(f'{CURRENT_DIR}/sounds/beep1.ogg')
    BEEP2 = pygame.mixer.Sound(f'{CURRENT_DIR}/sounds/beep2.ogg')
    BEEP3 = pygame.mixer.Sound(f'{CURRENT_DIR}/sounds/beep3.ogg')
    BEEP4 = pygame.mixer.Sound(f'{CURRENT_DIR}/sounds/beep4.ogg')

    # Initialize some variables for a new game
    pattern = [] # stores the pattern of colors
    currentStep = 0 # the color the player must push next
    lastClickTime = 0 # timestamp of the player's last button push
    score = 0
    # when False, the pattern is playing. when True, waiting for the player to click a colored button:
    waitingForInput = False

    while True: # main game loop
        clickedButton = None # button that was clicked (set to YELLOW, RED, GREEN, or BLUE)
        DISPLAYSURF.fill(bgColor)
        drawButtons()

        # Score display, top-right corner.
        scoreSurf = BASICFONT.render('Score: ' + str(score), 1, WHITE)
        scoreRect = scoreSurf.get_rect()
        scoreRect.topleft = (WINDOWWIDTH - 100, 10)
        DISPLAYSURF.blit(scoreSurf, scoreRect)

        DISPLAYSURF.blit(infoSurf, infoRect)

        checkForQuit()
        for event in pygame.event.get(): # event handling loop
            if event.type == MOUSEBUTTONUP:
                mousex, mousey = event.pos
                clickedButton = getButtonClicked(mousex, mousey)
            elif event.type == KEYDOWN:
                # Keyboard mapping: Q/W/A/S correspond to the four buttons.
                if event.key == K_q:
                    clickedButton = YELLOW
                elif event.key == K_w:
                    clickedButton = BLUE
                elif event.key == K_a:
                    clickedButton = RED
                elif event.key == K_s:
                    clickedButton = GREEN

        if not waitingForInput:
            # play the pattern (append one new random color each round)
            pygame.display.update()
            pygame.time.wait(1000)
            pattern.append(random.choice((YELLOW, BLUE, RED, GREEN)))
            for button in pattern:
                flashButtonAnimation(button)
                pygame.time.wait(FLASHDELAY)
            waitingForInput = True
        else:
            # wait for the player to enter buttons
            if clickedButton and clickedButton == pattern[currentStep]:
                # pushed the correct button
                flashButtonAnimation(clickedButton)
                currentStep += 1
                lastClickTime = time.time()

                if currentStep == len(pattern):
                    # pushed the last button in the pattern
                    changeBackgroundAnimation()
                    score += 1
                    waitingForInput = False
                    currentStep = 0 # reset back to first step

            elif (clickedButton and clickedButton != pattern[currentStep]) or (currentStep != 0 and time.time() - TIMEOUT > lastClickTime):
                # pushed the incorrect button, or has timed out
                gameOverAnimation()
                # reset the variables for a new game:
                pattern = []
                currentStep = 0
                waitingForInput = False
                score = 0
                pygame.time.wait(1000)
                changeBackgroundAnimation()

        pygame.display.update()
        FPSCLOCK.tick(FPS)
def terminate():
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()


def checkForQuit():
    """Terminate on a QUIT event or an Escape key release; re-post any
    other KEYUP events so the caller's event loop still sees them."""
    for event in pygame.event.get(QUIT): # get all the QUIT events
        terminate() # terminate if any QUIT events are present
    for event in pygame.event.get(KEYUP): # get all the KEYUP events
        if event.key == K_ESCAPE:
            terminate() # terminate if the KEYUP event was for the Esc key
        pygame.event.post(event) # put the other KEYUP event objects back
def flashButtonAnimation(color, animationSpeed=50):
    """Flash one button: play its beep and fade its bright color in then out.

    :param color: one of the module color constants YELLOW, BLUE, RED, GREEN
    :param animationSpeed: alpha step per frame; larger flashes faster
    :raises ValueError: for any other color (previously this failed later
        with a confusing UnboundLocalError)
    """
    if color == YELLOW:
        sound = BEEP1
        flashColor = BRIGHTYELLOW
        rectangle = YELLOWRECT
    elif color == BLUE:
        sound = BEEP2
        flashColor = BRIGHTBLUE
        rectangle = BLUERECT
    elif color == RED:
        sound = BEEP3
        flashColor = BRIGHTRED
        rectangle = REDRECT
    elif color == GREEN:
        sound = BEEP4
        flashColor = BRIGHTGREEN
        rectangle = GREENRECT
    else:
        raise ValueError('unknown button color: %r' % (color,))

    origSurf = DISPLAYSURF.copy()
    flashSurf = pygame.Surface((BUTTONSIZE, BUTTONSIZE))
    flashSurf = flashSurf.convert_alpha()
    r, g, b = flashColor
    sound.play()
    # Fade alpha 0 -> 255 (brighten), then 255 -> 0 (dim back).
    for start, end, step in ((0, 255, 1), (255, 0, -1)): # animation loop
        for alpha in range(start, end, animationSpeed * step):
            checkForQuit()
            DISPLAYSURF.blit(origSurf, (0, 0))
            flashSurf.fill((r, g, b, alpha))
            DISPLAYSURF.blit(flashSurf, rectangle.topleft)
            pygame.display.update()
            FPSCLOCK.tick(FPS)
    # Restore the pre-flash frame.
    DISPLAYSURF.blit(origSurf, (0, 0))
def drawButtons():
    """Draw the four static (dim-colored) buttons onto the display."""
    for buttonColor, buttonRect in ((YELLOW, YELLOWRECT),
                                    (BLUE, BLUERECT),
                                    (RED, REDRECT),
                                    (GREEN, GREENRECT)):
        pygame.draw.rect(DISPLAYSURF, buttonColor, buttonRect)
def changeBackgroundAnimation(animationSpeed=40):
    """Fade the background to a new random color and store it in the
    module-level ``bgColor``.

    :param animationSpeed: alpha step per frame; larger fades faster
    """
    global bgColor
    newBgColor = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))

    newBgSurf = pygame.Surface((WINDOWWIDTH, WINDOWHEIGHT))
    newBgSurf = newBgSurf.convert_alpha()
    r, g, b = newBgColor
    for alpha in range(0, 255, animationSpeed): # animation loop
        checkForQuit()
        DISPLAYSURF.fill(bgColor)

        newBgSurf.fill((r, g, b, alpha))
        DISPLAYSURF.blit(newBgSurf, (0, 0))

        drawButtons() # redraw the buttons on top of the tint

        pygame.display.update()
        FPSCLOCK.tick(FPS)
    bgColor = newBgColor
def gameOverAnimation(color=WHITE, animationSpeed=50):
    """Signal game over: play all four beeps at once and flash the whole
    screen three times in *color*."""
    # play all beeps at once, then flash the background
    origSurf = DISPLAYSURF.copy()
    flashSurf = pygame.Surface(DISPLAYSURF.get_size())
    flashSurf = flashSurf.convert_alpha()
    BEEP1.play() # play all four beeps at the same time, roughly.
    BEEP2.play()
    BEEP3.play()
    BEEP4.play()
    r, g, b = color
    for i in range(3): # do the flash 3 times
        for start, end, step in ((0, 255, 1), (255, 0, -1)):
            # The first iteration in this loop sets the following for loop
            # to go from 0 to 255, the second from 255 to 0.
            for alpha in range(start, end, animationSpeed * step): # animation loop
                # alpha means transparency. 255 is opaque, 0 is invisible
                checkForQuit()
                flashSurf.fill((r, g, b, alpha))
                DISPLAYSURF.blit(origSurf, (0, 0))
                DISPLAYSURF.blit(flashSurf, (0, 0))
                drawButtons()
                pygame.display.update()
                FPSCLOCK.tick(FPS)
def getButtonClicked(x, y):
    """Return the color constant of the button containing (x, y), or None
    if the point lies outside all four buttons."""
    hitTable = ((YELLOWRECT, YELLOW),
                (BLUERECT, BLUE),
                (REDRECT, RED),
                (GREENRECT, GREEN))
    for rect, buttonColor in hitTable:
        if rect.collidepoint((x, y)):
            return buttonColor
    return None
# Standard script entry point.
if __name__ == '__main__':
    main()
| 35.517647 | 139 | 0.612786 |
7955e327c9f7060d3eb18383e4c3ea47f7df4c02 | 6,081 | py | Python | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | # teabag app
from flask import Flask, render_template
import random as r
import os
app = Flask(__name__)
# Word pools used to assemble teabag-style aphorisms; duplicates weight
# the random choice toward the repeated words.
partsOfSpeech = {'nouns1': ['an aura', 'an accomplishment', 'the love', 'the life', 'the soul'],
                 'nouns2': ['respect', 'compassion', 'kindness', 'love', 'life', 'knowledge', 'strength',
                            'generosity', 'love', 'goodness', 'strength',
                            'belief', 'light', 'love', 'happiness', 'love', 'love', 'everything', 'trust', 'heart'],
                 'adverbs': ['righteously', 'sincerely'],
                 'verbs': ['live', 'sing', 'love', 'love', 'live', 'love', 'love', 'give', 'speak', 'speak', 'create',
                           'intend', 'intend', 'respect'],
                 'adjectives': ['happy', 'sacred', 'good', 'compassionate', 'giving', 'forgiving', 'loving', 'joyful',
                                'sincere']
                 }
# Candidate phrases, keyed 0-40; the route picks one key at random.
# NOTE(review): every f-string below is evaluated ONCE at import time, so
# the random word choices inside each phrase are frozen for the process's
# lifetime -- only *which* phrase is shown varies per request. Confirm
# whether per-request word randomness was intended.
# NOTE(review): keys 21-40 repeat the templates of 0-18 and 20 (with
# independently frozen word choices); key 19 is the only fixed phrase.
phraseDict = {
    0: f"You are {r.choice(partsOfSpeech['adjectives'])}",
    1: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
    2: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
    3: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
    4: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
    5: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
    6: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
    7: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
    8: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
    9: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know to to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
    10: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
    11: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
    12: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    13: f"Be {r.choice(partsOfSpeech['adjectives'])}",
    14: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
    15: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
    16: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
    17: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    18: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
    19: "You're already dead",
    20: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
    21: f"You are {r.choice(partsOfSpeech['adjectives'])}",
    22: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
    23: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
    24: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
    25: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
    26: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
    27: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
    28: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
    29: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
    30: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know to to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
    31: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
    32: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
    33: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    34: f"Be {r.choice(partsOfSpeech['adjectives'])}",
    35: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
    36: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
    37: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
    38: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    39: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
    40: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
}
@app.route('/')  # root endpoint of the site
def teaBagger():
    """Render the tea page with one randomly selected teabag phrase."""
    chosen_key = r.choice(list(phraseDict))
    return render_template('teasite.jinja2', sentence=phraseDict[chosen_key])


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
| 81.08 | 190 | 0.651866 |
457085e2e44ddc4e450f03a8c55c5925f4651b8f | 326 | py | Python | core/mixins.py | RENCI-NRIG/notary-service | f8b3ed91f9ea4e36ba007a3eaac7373905475a37 | [
"MIT"
] | 8 | 2019-05-27T23:00:18.000Z | 2022-03-01T11:10:39.000Z | core/mixins.py | RENCI-NRIG/notary-service | 5d519d925d011004aefd6a4acdfc8565f5b76a94 | [
"MIT"
] | 52 | 2018-10-12T18:59:43.000Z | 2022-03-28T21:16:35.000Z | core/mixins.py | RENCI-NRIG/notary-service | 5d519d925d011004aefd6a4acdfc8565f5b76a94 | [
"MIT"
] | 1 | 2020-04-30T19:34:54.000Z | 2020-04-30T19:34:54.000Z | from core import models
class AuditModelMixin(models.BaseTimestampModel,
                      models.BaseTrackingModel):
    """
    Mixin that provides created_by, created, modified_by, modified fields

    Includes
    - BaseTimestampModel
    - BaseTrackingModel
    """

    class Meta:
        # Abstract: contributes fields only, creates no DB table itself.
        abstract = True
| 20.375 | 73 | 0.644172 |
2a3ffc5acffe0f67d1c341951679a742ead2f7c8 | 789 | py | Python | allink_core/core_apps/allink_seo_accordion/models.py | allink/allink-core | cf2727f26192d8dee89d76feb262bc4760f36f5e | [
"BSD-3-Clause"
] | 5 | 2017-03-13T08:49:45.000Z | 2022-03-05T20:05:56.000Z | allink_core/core_apps/allink_seo_accordion/models.py | allink/allink-core | cf2727f26192d8dee89d76feb262bc4760f36f5e | [
"BSD-3-Clause"
] | 28 | 2019-10-21T08:32:18.000Z | 2022-02-10T13:16:38.000Z | allink_core/core_apps/allink_seo_accordion/models.py | allink/allink-core | cf2727f26192d8dee89d76feb262bc4760f36f5e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from cms.models.pluginmodel import CMSPlugin
# NOTE(review): python_2_unicode_compatible is a Python-2 compatibility
# shim; on a Python-3-only codebase it is a no-op and could be dropped.
@python_2_unicode_compatible
class AllinkSEOAccordionContainerPlugin(CMSPlugin):
    """
    A Container-Plugin for SEO Accordion content
    """
    # Toggles FAQ structured-data markup for the accordion's contents.
    is_seo_faq = models.BooleanField(
        'Enable SEO FAQ schema',
        help_text='Enable to display accordion contents as questions/answers in search engine result pages',
        default=True
    )

    def __str__(self):
        # Plugins have no natural label; fall back to the primary key.
        return '{}'.format(str(self.pk))
# NOTE(review): python_2_unicode_compatible is a Python-2 compatibility
# shim; on a Python-3-only codebase it is a no-op and could be dropped.
@python_2_unicode_compatible
class AllinkSEOAccordion(CMSPlugin):
    """A single accordion entry, labeled by its title."""

    title = models.CharField(
        'Title',
        max_length=255
    )

    def __str__(self):
        return '{}'.format(self.title)
| 23.909091 | 108 | 0.695817 |
fc83aeb6fc22e1d498da90643a5d81b71b5cc83e | 2,591 | py | Python | manimpp/mobject/mobject_update_utils.py | yudhastyawan/manimpp | 5bcfa7ba24e368a72f55a3c69364521843b9766d | [
"MIT"
] | null | null | null | manimpp/mobject/mobject_update_utils.py | yudhastyawan/manimpp | 5bcfa7ba24e368a72f55a3c69364521843b9766d | [
"MIT"
] | null | null | null | manimpp/mobject/mobject_update_utils.py | yudhastyawan/manimpp | 5bcfa7ba24e368a72f55a3c69364521843b9766d | [
"MIT"
] | null | null | null | import inspect
import numpy as np
from manimpp.constants import DEGREES
from manimpp.constants import RIGHT
from manimpp.mobject.mobject import Mobject
def assert_is_mobject_method(method):
    """Ensure *method* is a bound method whose receiver is a Mobject."""
    assert inspect.ismethod(method)
    assert isinstance(method.__self__, Mobject)
def always(method, *args, **kwargs):
    """Re-apply the bound Mobject *method* with the given arguments on
    every frame, and return the mobject."""
    assert_is_mobject_method(method)
    target = method.__self__
    underlying = method.__func__
    target.add_updater(lambda mob: underlying(mob, *args, **kwargs))
    return target
def f_always(method, *arg_generators, **kwargs):
    """
    More functional version of always, where instead
    of taking in args, it takes in functions which ouput
    the relevant arguments.
    """
    assert_is_mobject_method(method)
    target = method.__self__
    underlying = method.__func__

    def updater(mob):
        # Evaluate each generator freshly every frame.
        underlying(mob, *(make_arg() for make_arg in arg_generators), **kwargs)

    target.add_updater(updater)
    return target
def always_redraw(func):
    """Build a mobject from *func* and keep rebuilding it every frame."""
    drawn = func()
    drawn.add_updater(lambda mob: mob.become(func()))
    return drawn
def always_shift(mobject, direction=RIGHT, rate=0.1):
    """Translate *mobject* along *direction* at *rate* units per second."""
    def shift_updater(mob, dt):
        mob.shift(dt * rate * direction)

    mobject.add_updater(shift_updater)
    return mobject
def always_rotate(mobject, rate=20 * DEGREES, **kwargs):
    """Rotate *mobject* continuously at *rate* radians per second."""
    def rotate_updater(mob, dt):
        mob.rotate(dt * rate, **kwargs)

    mobject.add_updater(rotate_updater)
    return mobject
def turn_animation_into_updater(animation, cycle=False, **kwargs):
    """
    Add an updater to the animation's mobject which applies
    the interpolation and update functions of the animation

    If cycle is True, this repeats over and over. Otherwise,
    the updater will be popped upon completion
    """
    mobject = animation.mobject
    animation.update_config(**kwargs)
    animation.suspend_mobject_updating = False
    animation.begin()
    # Wall-clock time the updater has driven this animation so far.
    animation.total_time = 0

    def update(m, dt):
        run_time = animation.get_run_time()
        time_ratio = animation.total_time / run_time
        if cycle:
            # Wrap the progress so the animation loops forever.
            alpha = time_ratio % 1
        else:
            alpha = np.clip(time_ratio, 0, 1)
            if alpha >= 1:
                # Finished: finalize and remove this updater.
                animation.finish()
                m.remove_updater(update)
                return
        animation.interpolate(alpha)
        animation.update_mobjects(dt)
        animation.total_time += dt

    mobject.add_updater(update)
    return mobject
return mobject
def cycle_animation(animation, **kwargs):
return turn_animation_into_updater(
animation, cycle=True, **kwargs
)
| 25.653465 | 66 | 0.663064 |
66ad149aa1ae1e6878f5572da5e15938282f95e2 | 2,536 | py | Python | apps/order/models.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 1 | 2019-04-20T16:58:02.000Z | 2019-04-20T16:58:02.000Z | apps/order/models.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 6 | 2020-06-05T19:57:58.000Z | 2021-09-08T00:49:17.000Z | apps/order/models.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 1 | 2021-09-10T18:29:28.000Z | 2021-09-10T18:29:28.000Z | from django.db import models
from datetime import datetime
# Payment methods (code -> display label).
# NOTE(review): not referenced in this module; presumably consumed by
# views/templates -- confirm before removing.
PAY_METHOD = {
    1: "货到付款",   # cash on delivery
    2: "微信支付",   # WeChat Pay
    3: "支付宝",     # Alipay
    4: "银联卡支付",  # UnionPay card
}
# Order management
class OrderList(models.Model):
    """A customer order: totals, payment state and shipping address."""
    user = models.ForeignKey("user.UserProfile", verbose_name="用户", on_delete=models.CASCADE)
    # Order number; also the join key used by OrderItem and Logistics.
    order = models.CharField(verbose_name="订单号", max_length=32)
    totalNum = models.IntegerField(verbose_name="总商品数量")
    totalPrice = models.FloatField(verbose_name="总金额")
    pay_method = models.CharField(verbose_name="支付方式", max_length=20, default="")
    pay_status = models.BooleanField(verbose_name="支付状态", default=False)
    address = models.ForeignKey("user.Address", verbose_name="订单收货地址", default=1, on_delete=models.CASCADE)
    add_time = models.DateTimeField(verbose_name="下单时间", default=datetime.now)

    class Meta:
        verbose_name = "订单管理"
        verbose_name_plural = verbose_name

    def __str__(self):
        return "{} {}".format(self.user, self.order)

    # Fetch the items belonging to this order.
    def get_item(self):
        return OrderItem.objects.filter(orderNum=self.order).all()

    # Fetch the logistics record for this order (or None).
    def get_logistics(self):
        return Logistics.objects.filter(orderNum=self.order).first()
# Order line items.
class OrderItem(models.Model):
    """A single product/quantity line within an order."""

    # Stored as the raw order-number string rather than a ForeignKey to OrderList.
    orderNum = models.CharField(verbose_name="订单号", max_length=32)
    product = models.ForeignKey("product.Product", verbose_name="商品", on_delete=models.CASCADE)
    num = models.IntegerField(verbose_name="数量")  # quantity ordered
    add_time = models.DateTimeField(verbose_name="下单时间", default=datetime.now)

    class Meta:
        verbose_name = "订单详情商品"
        verbose_name_plural = verbose_name

    def __str__(self):
        return "{} {}".format(self.orderNum, self.product.title)
# Logistics / shipping management.
class Logistics(models.Model):
    """Shipping information attached to an order by order number."""

    # NOTE(review): Django expects choices as (stored_value, human_label);
    # here the Chinese text is the stored value and the ASCII token the
    # label, which looks inverted — confirm intent before changing, since
    # swapping would alter stored data.
    SENTTYPECHOICE = (
        ("快递", "express"),
        ("无需物流", "no"),
        ("线下交易", "under_line"),
        ("其他", "other"),
    )
    # orderNum = models.ForeignKey("OrderList", verbose_name="订单号", on_delete=models.CASCADE)
    orderNum = models.CharField(verbose_name="订单号", max_length=32)  # order number string
    sentType = models.CharField(verbose_name="发货方式", choices=SENTTYPECHOICE, max_length=16)  # shipping method
    name = models.CharField(verbose_name="物流公司", max_length=200, default="")  # carrier name
    num = models.CharField(verbose_name="快递单号", max_length=32, default="0")  # tracking number
    add_time = models.DateTimeField(verbose_name="下单时间", default=datetime.now)

    class Meta:
        verbose_name = "物流管理"
        verbose_name_plural = verbose_name

    def __str__(self):
        return "{} {}".format(self.orderNum, self.sentType)
| 33.368421 | 107 | 0.68336 |
9cf4597841076171f728b43e8d693a936674f4bc | 183 | py | Python | packages/leon/bye.py | madstk1/leon | 9ba59ff717316c6f0cd57c0960f04d66cda99667 | [
"MIT"
] | 9,211 | 2019-02-10T12:32:07.000Z | 2022-03-31T07:53:55.000Z | packages/leon/bye.py | madstk1/leon | 9ba59ff717316c6f0cd57c0960f04d66cda99667 | [
"MIT"
] | 316 | 2019-02-11T03:31:05.000Z | 2022-03-26T04:21:52.000Z | packages/leon/bye.py | madstk1/leon | 9ba59ff717316c6f0cd57c0960f04d66cda99667 | [
"MIT"
] | 895 | 2019-02-10T16:37:57.000Z | 2022-03-31T08:19:30.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import utils
def run(string, entities):
    """Say good bye to the user via the translated 'good_bye' answer."""
    farewell = utils.translate('good_bye')
    return utils.output('end', 'good_bye', farewell)
| 18.3 | 68 | 0.661202 |
620169c0bf632c1647fff0c24b3088d029fce515 | 113,540 | py | Python | synapse/models/inet.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 216 | 2017-01-17T18:52:50.000Z | 2022-03-31T18:44:49.000Z | synapse/models/inet.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 2,189 | 2017-01-17T22:31:48.000Z | 2022-03-31T20:41:45.000Z | synapse/models/inet.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 44 | 2017-01-17T16:50:57.000Z | 2022-03-16T18:35:52.000Z | import socket
import hashlib
import logging
import ipaddress
import email.utils
import idna
import regex
import unicodedata
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.chop as s_chop
import synapse.lib.layer as s_layer
import synapse.lib.types as s_types
import synapse.lib.scrape as s_scrape
import synapse.lib.module as s_module
import synapse.lookup.iana as s_l_iana
logger = logging.getLogger(__name__)

# Characters allowed in a normalized FQDN (word chars, dot, underscore, dash).
fqdnre = regex.compile(r'^[\w._-]+$', regex.U)
# Matches "[<ipv6>]:<port>" server notation, capturing the address and port.
srv6re = regex.compile(r'^\[([a-f0-9\.:]+)\]:(\d+)$')

# Unicode full-stop variants (ideographic/fullwidth/halfwidth) folded to ".".
udots = regex.compile(r'[\u3002\uff0e\uff61]')

# cidrmasks[i] = (netmask integer, block size) for an IPv4 /i prefix.
cidrmasks = [((0xffffffff - (2 ** (32 - i) - 1)), (2 ** (32 - i))) for i in range(33)]

# Largest valid IPv4 integer value (255.255.255.255).
ipv4max = 2 ** 32 - 1
def getAddrType(ip):
    '''
    Classify an ipaddress address object into a coarse type label.

    The first matching category wins, so the check order is significant
    (e.g. link-local is reported before private).
    '''
    checks = (
        (ip.is_multicast, 'multicast'),
        (ip.is_loopback, 'loopback'),
        (ip.is_link_local, 'linklocal'),
        (ip.is_private, 'private'),
        (ip.is_reserved, 'reserved'),
    )
    for matched, label in checks:
        if matched:
            return label
    return 'unicast'
class Addr(s_types.Str):
    '''
    A URL-like address for tcp/udp/icmp/host clients and servers,
    e.g. "tcp://1.2.3.4:80".  Normalization canonicalizes the host
    portion and populates proto/host/port/ipv4/ipv6 subs.
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _getPort(self, valu):
        # Split a trailing ":<port>" off of valu.  Returns a 3-tuple of
        # (value without port, normalized port or None, ":<port>" suffix or '').
        parts = valu.split(':', 1)
        if len(parts) == 2:
            valu, port = parts
            port = self.modl.type('inet:port').norm(port)[0]
            return valu, port, f':{port}'
        return valu, None, ''

    def _normPyStr(self, valu):
        orig = valu
        subs = {}

        # no protos use case sensitivity yet...
        valu = valu.lower()

        # Default to tcp when no "<proto>://" prefix is present.
        proto = 'tcp'
        parts = valu.split('://', 1)
        if len(parts) == 2:
            proto, valu = parts

        if proto not in ('tcp', 'udp', 'icmp', 'host'):
            raise s_exc.BadTypeValu(valu=orig, name=self.name,
                                    mesg='inet:addr protocol must be in: tcp, udp, icmp, host')
        subs['proto'] = proto

        valu = valu.strip().strip('/')

        # Treat as host if proto is host
        if proto == 'host':
            valu, port, pstr = self._getPort(valu)
            if port:
                subs['port'] = port

            # Host names are normalized to a guid derived from the name.
            host = s_common.guid(valu)
            subs['host'] = host
            return f'host://{host}{pstr}', {'subs': subs}

        # Treat as IPv6 if starts with [ or contains multiple :
        if valu.startswith('['):
            # "[addr]:port" form — the port is required in this notation.
            match = srv6re.match(valu)
            if match:
                ipv6, port = match.groups()

                ipv6, v6info = self.modl.type('inet:ipv6').norm(ipv6)
                # Propagate an embedded IPv4-mapped address into the subs.
                v6subs = v6info.get('subs')
                if v6subs is not None:
                    v6v4addr = v6subs.get('ipv4')
                    if v6v4addr is not None:
                        subs['ipv4'] = v6v4addr

                port = self.modl.type('inet:port').norm(port)[0]
                subs['ipv6'] = ipv6
                subs['port'] = port
                return f'{proto}://[{ipv6}]:{port}', {'subs': subs}

            mesg = f'Invalid IPv6 w/port ({orig})'
            raise s_exc.BadTypeValu(valu=orig, name=self.name, mesg=mesg)

        elif valu.count(':') >= 2:
            # Bare IPv6 with no port.
            ipv6 = self.modl.type('inet:ipv6').norm(valu)[0]
            subs['ipv6'] = ipv6
            return f'{proto}://{ipv6}', {'subs': subs}

        # Otherwise treat as IPv4
        valu, port, pstr = self._getPort(valu)
        if port:
            subs['port'] = port

        ipv4 = self.modl.type('inet:ipv4').norm(valu)[0]
        ipv4_repr = self.modl.type('inet:ipv4').repr(ipv4)
        subs['ipv4'] = ipv4

        return f'{proto}://{ipv4_repr}{pstr}', {'subs': subs}
class Cidr4(s_types.Str):
    '''
    An IPv4 address block in CIDR notation, e.g. "1.2.3.0/24".

    Normalization zeroes the host bits and returns
    broadcast/mask/network subs.
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _normPyStr(self, valu):
        # Wrap malformed input (missing "/" or non-integer mask) in the
        # model's BadTypeValu, consistent with Cidr6/Email, instead of
        # leaking a bare ValueError to the caller.
        try:
            ip_str, mask_str = valu.split('/', 1)
            mask_int = int(mask_str)
        except ValueError:
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg='Invalid CIDR format, expected <ipv4>/<mask>') from None

        if mask_int > 32 or mask_int < 0:
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg='Invalid CIDR Mask')

        ip_int = self.modl.type('inet:ipv4').norm(ip_str)[0]

        mask = cidrmasks[mask_int]
        network = ip_int & mask[0]         # zero the host bits
        broadcast = network + mask[1] - 1  # last address in the block

        network_str = self.modl.type('inet:ipv4').repr(network)
        norm = f'{network_str}/{mask_int}'

        info = {
            'subs': {
                'broadcast': broadcast,
                'mask': mask_int,
                'network': network,
            }
        }
        return norm, info
class Cidr6(s_types.Str):
    '''
    An IPv6 address block in CIDR notation, e.g. "2001:db8::/101".
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _normPyStr(self, valu):
        '''
        Parse with the stdlib ipaddress module and return the canonical
        network string plus broadcast/mask/network subs.
        '''
        try:
            netw = ipaddress.IPv6Network(valu)
        except Exception as e:
            # Surface any stdlib parse failure as a model-level type error.
            raise s_exc.BadTypeValu(valu=valu, name=self.name, mesg=str(e)) from None

        subs = {
            'broadcast': str(netw.broadcast_address),
            'mask': netw.prefixlen,
            'network': str(netw.network_address),
        }
        return str(netw), {'subs': subs}
class Email(s_types.Str):
    '''
    An e-mail address, normalized to "<user>@<fqdn>".
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _normPyStr(self, valu):
        '''
        Split on the first "@" and normalize each half through the model's
        inet:fqdn and inet:user types; any failure becomes BadTypeValu.
        '''
        try:
            localpart, domain = valu.split('@', 1)
            normdomain, _ = self.modl.type('inet:fqdn').norm(domain)
            normuser, _ = self.modl.type('inet:user').norm(localpart)
        except Exception as e:
            raise s_exc.BadTypeValu(valu=valu, name=self.name, mesg=str(e)) from None

        subs = {
            'fqdn': normdomain,
            'user': normuser,
        }
        return f'{normuser}@{normdomain}', {'subs': subs}
class Fqdn(s_types.Type):
    '''
    A Fully Qualified Domain Name (FQDN), e.g. "vertex.link".

    Supports wildcard lifts of the form "*", "*.foo.com" and "*foo.com",
    and normalizes unicode names via IDNA encoding.
    '''

    stortype = s_layer.STOR_TYPE_FQDN

    def postTypeInit(self):
        self.setNormFunc(str, self._normPyStr)
        self.storlifts.update({
            '=': self._storLiftEq,
        })

    def _storLiftEq(self, cmpr, valu):
        # Handle wildcard prefixes specially; a leading "*" matches any
        # suffix, and is only allowed at the beginning of the value.
        if type(valu) == str:

            if valu == '':
                mesg = 'Cannot generate fqdn index bytes for a empty string.'
                raise s_exc.BadLiftValu(valu=valu, name=self.name, mesg=mesg)

            if valu == '*':
                return (
                    ('=', '*', self.stortype),
                )

            if valu.startswith('*.'):
                norm, info = self.norm(valu[2:])
                return (
                    ('=', f'*.{norm}', self.stortype),
                )

            if valu.startswith('*'):
                norm, info = self.norm(valu[1:])
                return (
                    ('=', f'*{norm}', self.stortype),
                )

            if '*' in valu:
                mesg = 'Wild card may only appear at the beginning.'
                raise s_exc.BadLiftValu(valu=valu, name=self.name, mesg=mesg)

        return self._storLiftNorm(cmpr, valu)

    def _ctorCmprEq(self, text):
        # Build an equality comparator; "*foo" compares by suffix match.
        if text == '':
            # Asking if a +inet:fqdn='' is a odd filter, but
            # the intuitive answer for that filter is to return False
            def cmpr(valu):
                return False
            return cmpr

        if text[0] == '*':
            cval = text[1:]
            def cmpr(valu):
                return valu.endswith(cval)
            return cmpr

        norm, info = self.norm(text)

        def cmpr(valu):
            return norm == valu
        return cmpr

    def _normPyStr(self, valu):
        # Fold unicode dot variants and common defanging ("[.]", "(.)")
        # to plain dots before encoding.
        valu = unicodedata.normalize('NFKC', valu)
        valu = regex.sub(udots, '.', valu)
        valu = valu.replace('[.]', '.')
        valu = valu.replace('(.)', '.')

        # strip leading/trailing .
        valu = valu.strip().strip('.')

        # Prefer UTS-46 idna encoding; fall back to the stdlib 'idna'
        # codec for values the idna package rejects.
        try:
            valu = idna.encode(valu, uts46=True).decode('utf8')
        except idna.IDNAError:
            try:
                valu = valu.encode('idna').decode('utf8').lower()
            except UnicodeError:
                mesg = 'Failed to encode/decode the value with idna/utf8.'
                raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                        mesg=mesg) from None

        if not fqdnre.match(valu):
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg=f'FQDN failed to match fqdnre [{fqdnre.pattern}]')

        # Make sure we *don't* get an IP address
        try:
            socket.inet_pton(socket.AF_INET, valu)
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg='FQDN Got an IP address instead')
        except OSError:
            pass

        # Split into host + parent domain; a value with no dot is a suffix.
        parts = valu.split('.', 1)
        subs = {'host': parts[0]}

        if len(parts) == 2:
            subs['domain'] = parts[1]
        else:
            subs['issuffix'] = 1

        return valu, {'subs': subs}

    def repr(self, valu):
        # Render the stored punycode form back to unicode when possible.
        try:
            return idna.decode(valu.encode('utf8'), uts46=True)
        except idna.IDNAError:
            try:
                return valu.encode('utf8').decode('idna')
            except UnicodeError:
                return valu
class IPv4(s_types.Type):
    '''
    The base type for an IPv4 address.

    Normalized form is the 32-bit integer value; string inputs accept
    dotted-quad notation (including defanged "[.]"/"(.)" forms), and
    lifts/comparators accept "a-b" ranges and "addr/mask" CIDR blocks.
    '''
    stortype = s_layer.STOR_TYPE_U32

    def postTypeInit(self):
        self.setCmprCtor('>=', self._ctorCmprGe)
        self.setCmprCtor('<=', self._ctorCmprLe)
        self.setCmprCtor('>', self._ctorCmprGt)
        self.setCmprCtor('<', self._ctorCmprLt)

        self.setNormFunc(str, self._normPyStr)
        self.setNormFunc(int, self._normPyInt)

        self.storlifts.update({
            '=': self._storLiftEq,
            '<': self._storLiftNorm,
            '>': self._storLiftNorm,
            '<=': self._storLiftNorm,
            '>=': self._storLiftNorm,
        })

    def _ctorCmprEq(self, valu):
        # Range-aware equality: CIDR blocks use an exclusive upper bound,
        # "a-b" ranges are inclusive on both ends.
        if type(valu) == str:

            if valu.find('/') != -1:
                minv, maxv = self.getCidrRange(valu)
                def cmpr(norm):
                    return norm >= minv and norm < maxv
                return cmpr

            if valu.find('-') != -1:
                minv, maxv = self.getNetRange(valu)
                def cmpr(norm):
                    return norm >= minv and norm <= maxv
                return cmpr

        return s_types.Type._ctorCmprEq(self, valu)

    def getTypeVals(self, valu):
        # Expand CIDR/range strings into each individual address value.
        if isinstance(valu, str):

            if valu.find('/') != -1:

                minv, maxv = self.getCidrRange(valu)

                while minv < maxv:
                    yield minv
                    minv += 1

                return

            if valu.find('-') != -1:

                minv, maxv = self.getNetRange(valu)

                while minv <= maxv:
                    yield minv
                    minv += 1

                return

        yield valu

    def _normPyInt(self, valu):
        if valu < 0 or valu > ipv4max:
            raise s_exc.BadTypeValu(name=self.name, valu=valu,
                                    mesg='Value outside of IPv4 range')
        addr = ipaddress.IPv4Address(valu)
        subs = {'type': getAddrType(addr)}
        return valu, {'subs': subs}

    def _normPyStr(self, valu):
        # Refang common defanged notations before parsing.
        valu = valu.replace('[.]', '.')
        valu = valu.replace('(.)', '.')

        valu = s_chop.printables(valu)

        try:
            byts = socket.inet_aton(valu)
        except OSError as e:
            raise s_exc.BadTypeValu(name=self.name, valu=valu,
                                    mesg=str(e)) from None

        norm = int.from_bytes(byts, 'big')
        return self._normPyInt(norm)

    def repr(self, norm):
        # Integer back to dotted-quad.
        byts = norm.to_bytes(4, 'big')
        return socket.inet_ntoa(byts)

    def getNetRange(self, text):
        # "a-b" -> (norm(a), norm(b)), both inclusive.
        minstr, maxstr = text.split('-', 1)
        minv, info = self.norm(minstr)
        maxv, info = self.norm(maxstr)
        return minv, maxv

    def getCidrRange(self, text):
        # "addr/mask" -> (network base, network base + block size);
        # the upper bound is exclusive.
        addr, mask = text.split('/', 1)
        norm, info = self.norm(addr)

        mask = cidrmasks[int(mask)]

        minv = norm & mask[0]
        return minv, minv + mask[1]

    def _storLiftEq(self, cmpr, valu):
        # Translate CIDR/range strings into inclusive range= storage lifts.
        if type(valu) == str:

            if valu.find('/') != -1:
                minv, maxv = self.getCidrRange(valu)
                # getCidrRange is exclusive at the top; range= is inclusive.
                maxv -= 1
                return (
                    ('range=', (minv, maxv), self.stortype),
                )

            if valu.find('-') != -1:
                minv, maxv = self.getNetRange(valu)
                return (
                    ('range=', (minv, maxv), self.stortype),
                )

        return self._storLiftNorm(cmpr, valu)

    def _ctorCmprGe(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return valu >= norm
        return cmpr

    def _ctorCmprLe(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return valu <= norm
        return cmpr

    def _ctorCmprGt(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return valu > norm
        return cmpr

    def _ctorCmprLt(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return valu < norm
        return cmpr
class IPv6(s_types.Type):
    '''
    An IPv6 address, normalized to compressed string form.  IPv4-mapped
    addresses normalize to "::ffff:<dotted-quad>" with an ipv4 sub.
    '''

    stortype = s_layer.STOR_TYPE_IPV6

    def postTypeInit(self):
        # NOTE: int values are also routed through _normPyStr.
        self.setNormFunc(int, self._normPyStr)
        self.setNormFunc(str, self._normPyStr)

        self.storlifts.update({
            '=': self._storLiftEq,
        })

    def _normPyStr(self, valu):
        try:
            if type(valu) == str:
                valu = s_chop.printables(valu)
                # A bare dotted-quad is treated as an IPv4-mapped address.
                if valu.find(':') == -1:
                    valu = '::ffff:' + valu

            v6 = ipaddress.IPv6Address(valu)
            v4 = v6.ipv4_mapped

            subs = {'type': getAddrType(v6)}

            if v4 is not None:
                # Normalize the embedded IPv4 through the model type to get
                # its canonical integer and dotted-quad repr.
                v4_int = self.modl.type('inet:ipv4').norm(v4.compressed)[0]
                v4_str = self.modl.type('inet:ipv4').repr(v4_int)
                subs['ipv4'] = v4_int
                return f'::ffff:{v4_str}', {'subs': subs}

            return ipaddress.IPv6Address(valu).compressed, {'subs': subs}

        except Exception as e:
            raise s_exc.BadTypeValu(valu=valu, name=self.name, mesg=str(e)) from None

    def getTypeVals(self, valu):
        # Expand CIDR/range strings into each individual compressed address.
        if isinstance(valu, str):

            if valu.find('/') != -1:

                minv, maxv = self.getCidrRange(valu)

                while minv <= maxv:
                    yield minv.compressed
                    minv += 1

                return

            if valu.find('-') != -1:

                minv, maxv = self.getNetRange(valu)

                while minv <= maxv:
                    yield minv.compressed
                    minv += 1

                return

        yield valu

    def getCidrRange(self, text):
        # "addr/prefix" -> (first address, last address), both inclusive.
        try:
            netw = ipaddress.IPv6Network(text, strict=False)
        except Exception as e:
            raise s_exc.BadTypeValu(valu=text, name=self.name, mesg=str(e)) from None

        minv = netw[0]
        maxv = netw[-1]
        return minv, maxv

    def getNetRange(self, text):
        # "a-b" -> (IPv6Address(a), IPv6Address(b)), both inclusive.
        minv, maxv = text.split('-', 1)
        try:
            minv = ipaddress.IPv6Address(minv)
            maxv = ipaddress.IPv6Address(maxv)
        except Exception as e:
            raise s_exc.BadTypeValu(valu=text, name=self.name, mesg=str(e)) from None
        return minv, maxv

    def _ctorCmprEq(self, valu):
        # Range-aware equality for CIDR and "a-b" strings (both inclusive).
        if type(valu) == str:

            if valu.find('/') != -1:
                minv, maxv = self.getCidrRange(valu)
                def cmpr(norm):
                    norm = ipaddress.IPv6Address(norm)
                    return norm >= minv and norm <= maxv
                return cmpr

            if valu.find('-') != -1:
                minv, maxv = self.getNetRange(valu)
                def cmpr(norm):
                    norm = ipaddress.IPv6Address(norm)
                    return norm >= minv and norm <= maxv
                return cmpr

        return s_types.Type._ctorCmprEq(self, valu)

    def _storLiftEq(self, cmpr, valu):
        # Translate CIDR/range strings into inclusive range= storage lifts.
        if type(valu) == str:

            if valu.find('/') != -1:
                minv, maxv = self.getCidrRange(valu)
                return (
                    ('range=', (minv.compressed, maxv.compressed), self.stortype),
                )

            if valu.find('-') != -1:
                minv, maxv = self.getNetRange(valu)
                return (
                    ('range=', (minv.compressed, maxv.compressed), self.stortype),
                )

        return self._storLiftNorm(cmpr, valu)
class IPv4Range(s_types.Range):
    '''
    An IPv4 address range, accepting either "a-b" or CIDR notation.
    '''

    def postTypeInit(self):
        self.opts['type'] = ('inet:ipv4', {})
        s_types.Range.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)
        self.cidrtype = self.modl.type('inet:cidr4')

    def _normPyStr(self, valu):
        # Dash-separated values use the stock Range normalization.
        if '-' in valu:
            return super()._normPyStr(valu)
        # Otherwise parse as CIDR and range over network..broadcast.
        subs = self.cidrtype._normPyStr(valu)[1]['subs']
        return self._normPyTuple((subs['network'], subs['broadcast']))
class IPv6Range(s_types.Range):
    '''
    An IPv6 address range, accepting "a-b", CIDR notation, or a 2-tuple.
    '''

    def postTypeInit(self):
        self.opts['type'] = ('inet:ipv6', {})
        s_types.Range.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)
        self.cidrtype = self.modl.type('inet:cidr6')

    def _normPyStr(self, valu):
        # Dash-separated values use the stock Range normalization.
        if '-' in valu:
            return super()._normPyStr(valu)
        # Otherwise parse as CIDR and range over network..broadcast.
        subs = self.cidrtype._normPyStr(valu)[1]['subs']
        return self._normPyTuple((subs['network'], subs['broadcast']))

    def _normPyTuple(self, valu):
        if len(valu) != 2:
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg=f'Must be a 2-tuple of type {self.subtype.name}')

        minaddr = self.subtype.norm(valu[0])[0]
        maxaddr = self.subtype.norm(valu[1])[0]

        # Compare numerically (not lexically) to validate the ordering.
        if ipaddress.ip_address(minaddr) > ipaddress.ip_address(maxaddr):
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg='minval cannot be greater than maxval')

        return (minaddr, maxaddr), {'subs': {'min': minaddr, 'max': maxaddr}}
class Rfc2822Addr(s_types.Str):
    '''
    An RFC 2822 compatible email address parser
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _normPyStr(self, valu):
        '''
        Normalize a display-name/address pair toward "<name> <email>" form.

        The email portion is only folded into the normalized value (and the
        email sub) when it validates as an inet:email; otherwise the cleaned
        input string is returned unchanged.
        '''
        # remove quotes for normalized version
        valu = valu.replace('"', ' ').replace("'", ' ')
        valu = valu.strip().lower()
        valu = ' '.join(valu.split())

        try:
            name, addr = email.utils.parseaddr(valu)
        except Exception as e:  # pragma: no cover
            # not sure we can ever really trigger this with a string as input
            # (message fixed: referenced function is parseaddr, not "parsaddr")
            mesg = f'email.utils.parseaddr failed: {str(e)}'
            raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg=mesg) from None

        subs = {}
        if name:
            subs['name'] = name

        try:
            data = self.modl.type('inet:email').norm(addr)
            if len(data) == 2:
                mail = data[0]

                subs['email'] = mail
                if name:
                    valu = '%s <%s>' % (name, mail)
                else:
                    valu = mail

        # Dropped the unused "as e" binding (flake8 F841).
        except s_exc.BadTypeValu:
            pass  # it's all good, we just dont have a valid email addr

        return valu, {'subs': subs}
class Url(s_types.Str):
    '''
    A Universal Resource Locator (URL).

    Normalization parses out proto/user/passwd/host/port/path/params
    subs and rebuilds a canonical URL string; the host may be an FQDN,
    IPv4, or IPv6 value.
    '''

    def postTypeInit(self):
        s_types.Str.postTypeInit(self)
        self.setNormFunc(str, self._normPyStr)

    def _ctorCmprEq(self, text):
        if text == '':
            # Asking if a +inet:url='' is a odd filter, but
            # the intuitive answer for that filter is to return False
            def cmpr(valu):
                return False
            return cmpr

        norm, info = self.norm(text)
        def cmpr(valu):
            return norm == valu
        return cmpr

    def _normPyStr(self, valu):
        orig = valu
        subs = {}
        proto = ''
        authparts = None
        hostparts = ''
        pathpart = ''
        parampart = ''

        # Protocol
        try:
            proto, valu = valu.split('://', 1)
            proto = proto.lower()
            subs['proto'] = proto
        except Exception:
            raise s_exc.BadTypeValu(valu=orig, name=self.name,
                                    mesg='Invalid/Missing protocol') from None

        # Query params first
        queryrem = ''
        if '?' in valu:
            valu, queryrem = valu.split('?', 1)
            # TODO break out query params separately

        # Resource Path
        parts = valu.split('/', 1)
        if len(parts) == 2:
            valu, pathpart = parts
            pathpart = f'/{pathpart}'
        subs['path'] = pathpart

        if queryrem:
            parampart = f'?{queryrem}'
        subs['params'] = parampart

        # Optional User/Password
        parts = valu.split('@', 1)
        if len(parts) == 2:
            authparts, valu = parts
            userpass = authparts.split(':', 1)
            subs['user'] = userpass[0]
            if len(userpass) == 2:
                subs['passwd'] = userpass[1]

        # Host (FQDN, IPv4, or IPv6)
        host = None
        port = None

        # Treat as IPv6 if starts with [ or contains multiple :
        if valu.startswith('[') or valu.count(':') >= 2:
            try:
                # "[addr]:port" form splits out the port first.
                match = srv6re.match(valu)
                if match:
                    valu, port = match.groups()

                host, ipv6_subs = self.modl.type('inet:ipv6').norm(valu)
                subs['ipv6'] = host

                if match:
                    host = f'[{host}]'

            except Exception:
                pass

        else:
            # FQDN and IPv4 handle ports the same way
            fqdnipv4_parts = valu.split(':', 1)
            part = fqdnipv4_parts[0]
            if len(fqdnipv4_parts) == 2:
                port = fqdnipv4_parts[1]

            # IPv4
            try:
                # Norm and repr to handle fangs
                ipv4 = self.modl.type('inet:ipv4').norm(part)[0]
                host = self.modl.type('inet:ipv4').repr(ipv4)
                subs['ipv4'] = ipv4
            except Exception:
                pass

            # FQDN
            if host is None:
                try:
                    host = self.modl.type('inet:fqdn').norm(part)[0]
                    subs['fqdn'] = host
                except Exception:
                    pass

        # Raise exception if there was no FQDN, IPv4, or IPv6
        if host is None:
            raise s_exc.BadTypeValu(valu=orig, name=self.name, mesg='No valid host')

        # Optional Port
        if port is not None:
            port = self.modl.type('inet:port').norm(port)[0]
            subs['port'] = port
        else:
            # Look up default port for protocol, but don't add it back into the url
            defport = s_l_iana.services.get(proto)
            if defport:
                subs['port'] = self.modl.type('inet:port').norm(defport)[0]

        # Set up Normed URL
        if authparts:
            hostparts = f'{authparts}@'

        hostparts = f'{hostparts}{host}'
        if port is not None:
            hostparts = f'{hostparts}:{port}'

        base = f'{proto}://{hostparts}{pathpart}'
        subs['base'] = base
        norm = f'{base}{parampart}'
        return norm, {'subs': subs}
class InetModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('inet:fqdn').onAdd(self._onAddFqdn)
self.model.prop('inet:fqdn:zone').onSet(self._onSetFqdnZone)
self.model.prop('inet:fqdn:iszone').onSet(self._onSetFqdnIsZone)
self.model.prop('inet:fqdn:issuffix').onSet(self._onSetFqdnIsSuffix)
self.model.form('inet:passwd').onAdd(self._onAddPasswd)
self.model.prop('inet:whois:rec:text').onSet(self._onSetWhoisText)
async def _onSetWhoisText(self, node, oldv):
text = node.get('text')
fqdn = node.get('fqdn')
asof = node.get('asof')
for form, valu in s_scrape.scrape(text):
if form == 'inet:email':
whomail = await node.snap.addNode('inet:whois:email', (fqdn, valu))
await whomail.set('.seen', asof)
async def _onAddPasswd(self, node):
byts = node.ndef[1].encode('utf8')
await node.set('md5', hashlib.md5(byts).hexdigest())
await node.set('sha1', hashlib.sha1(byts).hexdigest())
await node.set('sha256', hashlib.sha256(byts).hexdigest())
async def _onAddFqdn(self, node):
fqdn = node.ndef[1]
domain = node.get('domain')
if domain is None:
await node.set('iszone', False)
await node.set('issuffix', True)
return
if node.get('issuffix') is None:
await node.set('issuffix', False)
# almost certainly in the cache anyway....
parent = await node.snap.addNode('inet:fqdn', domain)
if parent.get('issuffix'):
await node.set('iszone', True)
await node.set('zone', fqdn)
return
await node.set('iszone', False)
if parent.get('iszone'):
await node.set('zone', domain)
return
zone = parent.get('zone')
if zone is not None:
await node.set('zone', zone)
async def _onSetFqdnIsSuffix(self, node, oldv):
fqdn = node.ndef[1]
issuffix = node.get('issuffix')
async for child in node.snap.nodesByPropValu('inet:fqdn:domain', '=', fqdn):
await child.set('iszone', issuffix)
async def _onSetFqdnIsZone(self, node, oldv):
fqdn = node.ndef[1]
iszone = node.get('iszone')
if iszone:
await node.set('zone', fqdn)
return
# we are not a zone...
domain = node.get('domain')
if not domain:
await node.pop('zone')
return
parent = await node.snap.addNode('inet:fqdn', domain)
zone = parent.get('zone')
if zone is None:
await node.pop('zone')
return
await node.set('zone', zone)
async def _onSetFqdnZone(self, node, oldv):
fqdn = node.ndef[1]
zone = node.get('zone')
async for child in node.snap.nodesByPropValu('inet:fqdn:domain', '=', fqdn):
# if they are their own zone level, skip
if child.get('iszone'):
continue
# the have the same zone we do
await child.set('zone', zone)
def getModelDefs(self):
return (
('inet', {
'ctors': (
('inet:addr', 'synapse.models.inet.Addr', {}, {
'doc': 'A network layer URL-like format to represent tcp/udp/icmp clients and servers.',
'ex': 'tcp://1.2.3.4:80'
}),
('inet:cidr4', 'synapse.models.inet.Cidr4', {}, {
'doc': 'An IPv4 address block in Classless Inter-Domain Routing (CIDR) notation.',
'ex': '1.2.3.0/24'
}),
('inet:cidr6', 'synapse.models.inet.Cidr6', {}, {
'doc': 'An IPv6 address block in Classless Inter-Domain Routing (CIDR) notation.',
'ex': '2001:db8::/101'
}),
('inet:email', 'synapse.models.inet.Email', {}, {
'doc': 'An e-mail address.'}),
('inet:fqdn', 'synapse.models.inet.Fqdn', {}, {
'doc': 'A Fully Qualified Domain Name (FQDN).',
'ex': 'vertex.link'}),
('inet:ipv4', 'synapse.models.inet.IPv4', {}, {
'doc': 'An IPv4 address.',
'ex': '1.2.3.4'
}),
('inet:ipv4range', 'synapse.models.inet.IPv4Range', {}, {
'doc': 'An IPv4 address range.',
'ex': '1.2.3.4-1.2.3.8'
}),
('inet:ipv6', 'synapse.models.inet.IPv6', {}, {
'doc': 'An IPv6 address.',
'ex': '2607:f8b0:4004:809::200e'
}),
('inet:ipv6range', 'synapse.models.inet.IPv6Range', {}, {
'doc': 'An IPv6 address range.',
'ex': '(2607:f8b0:4004:809::200e, 2607:f8b0:4004:809::2011)'
}),
('inet:rfc2822:addr', 'synapse.models.inet.Rfc2822Addr', {}, {
'doc': 'An RFC 2822 Address field.',
'ex': '"Visi Kenshoto" <visi@vertex.link>'
}),
('inet:url', 'synapse.models.inet.Url', {}, {
'doc': 'A Universal Resource Locator (URL).',
'ex': 'http://www.woot.com/files/index.html'
}),
),
'types': (
('inet:asn', ('int', {}), {
'doc': 'An Autonomous System Number (ASN).'
}),
('inet:asnet4', ('comp', {'fields': (('asn', 'inet:asn'), ('net4', 'inet:net4'))}), {
'doc': 'An Autonomous System Number (ASN) and its associated IPv4 address range.',
'ex': '(54959, (1.2.3.4, 1.2.3.20))',
}),
('inet:asnet6', ('comp', {'fields': (('asn', 'inet:asn'), ('net6', 'inet:net6'))}), {
'doc': 'An Autonomous System Number (ASN) and its associated IPv6 address range.',
'ex': '(54959, (ff::00, ff::02))',
}),
('inet:client', ('inet:addr', {}), {
'doc': 'A network client address.'
}),
('inet:download', ('guid', {}), {
'doc': 'An instance of a file downloaded from a server.',
}),
('inet:flow', ('guid', {}), {
'doc': 'An individual network connection between a given source and destination.'
}),
('inet:group', ('str', {}), {
'doc': 'A group name string.'
}),
('inet:http:cookie', ('str', {}), {
'doc': 'An HTTP cookie string.'}),
('inet:http:header:name', ('str', {'lower': True}), {}),
('inet:http:header', ('comp', {'fields': (('name', 'inet:http:header:name'), ('value', 'str'))}), {
'doc': 'An HTTP protocol header key/value.'}),
('inet:http:request:header', ('inet:http:header', {}), {
'doc': 'An HTTP request header.'}),
('inet:http:response:header', ('inet:http:header', {}), {
'doc': 'An HTTP response header.'}),
('inet:http:param', ('comp', {'fields': (('name', 'str'), ('value', 'str'))}), {
'doc': 'An HTTP request path query parameter.'}),
('inet:http:session', ('guid', {}), {
'doc': 'An HTTP session.'}),
('inet:http:request', ('guid', {}), {
'interfaces': ('inet:proto:request',),
'doc': 'A single HTTP request.'}),
('inet:iface', ('guid', {}), {
'doc': 'A network interface with a set of associated protocol addresses.'
}),
('inet:mac', ('str', {'lower': True, 'regex': '^([0-9a-f]{2}[:]){5}([0-9a-f]{2})$'}), {
'doc': 'A 48-bit Media Access Control (MAC) address.',
'ex': 'aa:bb:cc:dd:ee:ff'
}),
('inet:net4', ('inet:ipv4range', {}), {
'doc': 'An IPv4 address range.',
'ex': '(1.2.3.4, 1.2.3.20)'
}),
('inet:net6', ('inet:ipv6range', {}), {
'doc': 'An IPv6 address range.',
'ex': "('ff::00', 'ff::30')"
}),
('inet:passwd', ('str', {}), {
'doc': 'A password string.'
}),
('inet:ssl:cert', ('comp', {'fields': (('server', 'inet:server'), ('file', 'file:bytes'))}), {
'doc': 'An SSL certificate file served by a server.',
'ex': '(1.2.3.4:443, guid:d41d8cd98f00b204e9800998ecf8427e)',
}),
('inet:port', ('int', {'min': 0, 'max': 0xffff}), {
'doc': 'A network port.',
'ex': '80'
}),
('inet:server', ('inet:addr', {}), {
'doc': 'A network server address.'
}),
('inet:banner', ('comp', {'fields': (('server', 'inet:server'), ('text', 'it:dev:str'))}), {
'doc': 'A network protocol banner string presented by a server.',
}),
('inet:servfile', ('comp', {'fields': (('server', 'inet:server'), ('file', 'file:bytes'))}), {
'doc': 'A file hosted on a server for access over a network protocol.',
}),
('inet:urlfile', ('comp', {'fields': (('url', 'inet:url'), ('file', 'file:bytes'))}), {
'doc': 'A file hosted at a specific Universal Resource Locator (URL).'
}),
('inet:urlredir', ('comp', {'fields': (('src', 'inet:url'), ('dst', 'inet:url'))}), {
'doc': 'A URL that redirects to another URL, such as via a URL shortening service '
'or an HTTP 302 response.',
'ex': '(http://foo.com/,http://bar.com/)'
}),
('inet:url:mirror', ('comp', {'fields': (('of', 'inet:url'), ('at', 'inet:url'))}), {
'doc': 'A URL mirror site.',
}),
('inet:user', ('str', {'lower': True}), {
'doc': 'A username string.'
}),
('inet:search:query', ('guid', {}), {
'doc': 'An instance of a search query issued to a search engine.',
}),
('inet:search:result', ('guid', {}), {
'doc': 'A single result from a web search.',
}),
('inet:web:acct', ('comp', {'fields': (('site', 'inet:fqdn'), ('user', 'inet:user')), 'sepr': '/'}), {
'doc': 'An account with a given Internet-based site or service.',
'ex': 'twitter.com/invisig0th'
}),
('inet:web:action', ('guid', {}), {
'doc': 'An instance of an account performing an action at an Internet-based site or service.'
}),
('inet:web:chprofile', ('guid', {}), {
'doc': 'A change to a web account. Used to capture historical properties associated with '
' an account, as opposed to current data in the inet:web:acct node.'
}),
('inet:web:file', ('comp', {'fields': (('acct', 'inet:web:acct'), ('file', 'file:bytes'))}), {
'doc': 'A file posted by a web account.'
}),
('inet:web:follows', ('comp', {'fields': (('follower', 'inet:web:acct'), ('followee', 'inet:web:acct'))}), {
'doc': 'A web account follows or is connected to another web account.'
}),
('inet:web:group', ('comp', {'fields': (('site', 'inet:fqdn'), ('id', 'inet:group')), 'sepr': '/'}), {
'doc': 'A group hosted within or registered with a given Internet-based site or service.',
'ex': 'somesite.com/mycoolgroup'
}),
('inet:web:logon', ('guid', {}), {
'doc': 'An instance of an account authenticating to an Internet-based site or service.'
}),
('inet:web:memb', ('comp', {'fields': (('acct', 'inet:web:acct'), ('group', 'inet:web:group'))}), {
'doc': 'A web account that is a member of a web group.'
}),
('inet:web:mesg', ('comp', {'fields': (('from', 'inet:web:acct'), ('to', 'inet:web:acct'), ('time', 'time'))}), {
'doc': 'A message sent from one web account to another web account.',
'ex': '((twitter.com, invisig0th), (twitter.com, gobbles), 20041012130220)'
}),
('inet:web:post', ('guid', {}), {
'doc': 'A post made by a web account.'
}),
('inet:web:hashtag', ('str', {'lower': True, 'regex': r'^#[\w]+$'}), {
'doc': 'A hashtag used in a web post.',
}),
('inet:whois:contact', ('comp', {'fields': (('rec', 'inet:whois:rec'), ('type', ('str', {'lower': True})))}), {
'doc': 'An individual contact from a domain whois record.'
}),
('inet:whois:rar', ('str', {'lower': True}), {
'doc': 'A domain registrar.',
'ex': 'godaddy, inc.'
}),
('inet:whois:rec', ('comp', {'fields': (('fqdn', 'inet:fqdn'), ('asof', 'time'))}), {
'doc': 'A domain whois record.'
}),
('inet:whois:recns', ('comp', {'fields': (('ns', 'inet:fqdn'), ('rec', 'inet:whois:rec'))}), {
'doc': 'A nameserver associated with a domain whois record.'
}),
('inet:whois:reg', ('str', {'lower': True}), {
'doc': 'A domain registrant.',
'ex': 'woot hostmaster'
}),
('inet:whois:email', ('comp', {'fields': (('fqdn', 'inet:fqdn'), ('email', 'inet:email'))}), {
'doc': 'An email address associated with an FQDN via whois registration text.',
}),
('inet:whois:ipquery', ('guid', {}), {
'doc': 'Query details used to retrieve an IP record.'
}),
('inet:whois:iprec', ('guid', {}), {
'doc': 'An IPv4/IPv6 block registration record.'
}),
('inet:whois:ipcontact', ('guid', {}), {
'doc': 'An individual contact from an IP block record.'
}),
('inet:whois:regid', ('str', {}), {
'doc': 'The registry unique identifier of the registration record.',
'ex': 'NET-10-0-0-0-1'
}),
('inet:wifi:ap', ('comp', {'fields': (('ssid', 'inet:wifi:ssid'), ('bssid', 'inet:mac'))}), {
'doc': 'An SSID/MAC address combination for a wireless access point.'
}),
('inet:wifi:ssid', ('str', {}), {
'doc': 'A WiFi service set identifier (SSID) name.',
'ex': 'The Vertex Project'
}),
('inet:email:message', ('guid', {}), {
'doc': 'A unique email message.',
}),
('inet:email:header:name', ('str', {'lower': True}), {
'doc': 'An email header name.',
'ex': 'subject',
}),
('inet:email:header', ('comp', {'fields': (('name', 'inet:email:header:name'), ('value', 'str'))}), {
'doc': 'A unique email message header.',
}),
('inet:email:message:attachment', ('comp', {'fields': (('message', 'inet:email:message'), ('file', 'file:bytes'))}), {
'doc': 'A file which was attached to an email message.',
}),
('inet:email:message:link', ('comp', {'fields': (('message', 'inet:email:message'), ('url', 'inet:url'))}), {
'doc': 'A url/link embedded in an email message.',
}),
),
'interfaces': (
('inet:proto:request', {
'doc': 'Properties common to network protocol requests and responses.',
'interfaces': ('it:host:activity',),
'props': (
('flow', ('inet:flow', {}), {
'doc': 'The raw inet:flow containing the request.'}),
('client', ('inet:client', {}), {
'doc': 'The inet:addr of the client.'}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The server IPv4 address that the request was sent from.'}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The server IPv6 address that the request was sent from.'}),
('client:host', ('it:host', {}), {
'doc': 'The host that the request was sent from.'}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server.'}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The server IPv4 address that the request was sent to.'}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The server IPv6 address that the request was sent to.'}),
('server:port', ('inet:port', {}), {
'doc': 'The server port that the request was sent to.'}),
('server:host', ('it:host', {}), {
'doc': 'The host that the request was sent to.'}),
),
}),
),
'forms': (
('inet:email:message', {}, (
('to', ('inet:email', {}), {
'doc': 'The email address of the recipient.'
}),
('from', ('inet:email', {}), {
'doc': 'The email address of the sender.'
}),
('replyto', ('inet:email', {}), {
'doc': 'The email address from the reply-to header.'
}),
('subject', ('str', {}), {
'doc': 'The email message subject line.'
}),
('body', ('str', {}), {
'doc': 'The body of the email message.',
'disp': {'hint': 'text'},
}),
('date', ('time', {}), {
'doc': 'The time the email message was received.'
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file bytes which contain the email message.'
}),
('headers', ('array', {'type': 'inet:email:header'}), {
'doc': 'An array of email headers from the message.'
}),
)),
('inet:email:header', {}, (
('name', ('inet:email:header:name', {}), {
'ro': True,
'doc': 'The name of the email header.'}),
('value', ('str', {}), {
'ro': True,
'doc': 'The value of the email header.'}),
)),
('inet:email:message:attachment', {}, (
('message', ('inet:email:message', {}), {
'ro': True,
'doc': 'The message containing the attached file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The attached file.'}),
('name', ('file:base', {}), {
'doc': 'The name of the attached file.'}),
)),
('inet:email:message:link', {}, (
('message', ('inet:email:message', {}), {
'ro': True,
'doc': 'The message containing the embedded link.'}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'The url contained within the email message.'}),
)),
('inet:asn', {}, (
('name', ('str', {'lower': True}), {
'doc': 'The name of the organization currently responsible for the ASN.'
}),
('owner', ('ou:org', {}), {
'doc': 'The guid of the organization currently responsible for the ASN.'
}),
)),
('inet:asnet4', {}, (
('asn', ('inet:asn', {}), {
'ro': True,
'doc': 'The Autonomous System Number (ASN) of the netblock.'
}),
('net4', ('inet:net4', {}), {
'ro': True,
'doc': 'The IPv4 address range assigned to the ASN.'
}),
('net4:min', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The first IPv4 in the range assigned to the ASN.'
}),
('net4:max', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The last IPv4 in the range assigned to the ASN.'
}),
)),
('inet:asnet6', {}, (
('asn', ('inet:asn', {}), {
'ro': True,
'doc': 'The Autonomous System Number (ASN) of the netblock.'
}),
('net6', ('inet:net6', {}), {
'ro': True,
'doc': 'The IPv6 address range assigned to the ASN.'
}),
('net6:min', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The first IPv6 in the range assigned to the ASN.'
}),
('net6:max', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The last IPv6 in the range assigned to the ASN.'
}),
)),
('inet:cidr4', {}, (
('broadcast', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The broadcast IP address from the CIDR notation.'
}),
('mask', ('int', {}), {
'ro': True,
'doc': 'The mask from the CIDR notation.'
}),
('network', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The network IP address from the CIDR notation.'
}),
)),
('inet:cidr6', {}, (
('broadcast', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The broadcast IP address from the CIDR notation.'
}),
('mask', ('int', {}), {
'ro': True,
'doc': 'The mask from the CIDR notation.'
}),
('network', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The network IP address from the CIDR notation.'
}),
)),
('inet:client', {}, (
('proto', ('str', {'lower': True}), {
'ro': True,
'doc': 'The network protocol of the client.'
}),
('ipv4', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The IPv4 of the client.'
}),
('ipv6', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The IPv6 of the client.'
}),
('host', ('it:host', {}), {
'ro': True,
'doc': 'The it:host node for the client.'
}),
('port', ('inet:port', {}), {
'doc': 'The client tcp/udp port.'
}),
)),
('inet:download', {}, (
('time', ('time', {}), {
'doc': 'The time the file was downloaded.'
}),
('fqdn', ('inet:fqdn', {}), {
'doc': 'The FQDN used to resolve the server.'
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was downloaded.'
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server.'
}),
('server:host', ('it:host', {}), {
'doc': 'The it:host node for the server.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 of the server.'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 of the server.'
}),
('server:port', ('inet:port', {}), {
'doc': 'The server tcp/udp port.'
}),
('server:proto', ('str', {'lower': True}), {
'doc': 'The server network layer protocol.'
}),
('client', ('inet:client', {}), {
'doc': 'The inet:addr of the client.'
}),
('client:host', ('it:host', {}), {
'doc': 'The it:host node for the client.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 of the client.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 of the client.'
}),
('client:port', ('inet:port', {}), {
'doc': 'The client tcp/udp port.'
}),
('client:proto', ('str', {'lower': True}), {
'doc': 'The client network layer protocol.'
}),
)),
('inet:email', {}, (
('user', ('inet:user', {}), {
'ro': True,
'doc': 'The username of the email address.'}),
('fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The domain of the email address.'}),
)),
('inet:flow', {}, (
('time', ('time', {}), {
'doc': 'The time the network connection was initiated.'
}),
('duration', ('int', {}), {
'doc': 'The duration of the flow in seconds.'
}),
('from', ('guid', {}), {
'doc': 'The ingest source file/iden. Used for reparsing.'
}),
('dst', ('inet:server', {}), {
'doc': 'The destination address / port for a connection.'
}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address.'
}),
('dst:ipv6', ('inet:ipv6', {}), {
'doc': 'The destination IPv6 address.'
}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port.'
}),
('dst:proto', ('str', {'lower': True}), {
'doc': 'The destination protocol.'
}),
('dst:host', ('it:host', {}), {
'doc': 'The guid of the destination host.'
}),
('dst:proc', ('it:exec:proc', {}), {
'doc': 'The guid of the destination process.'
}),
('dst:exe', ('file:bytes', {}), {
'doc': 'The file (executable) that received the connection.'
}),
('dst:txbytes', ('int', {}), {
'doc': 'The number of bytes sent by the destination host / process / file.'
}),
('src', ('inet:client', {}), {
'doc': 'The source address / port for a connection.'
}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address.'
}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address.'
}),
('src:port', ('inet:port', {}), {
'doc': 'The source port.'
}),
('src:proto', ('str', {'lower': True}), {
'doc': 'The source protocol.'
}),
('src:host', ('it:host', {}), {
'doc': 'The guid of the source host.'
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The guid of the source process.'
}),
('src:exe', ('file:bytes', {}), {
'doc': 'The file (executable) that created the connection.'
}),
('src:txbytes', ('int', {}), {
'doc': 'The number of bytes sent by the source host / process / file.'
}),
('dst:cpes', ('array', {'type': 'it:sec:cpe', 'uniq': True, 'sorted': True}), {
'doc': 'An array of NIST CPEs identified on the destination host.',
}),
('dst:softnames', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'An array of software names identified on the destination host.',
}),
('src:cpes', ('array', {'type': 'it:sec:cpe', 'uniq': True, 'sorted': True}), {
'doc': 'An array of NIST CPEs identified on the source host.',
}),
('src:softnames', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'An array of software names identified on the source host.',
}),
)),
('inet:fqdn', {}, (
('domain', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The parent domain for the FQDN.',
}),
('host', ('str', {'lower': True}), {
'ro': True,
'doc': 'The host part of the FQDN.',
}),
('issuffix', ('bool', {}), {
'doc': 'True if the FQDN is considered a suffix.',
}),
('iszone', ('bool', {}), {
'doc': 'True if the FQDN is considered a zone.',
}),
('zone', ('inet:fqdn', {}), {
'doc': 'The zone level parent for this FQDN.',
}),
)),
('inet:group', {}, ()),
('inet:http:request:header', {}, (
('name', ('inet:http:header:name', {}), {'ro': True,
'doc': 'The name of the HTTP request header.'}),
('value', ('str', {}), {'ro': True,
'doc': 'The value of the HTTP request header.'}),
)),
('inet:http:response:header', {}, (
('name', ('inet:http:header:name', {}), {'ro': True,
'doc': 'The name of the HTTP response header.'}),
('value', ('str', {}), {'ro': True,
'doc': 'The value of the HTTP response header.'}),
)),
('inet:http:param', {}, (
('name', ('str', {'lower': True}), {'ro': True,
'doc': 'The name of the HTTP query parameter.'}),
('value', ('str', {}), {'ro': True,
'doc': 'The value of the HTTP query parameter.'}),
)),
('inet:http:cookie', {}, ()),
('inet:http:request', {}, (
('method', ('str', {}), {
'doc': 'The HTTP request method string.'}),
('path', ('str', {}), {
'doc': 'The requested HTTP path (without query parameters).'}),
('url', ('inet:url', {}), {
'doc': 'The reconstructed URL for the request if known.'}),
('query', ('str', {}), {
'doc': 'The HTTP query string which optionally follows the path.'}),
('headers', ('array', {'type': 'inet:http:request:header'}), {
'doc': 'An array of HTTP headers from the request.'}),
('body', ('file:bytes', {}), {
'doc': 'The body of the HTTP request.'}),
('response:time', ('time', {}), {}),
('response:code', ('int', {}), {}),
('response:reason', ('str', {}), {}),
('response:headers', ('array', {'type': 'inet:http:response:header'}), {
'doc': 'An array of HTTP headers from the response.'}),
('response:body', ('file:bytes', {}), {}),
('session', ('inet:http:session', {}), {
'doc': 'The HTTP session this request was part of.'}),
)),
('inet:http:session', {}, (
('contact', ('ps:contact', {}), {
'doc': 'The ps:contact which owns the session.'}),
)),
('inet:iface', {}, (
('host', ('it:host', {}), {
'doc': 'The guid of the host the interface is associated with.'
}),
('network', ('it:network', {}), {
'doc': 'The guid of the it:network the interface connected to.'
}),
('type', ('str', {'lower': True}), {
'doc': 'The free-form interface type.'
}),
('mac', ('inet:mac', {}), {
'doc': 'The ethernet (MAC) address of the interface.'
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address of the interface.'
}),
('ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address of the interface.'
}),
('phone', ('tel:phone', {}), {
'doc': 'The telephone number of the interface.'
}),
('wifi:ssid', ('inet:wifi:ssid', {}), {
'doc': 'The wifi SSID of the interface.'
}),
('wifi:bssid', ('inet:mac', {}), {
'doc': 'The wifi BSSID of the interface.'
}),
('adid', ('it:adid', {}), {
'doc': 'An advertising ID associated with the interface.',
}),
('mob:imei', ('tel:mob:imei', {}), {
'doc': 'The IMEI of the interface.'
}),
('mob:imsi', ('tel:mob:imsi', {}), {
'doc': 'The IMSI of the interface.'
}),
)),
('inet:ipv4', {}, (
('asn', ('inet:asn', {}), {
'doc': 'The ASN to which the IPv4 address is currently assigned.'}),
('latlong', ('geo:latlong', {}), {
'doc': 'The best known latitude/longitude for the node.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the IPv4.'}),
('place', ('geo:place', {}), {
'doc': 'The geo:place associated with the latlong property.'}),
('type', ('str', {}), {
'doc': 'The type of IP address (e.g., private, multicast, etc.).'}),
('dns:rev', ('inet:fqdn', {}), {
'doc': 'The most current DNS reverse lookup for the IPv4.'}),
)),
('inet:ipv6', {}, (
('asn', ('inet:asn', {}), {
'doc': 'The ASN to which the IPv6 address is currently assigned.'}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The mapped ipv4.'}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known latitude/longitude for the node.'}),
('place', ('geo:place', {}), {
'doc': 'The geo:place associated with the latlong property.'}),
('dns:rev', ('inet:fqdn', {}), {
'doc': 'The most current DNS reverse lookup for the IPv6.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the IPv6.'}),
)),
('inet:mac', {}, (
('vendor', ('str', {}), {
'doc': 'The vendor associated with the 24-bit prefix of a MAC address.'
}),
)),
('inet:passwd', {}, (
('md5', ('hash:md5', {}), {
'ro': True,
'doc': 'The MD5 hash of the password.'
}),
('sha1', ('hash:sha1', {}), {
'ro': True,
'doc': 'The SHA1 hash of the password.'
}),
('sha256', ('hash:sha256', {}), {
'ro': True,
'doc': 'The SHA256 hash of the password.'
}),
)),
('inet:rfc2822:addr', {}, (
('name', ('ps:name', {}), {
'ro': True,
'doc': 'The name field parsed from an RFC 2822 address string.'
}),
('email', ('inet:email', {}), {
'ro': True,
'doc': 'The email field parsed from an RFC 2822 address string.'
}),
)),
('inet:server', {}, (
('proto', ('str', {'lower': True}), {
'ro': True,
'doc': 'The network protocol of the server.'
}),
('ipv4', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The IPv4 of the server.'
}),
('ipv6', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The IPv6 of the server.'
}),
('host', ('it:host', {}), {
'ro': True,
'doc': 'The it:host node for the server.'
}),
('port', ('inet:port', {}), {
'doc': 'The server tcp/udp port.'
}),
)),
('inet:banner', {}, (
('server', ('inet:server', {}), {'ro': True,
'doc': 'The server which presented the banner string.'}),
('server:ipv4', ('inet:ipv4', {}), {'ro': True,
'doc': 'The IPv4 address of the server.'}),
('server:ipv6', ('inet:ipv6', {}), {'ro': True,
'doc': 'The IPv6 address of the server.'}),
('server:port', ('inet:port', {}), {'ro': True,
'doc': 'The network port.'}),
('text', ('it:dev:str', {}), {'ro': True,
'doc': 'The banner text.',
'disp': {'hint': 'text'},
}),
)),
('inet:servfile', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file hosted by the server.'
}),
('server', ('inet:server', {}), {
'ro': True,
'doc': 'The inet:addr of the server.'
}),
('server:proto', ('str', {'lower': True}), {
'ro': True,
'doc': 'The network protocol of the server.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The IPv4 of the server.'
}),
('server:ipv6', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The IPv6 of the server.'
}),
('server:host', ('it:host', {}), {
'ro': True,
'doc': 'The it:host node for the server.'
}),
('server:port', ('inet:port', {}), {
'doc': 'The server tcp/udp port.'
}),
)),
('inet:ssl:cert', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file bytes for the SSL certificate.'
}),
('server', ('inet:server', {}), {
'ro': True,
'doc': 'The server that presented the SSL certificate.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The SSL server IPv4 address.'
}),
('server:ipv6', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The SSL server IPv6 address.'
}),
('server:port', ('inet:port', {}), {
'ro': True,
'doc': 'The SSL server listening port.'
}),
)),
('inet:url', {}, (
('fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The fqdn used in the URL (e.g., http://www.woot.com/page.html).'
}),
('ipv4', ('inet:ipv4', {}), {
'ro': True,
'doc': 'The IPv4 address used in the URL (e.g., http://1.2.3.4/page.html).'
}),
('ipv6', ('inet:ipv6', {}), {
'ro': True,
'doc': 'The IPv6 address used in the URL.'
}),
('passwd', ('inet:passwd', {}), {
'ro': True,
'doc': 'The optional password used to access the URL.'
}),
('base', ('str', {}), {
'ro': True,
'doc': 'The base scheme, user/pass, fqdn, port and path w/o parameters.'
}),
('path', ('str', {}), {
'ro': True,
'doc': 'The path in the URL w/o parameters.'
}),
('params', ('str', {}), {
'ro': True,
'doc': 'The URL parameter string.'
}),
('port', ('inet:port', {}), {
'ro': True,
'doc': 'The port of the URL. URLs prefixed with http will be set to port 80 and '
'URLs prefixed with https will be set to port 443 unless otherwise specified.'
}),
('proto', ('str', {'lower': True}), {
'ro': True,
'doc': 'The protocol in the URL.'
}),
('user', ('inet:user', {}), {
'ro': True,
'doc': 'The optional username used to access the URL.'
}),
)),
('inet:urlfile', {}, (
('url', ('inet:url', {}), {
'ro': True,
'doc': 'The URL where the file was hosted.'
}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that was hosted at the URL.'
}),
)),
('inet:urlredir', {}, (
('src', ('inet:url', {}), {
'ro': True,
'doc': 'The original/source URL before redirect.'
}),
('src:fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The FQDN within the src URL (if present).'
}),
('dst', ('inet:url', {}), {
'ro': True,
'doc': 'The redirected/destination URL.'
}),
('dst:fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The FQDN within the dst URL (if present).'
}),
)),
('inet:url:mirror', {}, (
('of', ('inet:url', {}), {
'ro': True,
'doc': 'The URL being mirrored.',
}),
('at', ('inet:url', {}), {
'ro': True,
'doc': 'The URL of the mirror.',
}),
)),
('inet:user', {}, ()),
('inet:search:query', {}, (
('text', ('str', {}), {
'doc': 'The search query text.',
'disp': {'hint': 'text'},
}),
('time', ('time', {}), {
'doc': 'The time the web search was issued.',
}),
('acct', ('inet:web:acct', {}), {
'doc': 'The account that the query was issued as.',
}),
('host', ('it:host', {}), {
'doc': 'The host that issued the query.',
}),
('engine', ('str', {'lower': True}), {
'ex': 'google',
'doc': 'A simple name for the search engine used.',
}),
)),
('inet:search:result', {}, (
('query', ('inet:search:query', {}), {
'doc': 'The search query that produced the result.'}),
('title', ('str', {'lower': True}), {
'doc': 'The title of the matching web page.'}),
('rank', ('int', {}), {
'doc': 'The rank/order of the query result.'}),
('url', ('inet:url', {}), {
'doc': 'The URL hosting the matching content.'}),
('text', ('str', {'lower': True}), {
'doc': 'Extracted/matched text from the matched content.'}),
)),
('inet:web:acct', {}, (
('avatar', ('file:bytes', {}), {
'doc': 'The file representing the avatar (e.g., profile picture) for the account.'
}),
('dob', ('time', {}), {
'doc': 'A self-declared date of birth for the account (if the account belongs to a person).'
}),
('email', ('inet:email', {}), {
'doc': 'The email address associated with the account.'
}),
('linked:accts', ('array', {'type': 'inet:web:acct', 'uniq': True, 'sorted': True}), {
'doc': 'Linked accounts specified in the account profile.',
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known latitude/longitude for the node.'
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place associated with the latlong property.'
}),
('loc', ('loc', {}), {
'doc': 'A self-declared location for the account.'
}),
('name', ('inet:user', {}), {
'doc': 'The localized name associated with the account (may be different from the '
'account identifier, e.g., a display name).'
}),
('name:en', ('inet:user', {}), {
'doc': 'The English version of the name associated with the (may be different from '
'the account identifier, e.g., a display name).',
}),
('aliases', ('array', {'type': 'inet:user', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the user.',
}),
('occupation', ('str', {'lower': True}), {
'doc': 'A self-declared occupation for the account.'
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The current password for the account.'
}),
('phone', ('tel:phone', {}), {
'doc': 'The phone number associated with the account.'
}),
('realname', ('ps:name', {}), {
'doc': 'The localized version of the real name of the account owner / registrant.'
}),
('realname:en', ('ps:name', {}), {
'doc': 'The English version of the real name of the account owner / registrant.'
}),
('signup', ('time', {}), {
'doc': 'The date and time the account was registered.'
}),
('signup:client', ('inet:client', {}), {
'doc': 'The client address used to sign up for the account.'
}),
('signup:client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address used to sign up for the account.'
}),
('signup:client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address used to sign up for the account.'
}),
('site', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The site or service associated with the account.'
}),
('tagline', ('str', {}), {
'doc': 'The text of the account status or tag line.'
}),
('url', ('inet:url', {}), {
'doc': 'The service provider URL where the account is hosted.'
}),
('user', ('inet:user', {}), {
'ro': True,
'doc': 'The unique identifier for the account (may be different from the common '
'name or display name).'
}),
('webpage', ('inet:url', {}), {
'doc': 'A related URL specified by the account (e.g., a personal or company web '
'page, blog, etc.).'
}),
('recovery:email', ('inet:email', {}), {
'doc': 'An email address registered as a recovery email address for the account.',
}),
)),
('inet:web:action', {}, (
('act', ('str', {'lower': True, 'strip': True}), {
'doc': 'The action performed by the account.'
}),
('acct', ('inet:web:acct', {}), {
'doc': 'The web account associated with the action.'
}),
('acct:site', ('inet:fqdn', {}), {
'doc': 'The site or service associated with the account.'
}),
('acct:user', ('inet:user', {}), {
'doc': 'The unique identifier for the account.'
}),
('time', ('time', {}), {
'doc': 'The date and time the account performed the action.'
}),
('client', ('inet:client', {}), {
'doc': 'The source client address of the action.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the action.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the action.'
}),
('loc', ('loc', {}), {
'doc': 'The location of the user executing the web action.',
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The latlong of the user when executing the web action.',
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place of the user when executing the web action.',
}),
)),
('inet:web:chprofile', {}, (
('acct', ('inet:web:acct', {}), {
'doc': 'The web account associated with the change.'
}),
('acct:site', ('inet:fqdn', {}), {
'doc': 'The site or service associated with the account.'
}),
('acct:user', ('inet:user', {}), {
'doc': 'The unique identifier for the account.'
}),
('client', ('inet:client', {}), {
'doc': 'The source address used to make the account change.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address used to make the account change.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address used to make the account change.'
}),
('time', ('time', {}), {
'doc': 'The date and time when the account change occurred.'
}),
('pv', ('nodeprop', {}), {
'doc': 'The prop=valu of the account property that was changed. Valu should be '
'the old / original value, while the new value should be updated on the '
'inet:web:acct form.'}),
('pv:prop', ('str', {}), {
'doc': 'The property that was changed.'
}),
)),
('inet:web:file', {}, (
('acct', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The account that owns or is associated with the file.'
}),
('acct:site', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The site or service associated with the account.'
}),
('acct:user', ('inet:user', {}), {
'ro': True,
'doc': 'The unique identifier for the account.'
}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file owned by or associated with the account.'
}),
('name', ('file:base', {}), {
'doc': 'The name of the file owned by or associated with the account.'
}),
('posted', ('time', {}), {
'doc': 'The date and time the file was posted / submitted.'
}),
('client', ('inet:client', {}), {
'doc': 'The source client address used to post or submit the file.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address used to post or submit the file.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address used to post or submit the file.'
}),
)),
('inet:web:follows', {}, (
('follower', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The account following an account.'
}),
('followee', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The account followed by an account.'
}),
)),
('inet:web:group', {}, (
('site', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The site or service associated with the group.'
}),
('id', ('inet:group', {}), {
'ro': True,
'doc': 'The site-specific unique identifier for the group (may be different from '
'the common name or display name).'
}),
('name', ('inet:group', {}), {
'doc': 'The localized name associated with the group (may be different from '
'the account identifier, e.g., a display name).'
}),
('aliases', ('array', {'type': 'inet:group', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the group.',
}),
('name:en', ('inet:group', {}), {
'doc': 'The English version of the name associated with the group (may be different '
'from the localized name).'
}),
('url', ('inet:url', {}), {
'doc': 'The service provider URL where the group is hosted.'
}),
('avatar', ('file:bytes', {}), {
'doc': 'The file representing the avatar (e.g., profile picture) for the group.'
}),
('desc', ('str', {}), {
'doc': 'The text of the description of the group.'
}),
('webpage', ('inet:url', {}), {
'doc': 'A related URL specified by the group (e.g., primary web site, etc.).'
}),
('loc', ('str', {'lower': True}), {
'doc': 'A self-declared location for the group.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known latitude/longitude for the node.'
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place associated with the latlong property.'
}),
('signup', ('time', {}), {
'doc': 'The date and time the group was created on the site.'
}),
('signup:client', ('inet:client', {}), {
'doc': 'The client address used to create the group.'
}),
('signup:client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address used to create the group.'
}),
('signup:client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address used to create the group.'
}),
)),
('inet:web:logon', {}, (
('acct', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The web account associated with the logon event.'
}),
('acct:site', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The site or service associated with the account.'
}),
('acct:user', ('inet:user', {}), {
'ro': True,
'doc': 'The unique identifier for the account.'
}),
('time', ('time', {}), {
'ro': True,
'doc': 'The date and time the account logged into the service.'
}),
('client', ('inet:client', {}), {
'doc': 'The source address of the logon.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the logon.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the logon.'
}),
('logout', ('time', {}), {
'ro': True,
'doc': 'The date and time the account logged out of the service.'
}),
('loc', ('loc', {}), {
'doc': 'The location of the user executing the logon.',
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The latlong of the user executing the logon.',
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place of the user executing the logon.',
}),
)),
('inet:web:memb', {}, (
('acct', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The account that is a member of the group.'
}),
('group', ('inet:web:group', {}), {
'ro': True,
'doc': 'The group that the account is a member of.'
}),
('title', ('str', {'lower': True}), {
'doc': 'The title or status of the member (e.g., admin, new member, etc.).'
}),
('joined', ('time', {}), {
'doc': 'The date / time the account joined the group.'
}),
)),
('inet:web:mesg', {}, (
('from', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The web account that sent the message.'
}),
('to', ('inet:web:acct', {}), {
'ro': True,
'doc': 'The web account that received the message.'
}),
('client', ('inet:client', {}), {
'doc': 'The source address of the message.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the message.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the message.'
}),
('time', ('time', {}), {
'ro': True,
'doc': 'The date and time at which the message was sent.'
}),
('url', ('inet:url', {}), {
'doc': 'The URL where the message is posted / visible.'
}),
('text', ('str', {}), {
'doc': 'The text of the message.',
'disp': {'hint': 'text'},
}),
('file', ('file:bytes', {}), {
'doc': 'The file attached to or sent with the message.'
}),
)),
('inet:web:post', {}, (
('acct', ('inet:web:acct', {}), {
'doc': 'The web account that made the post.'
}),
('acct:site', ('inet:fqdn', {}), {
'doc': 'The site or service associated with the account.'
}),
('client', ('inet:client', {}), {
'doc': 'The source address of the post.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the post.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the post.'
}),
('acct:user', ('inet:user', {}), {
'doc': 'The unique identifier for the account.'
}),
('text', ('str', {}), {
'doc': 'The text of the post.',
'disp': {'hint': 'text'},
}),
('time', ('time', {}), {
'doc': 'The date and time that the post was made.'
}),
('deleted', ('bool', {}), {
'doc': 'The message was deleted by the poster.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL where the post is published / visible.'
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was attached to the post.'
}),
('replyto', ('inet:web:post', {}), {
'doc': 'The post that this post is in reply to.'
}),
('repost', ('inet:web:post', {}), {
'doc': 'The original post that this is a repost of.'
}),
('hashtags', ('array', {'type': 'inet:web:hashtag', 'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'Hashtags mentioned within the post.',
}),
('mentions:users', ('array', {'type': 'inet:web:acct', 'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'Accounts mentioned within the post.',
}),
('mentions:groups', ('array', {'type': 'inet:web:group', 'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'Groups mentioned within the post.',
}),
# location protocol...
('loc', ('loc', {}), {
'doc': 'The location that the post was reportedly sent from.',
}),
('place', ('geo:place', {}), {
'doc': 'The place that the post was reportedly sent from.',
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The place that the post was reportedly sent from.',
}),
)),
('inet:web:hashtag', {}, ()),
('inet:whois:contact', {}, (
('rec', ('inet:whois:rec', {}), {
'ro': True,
'doc': 'The whois record containing the contact data.'
}),
('rec:fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The domain associated with the whois record.'
}),
('rec:asof', ('time', {}), {
'ro': True,
'doc': 'The date of the whois record.'
}),
('type', ('str', {'lower': True}), {
'doc': 'The contact type (e.g., registrar, registrant, admin, billing, tech, etc.).'
}),
('id', ('str', {'lower': True}), {
'doc': 'The ID associated with the contact.'
}),
('name', ('str', {'lower': True}), {
'doc': 'The name of the contact.'
}),
('email', ('inet:email', {}), {
'doc': 'The email address of the contact.'
}),
('orgname', ('ou:name', {}), {
'doc': 'The name of the contact organization.'
}),
('address', ('str', {'lower': True}), {
'doc': 'The content of the street address field(s) of the contact.'
}),
('city', ('str', {'lower': True}), {
'doc': 'The content of the city field of the contact.'
}),
('state', ('str', {'lower': True}), {
'doc': 'The content of the state field of the contact.'
}),
('country', ('str', {'lower': True}), {
'doc': 'The two-letter country code of the contact.'
}),
('phone', ('tel:phone', {}), {
'doc': 'The content of the phone field of the contact.'
}),
('fax', ('tel:phone', {}), {
'doc': 'The content of the fax field of the contact.'
}),
('url', ('inet:url', {}), {
'doc': 'The URL specified for the contact.'
}),
('whois:fqdn', ('inet:fqdn', {}), {
'doc': 'The whois server FQDN for the given contact (most likely a registrar).'
}),
)),
('inet:whois:rar', {}, ()),
('inet:whois:rec', {}, (
('fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The domain associated with the whois record.'
}),
('asof', ('time', {}), {
'ro': True,
'doc': 'The date of the whois record.'
}),
('text', ('str', {'lower': True}), {
'doc': 'The full text of the whois record.',
'disp': {'hint': 'text'},
}),
('created', ('time', {}), {
'doc': 'The "created" time from the whois record.'
}),
('updated', ('time', {}), {
'doc': 'The "last updated" time from the whois record.'
}),
('expires', ('time', {}), {
'doc': 'The "expires" time from the whois record.'
}),
('registrar', ('inet:whois:rar', {}), {
'doc': 'The registrar name from the whois record.'
}),
('registrant', ('inet:whois:reg', {}), {
'doc': 'The registrant name from the whois record.'
}),
)),
('inet:whois:recns', {}, (
('ns', ('inet:fqdn', {}), {
'ro': True,
'doc': 'A nameserver for a domain as listed in the domain whois record.'
}),
('rec', ('inet:whois:rec', {}), {
'ro': True,
'doc': 'The whois record containing the nameserver data.'
}),
('rec:fqdn', ('inet:fqdn', {}), {
'ro': True,
'doc': 'The domain associated with the whois record.'
}),
('rec:asof', ('time', {}), {
'ro': True,
'doc': 'The date of the whois record.'
}),
)),
('inet:whois:reg', {}, ()),
('inet:whois:email', {}, (
('fqdn', ('inet:fqdn', {}), {'ro': True,
'doc': 'The domain with a whois record containing the email address.',
}),
('email', ('inet:email', {}), {'ro': True,
'doc': 'The email address associated with the domain whois record.',
}),
)),
('inet:whois:ipquery', {}, (
('time', ('time', {}), {
'doc': 'The time the request was made.'
}),
('url', ('inet:url', {}), {
'doc': 'The query URL when using the HTTP RDAP Protocol.'
}),
('fqdn', ('inet:fqdn', {}), {
'doc': 'The FQDN of the host server when using the legacy WHOIS Protocol.'
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address queried.'
}),
('ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address queried.'
}),
('success', ('bool', {}), {
'doc': 'Whether the host returned a valid response for the query.'
}),
('rec', ('inet:whois:iprec', {}), {
'doc': 'The resulting record from the query.'
}),
)),
('inet:whois:iprec', {}, (
('net4', ('inet:net4', {}), {
'doc': 'The IPv4 address range assigned.'
}),
('net4:min', ('inet:ipv4', {}), {
'doc': 'The first IPv4 in the range assigned.'
}),
('net4:max', ('inet:ipv4', {}), {
'doc': 'The last IPv4 in the range assigned.'
}),
('net6', ('inet:net6', {}), {
'doc': 'The IPv6 address range assigned.'
}),
('net6:min', ('inet:ipv6', {}), {
'doc': 'The first IPv6 in the range assigned.'
}),
('net6:max', ('inet:ipv6', {}), {
'doc': 'The last IPv6 in the range assigned.'
}),
('asof', ('time', {}), {
'doc': 'The date of the record.'
}),
('created', ('time', {}), {
'doc': 'The "created" time from the record.'
}),
('updated', ('time', {}), {
'doc': 'The "last updated" time from the record.'
}),
('text', ('str', {'lower': True}), {
'doc': 'The full text of the record.',
'disp': {'hint': 'text'},
}),
('desc', ('str', {'lower': True}), {
'doc': 'Notes concerning the record.',
'disp': {'hint': 'text'},
}),
('asn', ('inet:asn', {}), {
'doc': 'The associated Autonomous System Number (ASN).'
}),
('id', ('inet:whois:regid', {}), {
'doc': 'The registry unique identifier (e.g. NET-74-0-0-0-1).'
}),
('name', ('str', {}), {
'doc': 'The name assigned to the network by the registrant.'
}),
('parentid', ('inet:whois:regid', {}), {
'doc': 'The registry unique identifier of the parent whois record (e.g. NET-74-0-0-0-0).'
}),
('registrant', ('inet:whois:ipcontact', {}), {
'doc': 'The registrant contact from the record.'
}),
('contacts', ('array', {'type': 'inet:whois:ipcontact'}), {
'doc': 'Additional contacts from the record.',
}),
('country', ('str', {'lower': True, 'regex': '^[a-z]{2}$'}), {
'doc': 'The two-letter ISO 3166 country code.'
}),
('status', ('str', {'lower': True}), {
'doc': 'The state of the registered network.'
}),
('type', ('str', {'lower': True}), {
'doc': 'The classification of the registered network (e.g. direct allocation).'
}),
('links', ('array', {'type': 'inet:url'}), {
'doc': 'URLs provided with the record.',
}),
)),
('inet:whois:ipcontact', {}, (
('contact', ('ps:contact', {}), {
'doc': 'Contact information associated with a registration.'
}),
('asof', ('time', {}), {
'doc': 'The date of the record.'
}),
('created', ('time', {}), {
'doc': 'The "created" time from the record.'
}),
('updated', ('time', {}), {
'doc': 'The "last updated" time from the record.'
}),
('role', ('str', {'lower': True}), {
'doc': 'The primary role for the contact.'
}),
('roles', ('array', {'type': 'str'}), {
'doc': 'Additional roles assigned to the contact.',
}),
('asn', ('inet:asn', {}), {
'doc': 'The associated Autonomous System Number (ASN).'
}),
('id', ('inet:whois:regid', {}), {
'doc': 'The registry unique identifier (e.g. NET-74-0-0-0-1).'
}),
('links', ('array', {'type': 'inet:url'}), {
'doc': 'URLs provided with the record.',
}),
('status', ('str', {'lower': True}), {
'doc': 'The state of the registered contact (e.g. validated, obscured).'
}),
('contacts', ('array', {'type': 'inet:whois:ipcontact'}), {
'doc': 'Additional contacts referenced by this contact.',
}),
)),
('inet:whois:regid', {}, ()),
('inet:wifi:ap', {}, (
('ssid', ('inet:wifi:ssid', {}), {
'doc': 'The SSID for the wireless access point.'}),
('bssid', ('inet:mac', {}), {
'doc': 'The MAC address for the wireless access point.'}),
('latlong', ('geo:latlong', {}), {
'doc': 'The best known latitude/longitude for the wireless access point.'}),
('accuracy', ('geo:dist', {}), {
'doc': 'The reported accuracy of the latlong telemetry reading.',
}),
('channel', ('int', {}), {
'doc': 'The WIFI channel that the AP was last observed operating on.',
}),
('encryption', ('str', {'lower': True, 'strip': True}), {
'doc': 'The type of encryption used by the WIFI AP such as "wpa2".',
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place associated with the latlong property.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the wireless access point.'}),
('org', ('ou:org', {}), {
'doc': 'The organization that owns/operates the access point.'}),
)),
('inet:wifi:ssid', {}, ()),
),
}),
)
| 42.302534 | 138 | 0.364039 |
0b9379e1b4069286eaf2d5acbbeb6eb6dc2ed57f | 383 | py | Python | sourceFiles/ex48_SomaImparesMultiplosDeTres.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | sourceFiles/ex48_SomaImparesMultiplosDeTres.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | sourceFiles/ex48_SomaImparesMultiplosDeTres.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | '''
Faça um programa que calcule a soma entre todos os números ímpares que
são multiplos de três e que se encontram no intervalo de 1 até 500.
'''
# Collect every odd multiple of three between 1 and 500, then report
# how many there are and their total.
valores = [n for n in range(1, 501, 2) if n % 3 == 0]
cont = len(valores)   # how many qualifying numbers were found
soma = sum(valores)   # their accumulated total
print('A soma de todos os {} valores solicitados é {}'.format(cont, soma))
| 29.461538 | 75 | 0.613577 |
f4296f17fe6501f0e6b9390708cbf1764290e9a4 | 307 | py | Python | worldcup18/utils.py | tadeoos/worldcup | c167251c7a6131253a1ea1914a0e6e418e9f32a7 | [
"MIT"
] | 2 | 2018-06-16T04:05:39.000Z | 2018-06-18T12:20:37.000Z | worldcup18/utils.py | tadeoos/worldcup | c167251c7a6131253a1ea1914a0e6e418e9f32a7 | [
"MIT"
] | null | null | null | worldcup18/utils.py | tadeoos/worldcup | c167251c7a6131253a1ea1914a0e6e418e9f32a7 | [
"MIT"
] | null | null | null | # - *- coding: utf- 8 - *-
from __future__ import absolute_import
import dateutil.parser
from tzlocal import get_localzone
def iso_to_datetime(s):
    """Parse a timestamp string (e.g. ISO-8601) into a ``datetime``.

    Delegates to ``dateutil.parser.parse``, which accepts a wide range
    of textual date formats and raises ``ValueError`` on unparseable
    input.
    """
    return dateutil.parser.parse(s)
def get_nice_date(date):
    """Render a timestamp string as a readable local-time string.

    Parses ``date`` with :func:`iso_to_datetime`, converts it to the
    machine's local timezone (``tzlocal``), and formats it as
    "<Weekday>, <day>. <Month> <year> <hour>:<minute><AM/PM>".
    """
    return iso_to_datetime(date).astimezone(get_localzone()).strftime("%A, %d. %B %Y %I:%M%p")
| 23.615385 | 94 | 0.726384 |
4b641ef241e121e33b749c4463117e1c6b615701 | 10,686 | py | Python | tests/core/ssl/test_ssl.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 10 | 2021-07-04T15:14:12.000Z | 2021-10-17T14:52:56.000Z | tests/core/ssl/test_ssl.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 11 | 2021-07-04T19:31:36.000Z | 2022-01-11T02:46:23.000Z | tests/core/ssl/test_ssl.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 11 | 2021-07-04T21:49:17.000Z | 2021-10-04T17:45:38.000Z | import asyncio
import aiohttp
import pytest
from equality.protocols.shared_protocol import protocol_version
from equality.server.outbound_message import NodeType
from equality.server.server import EqualityServer, ssl_context_for_client
from equality.server.ws_connection import WSEqualityConnection
from equality.ssl.create_ssl import generate_ca_signed_cert
from equality.types.peer_info import PeerInfo
from equality.util.block_tools import test_constants
from equality.util.ints import uint16
from tests.setup_nodes import (
bt,
self_hostname,
setup_farmer_harvester,
setup_introducer,
setup_simulators_and_wallets,
setup_timelord,
)
async def establish_connection(server: EqualityServer, dummy_port: int, ssl_context) -> bool:
    """Try to open an authenticated websocket connection to ``server``.

    Connects to the server's ``/ws`` endpoint with the supplied
    ``ssl_context``, wraps the socket in a :class:`WSEqualityConnection`
    posing as a full node advertised on ``dummy_port``, and performs the
    protocol handshake.

    :param server: server under test; its ``_port``, ``log`` and
        ``_network_id`` attributes are used for the connection/handshake.
    :param dummy_port: the port reported as our own during the handshake.
    :param ssl_context: client-side SSL context used for the TLS handshake.
    :return: True when the handshake succeeded, False on any failure
        (TLS rejection, connection error, failed handshake).
    """
    timeout = aiohttp.ClientTimeout(total=10)
    session = aiohttp.ClientSession(timeout=timeout)
    try:
        incoming_queue: asyncio.Queue = asyncio.Queue()
        url = f"wss://{self_hostname}:{server._port}/ws"
        ws = await session.ws_connect(url, autoclose=False, autoping=True, ssl=ssl_context)
        wsc = WSEqualityConnection(
            NodeType.FULL_NODE,
            ws,
            server._port,
            server.log,
            True,
            False,
            self_hostname,
            incoming_queue,
            lambda x, y: x,
            None,
            100,
            30,
        )
        return await wsc.perform_handshake(server._network_id, protocol_version, dummy_port, NodeType.FULL_NODE)
    except Exception:
        # Certificate rejections and network errors surface as exceptions;
        # any of them counts as "could not establish a connection".
        return False
    finally:
        # Single guaranteed cleanup path instead of duplicating
        # ``await session.close()`` in both the success and failure branches.
        await session.close()
class TestSSL:
    """End-to-end checks of TLS client authentication per service role.

    Each test generates a client certificate signed either by the node's
    private CA (``ca_private_*`` paths) or by the public "equality" CA
    (``equality_ca_*`` paths), then verifies whether the service under
    test accepts or rejects the resulting TLS connection.
    """

    @pytest.fixture(scope="function")
    async def harvester_farmer(self):
        # Fresh harvester/farmer pair for every test function.
        async for _ in setup_farmer_harvester(test_constants):
            yield _

    @pytest.fixture(scope="function")
    async def wallet_node(self):
        # One simulator full node and one wallet for every test function.
        async for _ in setup_simulators_and_wallets(1, 1, {}):
            yield _

    @pytest.fixture(scope="function")
    async def introducer(self):
        async for _ in setup_introducer(21233):
            yield _

    @pytest.fixture(scope="function")
    async def timelord(self):
        async for _ in setup_timelord(21236, 21237, False, test_constants, bt):
            yield _

    @pytest.mark.asyncio
    async def test_public_connections(self, wallet_node):
        """A wallet can connect to a full node's public interface."""
        full_nodes, wallets = wallet_node
        full_node_api = full_nodes[0]
        server_1: EqualityServer = full_node_api.full_node.server
        wallet_node, server_2 = wallets[0]
        success = await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
        assert success is True

    @pytest.mark.asyncio
    async def test_farmer(self, harvester_farmer):
        """Farmer accepts private-CA-signed certs and rejects public-CA ones."""
        harvester_api, farmer_api = harvester_farmer
        farmer_server = farmer_api.farmer.server
        # Create valid cert (valid meaning signed with private CA)
        priv_crt = farmer_server._private_key_path.parent / "valid.crt"
        priv_key = farmer_server._private_key_path.parent / "valid.key"
        generate_ca_signed_cert(
            farmer_server.ca_private_crt_path.read_bytes(),
            farmer_server.ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(
            farmer_server.ca_private_crt_path, farmer_server.ca_private_crt_path, priv_crt, priv_key
        )
        connected = await establish_connection(farmer_server, 12312, ssl_context)
        assert connected is True
        # Create not authenticated cert
        pub_crt = farmer_server._private_key_path.parent / "non_valid.crt"
        pub_key = farmer_server._private_key_path.parent / "non_valid.key"
        generate_ca_signed_cert(
            farmer_server.equality_ca_crt_path.read_bytes(), farmer_server.equality_ca_key_path.read_bytes(), pub_crt, pub_key
        )
        ssl_context = ssl_context_for_client(
            farmer_server.equality_ca_crt_path, farmer_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(farmer_server, 12312, ssl_context)
        assert connected is False
        # Even when presented over the private-CA trust chain, the
        # public-CA-signed cert must still be rejected.
        ssl_context = ssl_context_for_client(
            farmer_server.ca_private_crt_path, farmer_server.ca_private_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(farmer_server, 12312, ssl_context)
        assert connected is False

    @pytest.mark.asyncio
    async def test_full_node(self, wallet_node):
        """Full node accepts connections made with public-CA-signed certs."""
        full_nodes, wallets = wallet_node
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        # Create not authenticated cert
        pub_crt = full_node_server._private_key_path.parent / "p2p.crt"
        pub_key = full_node_server._private_key_path.parent / "p2p.key"
        generate_ca_signed_cert(
            full_node_server.equality_ca_crt_path.read_bytes(),
            full_node_server.equality_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(
            full_node_server.equality_ca_crt_path, full_node_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(full_node_server, 12312, ssl_context)
        assert connected is True

    @pytest.mark.asyncio
    async def test_wallet(self, wallet_node):
        """Wallet rejects all incoming connections, whichever CA signed."""
        full_nodes, wallets = wallet_node
        wallet_node, wallet_server = wallets[0]
        # Wallet should not accept incoming connections
        pub_crt = wallet_server._private_key_path.parent / "p2p.crt"
        pub_key = wallet_server._private_key_path.parent / "p2p.key"
        generate_ca_signed_cert(
            wallet_server.equality_ca_crt_path.read_bytes(), wallet_server.equality_ca_key_path.read_bytes(), pub_crt, pub_key
        )
        ssl_context = ssl_context_for_client(
            wallet_server.equality_ca_crt_path, wallet_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(wallet_server, 12312, ssl_context)
        assert connected is False
        # Not even signed by private cert
        priv_crt = wallet_server._private_key_path.parent / "valid.crt"
        priv_key = wallet_server._private_key_path.parent / "valid.key"
        generate_ca_signed_cert(
            wallet_server.ca_private_crt_path.read_bytes(),
            wallet_server.ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(
            wallet_server.ca_private_crt_path, wallet_server.ca_private_crt_path, priv_crt, priv_key
        )
        connected = await establish_connection(wallet_server, 12312, ssl_context)
        assert connected is False

    @pytest.mark.asyncio
    async def test_harvester(self, harvester_farmer):
        """Harvester rejects all incoming connections, whichever CA signed."""
        harvester, farmer_api = harvester_farmer
        harvester_server = harvester.server
        # harvester should not accept incoming connections
        pub_crt = harvester_server._private_key_path.parent / "p2p.crt"
        pub_key = harvester_server._private_key_path.parent / "p2p.key"
        generate_ca_signed_cert(
            harvester_server.equality_ca_crt_path.read_bytes(),
            harvester_server.equality_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(
            harvester_server.equality_ca_crt_path, harvester_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(harvester_server, 12312, ssl_context)
        assert connected is False
        # Not even signed by private cert
        priv_crt = harvester_server._private_key_path.parent / "valid.crt"
        priv_key = harvester_server._private_key_path.parent / "valid.key"
        generate_ca_signed_cert(
            harvester_server.ca_private_crt_path.read_bytes(),
            harvester_server.ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(
            harvester_server.ca_private_crt_path, harvester_server.ca_private_crt_path, priv_crt, priv_key
        )
        connected = await establish_connection(harvester_server, 12312, ssl_context)
        assert connected is False

    @pytest.mark.asyncio
    async def test_introducer(self, introducer):
        """Introducer accepts connections made with public-CA-signed certs."""
        introducer_api, introducer_server = introducer
        # Create not authenticated cert
        pub_crt = introducer_server.equality_ca_key_path.parent / "p2p.crt"
        pub_key = introducer_server.equality_ca_key_path.parent / "p2p.key"
        generate_ca_signed_cert(
            introducer_server.equality_ca_crt_path.read_bytes(),
            introducer_server.equality_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(
            introducer_server.equality_ca_crt_path, introducer_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(introducer_server, 12312, ssl_context)
        assert connected is True

    @pytest.mark.asyncio
    async def test_timelord(self, timelord):
        """Timelord rejects all incoming connections, whichever CA signed."""
        timelord_api, timelord_server = timelord
        # timelord should not accept incoming connections
        pub_crt = timelord_server._private_key_path.parent / "p2p.crt"
        pub_key = timelord_server._private_key_path.parent / "p2p.key"
        generate_ca_signed_cert(
            timelord_server.equality_ca_crt_path.read_bytes(),
            timelord_server.equality_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(
            timelord_server.equality_ca_crt_path, timelord_server.equality_ca_crt_path, pub_crt, pub_key
        )
        connected = await establish_connection(timelord_server, 12312, ssl_context)
        assert connected is False
        # Not even signed by private cert
        priv_crt = timelord_server._private_key_path.parent / "valid.crt"
        priv_key = timelord_server._private_key_path.parent / "valid.key"
        generate_ca_signed_cert(
            timelord_server.ca_private_crt_path.read_bytes(),
            timelord_server.ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(
            timelord_server.ca_private_crt_path, timelord_server.ca_private_crt_path, priv_crt, priv_key
        )
        connected = await establish_connection(timelord_server, 12312, ssl_context)
        assert connected is False
| 40.942529 | 126 | 0.692588 |
04c10e0304c1e27d052f0c89051c3ca4135480f6 | 18,165 | py | Python | openquake/hazardlib/gsim/bradley_2013.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 1 | 2019-08-01T00:28:24.000Z | 2019-08-01T00:28:24.000Z | openquake/hazardlib/gsim/bradley_2013.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 4 | 2018-08-31T14:14:35.000Z | 2021-10-11T12:53:13.000Z | openquake/hazardlib/gsim/bradley_2013.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 3 | 2018-08-31T14:11:00.000Z | 2019-07-17T10:06:02.000Z | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`Bradley2013`, :class:`Bradley2013Volc`.
"""
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class Bradley2013(GMPE):
    """
    Implements GMPE developed by Brendan Bradley for Active Shallow Crust
    Earthquakes for New Zealand, and published as "A New Zealand-Specific
    Pseudospectral Acceleration Ground-Motion Prediction Equation for Active
    Shallow Crustal Earthquakes Based on Foreign Models" (2013, Bulletin of
    the Seismological Society of America, Volume 103, No. 3, pages 1801-1822).
    This model is modified from Chiou and Youngs, 2008 and has been adapted
    for New Zealand conditions. Specifically, the modifications are related to:
    1) small magnitude scaling;
    2) scaling of short period ground motion from normal faulting events in
    volcanic crust;
    3) scaling of ground motions on very hard rock sites;
    4) anelastic attenuation in the New Zealand crust;
    5) consideration of the increased anelastic attenuation in the Taupo
    Volcanic Zone (not implemented in this model, use Bradley2013Volc)
    """
    #: Supported tectonic region type is active shallow crust, see page 1801
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST

    #: Supported intensity measure types are spectral acceleration,
    #: peak ground velocity and peak ground acceleration. Note that PGV is
    #: the Chiou & Youngs PGV and has not been modified for New Zealand.
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        SA
    ])

    #: Supported intensity measure component is geometric mean
    #: of two horizontal components
    #: attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`,
    #: see abstract page 1801.
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL

    #: Supported standard deviation types are inter-event, intra-event
    #: and total, see chapter "Variance model".
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL,
        const.StdDev.INTER_EVENT,
        const.StdDev.INTRA_EVENT
    ])

    #: Required site parameters are Vs30 (eq. 13b), Vs30 measured flag (eq. 20)
    #: and Z1.0 (eq. 13b).
    REQUIRES_SITES_PARAMETERS = set(('vs30', 'vs30measured', 'z1pt0'))

    #: Required rupture parameters are magnitude, rake (eq. 13a and 13b),
    #: dip (eq. 13a) and ztor (eq. 13a).
    REQUIRES_RUPTURE_PARAMETERS = set(('dip', 'rake', 'mag', 'ztor'))

    #: Required distance measures are RRup, Rjb and Rx (all are in eq. 13a).
    REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx'))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        # extracting dictionary of coefficients specific to required
        # intensity measure type.
        C = self.COEFFS[imt]
        # intensity on a reference soil is used for both mean
        # and stddev calculations.
        ln_y_ref = self._get_ln_y_ref(rup, dists, C)
        # exp1 and exp2 are parts of eq. 7
        exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360))
        exp2 = np.exp(C['phi3'] * (1130 - 360))
        # v1 is the period dependent site term. The Vs30 above which, the
        # amplification is constant
        v1 = self._get_v1(imt)
        mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2, v1)
        stddevs = self._get_stddevs(sites, rup, C, stddev_types,
                                    ln_y_ref, exp1, exp2)
        return mean, stddevs

    def _get_mean(self, sites, C, ln_y_ref, exp1, exp2, v1):
        """
        Add site effects to an intensity.

        Implements eq. 5 and returns the natural log of the mean
        intensity including the site amplification terms.
        """
        # we do not support estimating of basin depth and instead
        # rely on it being available (since we require it).
        z1pt0 = sites.z1pt0
        # we consider random variables being zero since we want
        # to find the exact mean value.
        eta = epsilon = 0
        ln_y = (
            # first line of eq. 13b
            ln_y_ref + C['phi1'] *
            np.log(np.clip(sites.vs30, -np.inf, v1) / 1130)
            # second line
            + C['phi2'] * (exp1 - exp2)
            * np.log((np.exp(ln_y_ref) + C['phi4']) / C['phi4'])
            # third line
            + C['phi5']
            * (1.0 - 1.0 / np.cosh(
                C['phi6'] * (z1pt0 - C['phi7']).clip(0, np.inf)))
            + C['phi8'] / np.cosh(0.15 * (z1pt0 - 15).clip(0, np.inf))
            # fourth line
            + eta + epsilon
        )
        return ln_y

    def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2):
        """
        Get standard deviation for a given intensity on reference soil.

        Implements equations 19, 20 and 21 of Chiou & Youngs, 2008 for
        inter-event, intra-event and total standard deviations respectively.
        This has not been modified for NZ conditions.

        Returns a list with one entry per requested ``stddev_types``,
        in the same order.
        """
        # aftershock flag is zero, we consider only main shock.
        AS = 0
        Fmeasured = sites.vs30measured
        Finferred = 1 - sites.vs30measured
        # eq. 19 to calculate inter-event standard error
        mag_test = min(max(rup.mag, 5.0), 7.0) - 5.0
        tau = C['tau1'] + (C['tau2'] - C['tau1']) / 2 * mag_test
        # b and c coeffs from eq. 10
        b = C['phi2'] * (exp1 - exp2)
        c = C['phi4']
        y_ref = np.exp(ln_y_ref)
        # eq. 20
        NL = b * y_ref / (y_ref + c)
        sigma = (
            # first line of eq. 20
            (C['sig1']
             + 0.5 * (C['sig2'] - C['sig1']) * mag_test
             + C['sig4'] * AS)
            # second line
            * np.sqrt((C['sig3'] * Finferred + 0.7 * Fmeasured)
                      + (1 + NL) ** 2)
        )
        ret = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            if stddev_type == const.StdDev.TOTAL:
                # eq. 21
                ret += [np.sqrt(((1 + NL) ** 2) * (tau ** 2) + (sigma ** 2))]
            elif stddev_type == const.StdDev.INTRA_EVENT:
                ret.append(sigma)
            elif stddev_type == const.StdDev.INTER_EVENT:
                # this is implied in eq. 21
                ret.append(np.abs((1 + NL) * tau))
        return ret

    def _get_ln_y_ref(self, rup, dists, C):
        """
        Get an intensity on a reference soil.

        Implements eq. 4 in Bradley 2013. This is the same as Chiou and
        Youngs 2008, with addition of TVZ attenuation term, and addition of
        c8 which constrains the ZTOR. Note that the TVZ scaling is set to 1
        (i.e. no TVZ attenuation) because :meth:`_get_tvz_path_distance`
        returns zero in this class.
        """
        # Taupo Volcanic Zone Path Distance. Set to zero.
        rtvz = self._get_tvz_path_distance(dists.rrup)
        # reverse faulting flag
        Frv = 1 if 30 <= rup.rake <= 150 else 0
        # normal faulting flag
        Fnm = 1 if -120 <= rup.rake <= -60 else 0
        # hanging wall flag
        Fhw = (dists.rx >= 0)
        # aftershock flag. always zero since we only consider main shock
        AS = 0
        ln_y_ref = (
            # first line of eq. 4 in Bradley 2013
            C['c1']
            + (C['c1a'] * Frv
               + C['c1b'] * Fnm
               + C['c7'] * (np.clip(rup.ztor, -np.inf, C['c8']) - 4))
            * (1 - AS)
            + (C['c10'] + C['c7a'] * (rup.ztor - 4)) * AS
            # second line
            + C['c2'] * (rup.mag - 6)
            + ((C['c2'] - C['c3']) / C['cn'])
            * np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag)))
            # third line
            + C['c4']
            * np.log(dists.rrup
                     + C['c5']
                     * np.cosh(C['c6'] * max(rup.mag - C['chm'], 0)))
            # fourth line
            + (C['c4a'] - C['c4'])
            * np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2))
            # fifth line
            + (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0))))
            # sixth line
            * ((1 + C['ctvz'] * (rtvz / dists.rrup)) * dists.rrup)
            # seventh line
            + C['c9'] * Fhw
            * np.tanh(dists.rx
                      * (np.cos(np.radians(rup.dip)) ** 2)
                      / C['c9a'])
            * (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2)
               / (dists.rrup + 0.001))
        )
        return ln_y_ref

    def _get_v1(self, imt):
        """
        Calculates Bradley's V1 term. Equation 2 (page 1814) and 6 (page 1816)
        based on SA period. For PGA the term is the constant 1800; for SA it
        is period dependent and clipped into the [1130, 1800] range.
        """
        if imt == PGA():
            v1 = 1800.
        else:
            T = imt.period
            v1a = np.clip((1130 * (T / 0.75)**-0.11), 1130, np.inf)
            v1 = np.clip(v1a, -np.inf, 1800.)
        return v1

    def _get_tvz_path_distance(self, rrup):
        """
        Returns Taupo Volcanic Zone (TVZ) path distance.
        Set to zero. Overridden by :class:`Bradley2013Volc`.
        """
        return 0

    #: Coefficient tables are constructed from values in tables 1, 2 and 3
    #: (pages 197, 198 and 199) in Chiou & Youngs,2008. Only Coefficients c1,
    #: c1b, c3, cm, c8, cg1, cg2, ctvz are modified by Bradley 2013.
    #: Spectral acceleration is defined for damping of 5%, see page 208 (CY08).
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT c2 c3 c4 c4a crb chm cg3 c1 c1a c1b cn cm c5 c6 c7 c7a c8 c9 c9a c10 cg1 cg2 ctvz phi1 phi2 phi3 phi4 phi5 phi6 phi7 phi8 tau1 tau2 sig1 sig2 sig3 sig4
    pga 1.06 1.50000 -2.1 -0.5 50.0 3.0 4.0 -1.1985 0.1000 -0.4550 2.996 5.85000 6.1600 0.4893 0.0512 0.0860 10.00 0.7900 1.5005 -0.3218 -0.00960 -0.00480 2.000 -0.4417 -0.1417 -0.007010 0.102151 0.2289 0.014996 580.0 0.0700 0.3437 0.2637 0.4458 0.3459 0.8000 0.0663
    0.010 1.06 1.50299 -2.1 -0.5 50.0 3.0 4.0 -1.1958 0.1000 -0.4550 2.996 5.81711 6.1600 0.4893 0.0512 0.0860 10.00 0.7900 1.5005 -0.3218 -0.00960 -0.00481 2.000 -0.4417 -0.1417 -0.007010 0.102151 0.2289 0.014996 580.0 0.0700 0.3437 0.2637 0.4458 0.3459 0.8000 0.0663
    0.020 1.06 1.50845 -2.1 -0.5 50.0 3.0 4.0 -1.1756 0.1000 -0.4550 3.292 5.80023 6.1580 0.4892 0.0512 0.0860 10.00 0.8129 1.5028 -0.3323 -0.00970 -0.00486 2.000 -0.4340 -0.1364 -0.007279 0.108360 0.2289 0.014996 580.0 0.0699 0.3471 0.2671 0.4458 0.3459 0.8000 0.0663
    0.030 1.06 1.51549 -2.1 -0.5 50.0 3.0 4.0 -1.0909 0.1000 -0.4550 3.514 5.78659 6.1550 0.4890 0.0511 0.0860 10.00 0.8439 1.5071 -0.3394 -0.01010 -0.00503 2.000 -0.4177 -0.1403 -0.007354 0.119888 0.2289 0.014996 580.0 0.0701 0.3603 0.2803 0.4535 0.3537 0.8000 0.0663
    0.040 1.06 1.52380 -2.1 -0.5 50.0 3.0 4.0 -0.9793 0.1000 -0.4550 3.563 5.77472 6.1508 0.4888 0.0508 0.0860 10.00 0.8740 1.5138 -0.3453 -0.01050 -0.00526 2.000 -0.4000 -0.1591 -0.006977 0.133641 0.2289 0.014996 579.9 0.0702 0.3718 0.2918 0.4589 0.3592 0.8000 0.0663
    0.050 1.06 1.53319 -2.1 -0.5 50.0 3.0 4.0 -0.8549 0.1000 -0.4550 3.547 5.76402 6.1441 0.4884 0.0504 0.0860 10.00 0.8996 1.5230 -0.3502 -0.01090 -0.00549 2.000 -0.3903 -0.1862 -0.006467 0.148927 0.2290 0.014996 579.9 0.0701 0.3848 0.3048 0.4630 0.3635 0.8000 0.0663
    0.075 1.06 1.56053 -2.1 -0.5 50.0 3.0 4.0 -0.6008 0.1000 -0.4540 3.448 5.74056 6.1200 0.4872 0.0495 0.0860 10.00 0.9442 1.5597 -0.3579 -0.01170 -0.00588 2.000 -0.4040 -0.2538 -0.005734 0.190596 0.2292 0.014996 579.6 0.0686 0.3878 0.3129 0.4702 0.3713 0.8000 0.0663
    0.10 1.06 1.59241 -2.1 -0.5 50.0 3.0 4.0 -0.4700 0.1000 -0.4530 3.312 5.72017 6.0850 0.4854 0.0489 0.0860 10.00 0.9677 1.6104 -0.3604 -0.01170 -0.00591 2.000 -0.4423 -0.2943 -0.005604 0.230662 0.2297 0.014996 579.2 0.0646 0.3835 0.3152 0.4747 0.3769 0.8000 0.0663
    0.15 1.06 1.66640 -2.1 -0.5 50.0 3.0 4.0 -0.4139 0.1000 -0.4500 3.044 5.68493 5.9871 0.4808 0.0479 0.0860 10.00 0.9660 1.7549 -0.3565 -0.01110 -0.00540 2.000 -0.5162 -0.3113 -0.005845 0.266468 0.2326 0.014988 577.2 0.0494 0.3719 0.3128 0.4798 0.3847 0.8000 0.0612
    0.20 1.06 1.75021 -2.1 -0.5 50.0 3.0 4.0 -0.5237 0.1000 -0.4149 2.831 5.65435 5.8699 0.4755 0.0471 0.0860 10.00 0.9334 1.9157 -0.3470 -0.01000 -0.00479 2.000 -0.5697 -0.2927 -0.006141 0.255253 0.2386 0.014964 573.9 -0.0019 0.3601 0.3076 0.4816 0.3902 0.8000 0.0530
    0.25 1.06 1.84052 -2.1 -0.5 50.0 3.0 4.0 -0.6678 0.1000 -0.3582 2.658 5.62686 5.7547 0.4706 0.0464 0.0860 10.50 0.8946 2.0709 -0.3379 -0.00910 -0.00427 2.000 -0.6109 -0.2662 -0.006439 0.231541 0.2497 0.014881 568.5 -0.0479 0.3522 0.3047 0.4815 0.3946 0.7999 0.0457
    0.30 1.06 1.93480 -2.1 -0.5 50.0 3.0 4.0 -0.8277 0.0999 -0.3113 2.505 5.60162 5.6527 0.4665 0.0458 0.0860 11.00 0.8590 2.2005 -0.3314 -0.00820 -0.00384 2.500 -0.6444 -0.2405 -0.006704 0.207277 0.2674 0.014639 560.5 -0.0756 0.3438 0.3005 0.4801 0.3981 0.7997 0.0398
    0.40 1.06 2.12764 -2.1 -0.5 50.0 3.0 4.0 -1.1284 0.0997 -0.2646 2.261 5.55602 5.4997 0.4607 0.0445 0.0850 12.00 0.8019 2.3886 -0.3256 -0.00690 -0.00317 3.200 -0.6931 -0.1975 -0.007125 0.165464 0.3120 0.013493 540.0 -0.0960 0.3351 0.2984 0.4758 0.4036 0.7988 0.0312
    0.50 1.06 2.31684 -2.1 -0.5 50.0 3.0 4.0 -1.3926 0.0991 -0.2272 2.087 5.51513 5.4029 0.4571 0.0429 0.0830 13.00 0.7578 2.5000 -0.3189 -0.00590 -0.00272 3.500 -0.7246 -0.1633 -0.007435 0.133828 0.3610 0.011133 512.9 -0.0998 0.3353 0.3036 0.4710 0.4079 0.7966 0.0255
    0.75 1.06 2.73064 -2.1 -0.5 50.0 3.0 4.0 -1.8664 0.0936 -0.1620 1.812 5.38632 5.2900 0.4531 0.0387 0.0690 14.00 0.6788 2.6224 -0.2702 -0.00450 -0.00209 4.500 -0.7708 -0.1028 -0.008120 0.085153 0.4353 0.006739 441.9 -0.0765 0.3429 0.3205 0.4621 0.4157 0.7792 0.0175
    1.0 1.06 3.03000 -2.1 -0.5 50.0 3.0 4.0 -2.1935 0.0766 -0.1400 1.648 5.31000 5.2480 0.4517 0.0350 0.0450 15.00 0.6196 2.6690 -0.2059 -0.00370 -0.00175 5.000 -0.7990 -0.0699 -0.008444 0.058595 0.4629 0.005749 391.8 -0.0412 0.3577 0.3419 0.4581 0.4213 0.7504 0.0133
    1.5 1.06 3.43384 -2.1 -0.5 50.0 3.0 4.0 -2.6883 0.0022 -0.1184 1.511 5.29995 5.2194 0.4507 0.0280 0.0134 16.00 0.5101 2.6985 -0.0852 -0.00280 -0.00142 5.400 -0.8382 -0.0425 -0.007707 0.031787 0.4756 0.005544 348.1 0.0140 0.3769 0.3703 0.4493 0.4213 0.7136 0.0090
    2.0 1.06 3.67464 -2.1 -0.5 50.0 3.0 4.0 -3.1040 -0.0591 -0.1100 1.470 5.32730 5.2099 0.4504 0.0213 0.0040 18.00 0.3917 2.7085 0.0160 -0.00230 -0.00143 5.800 -0.8663 -0.0302 -0.004792 0.019716 0.4785 0.005521 332.5 0.0544 0.4023 0.4023 0.4459 0.4213 0.7035 0.0068
    3.0 1.06 3.64933 -2.1 -0.5 50.0 3.0 4.0 -3.7085 -0.0931 -0.1040 1.456 5.43850 5.2040 0.4501 0.0106 0.0010 19.00 0.1244 2.7145 0.1876 -0.00190 -0.00115 6.000 -0.9032 -0.0129 -0.001828 0.009643 0.4796 0.005517 324.1 0.1232 0.4406 0.4406 0.4433 0.4213 0.7006 0.0045
    4.0 1.06 3.60999 -2.1 -0.5 50.0 3.0 4.0 -4.1486 -0.0982 -0.1020 1.465 5.59770 5.2020 0.4501 0.0041 0.0000 19.75 0.0086 2.7164 0.3378 -0.00180 -0.00104 6.150 -0.9231 -0.0016 -0.001523 0.005379 0.4799 0.005517 321.7 0.1859 0.4784 0.4784 0.4424 0.4213 0.7001 0.0034
    5.0 1.06 3.50000 -2.1 -0.5 50.0 3.0 4.0 -4.4881 -0.0994 -0.1010 1.478 5.72760 5.2010 0.4500 0.0010 0.0000 20.00 0.0000 2.7172 0.4579 -0.00170 -0.00099 6.300 -0.9222 0.0000 -0.001440 0.003223 0.4799 0.005517 320.9 0.2295 0.5074 0.5074 0.4420 0.4213 0.7000 0.0027
    7.5 1.06 3.45000 -2.1 -0.5 50.0 3.0 4.0 -5.0891 -0.0999 -0.1010 1.498 5.98910 5.2000 0.4500 0.0000 0.0000 20.00 0.0000 2.7177 0.7514 -0.00170 -0.00094 6.425 -0.8346 0.0000 -0.001369 0.001134 0.4800 0.005517 320.3 0.2660 0.5328 0.5328 0.4416 0.4213 0.7000 0.0018
    10.0 1.06 3.45000 -2.1 -0.5 50.0 3.0 4.0 -5.5530 -0.1000 -0.1000 1.502 6.19300 5.2000 0.4500 0.0000 0.0000 20.00 0.0000 2.7180 1.1856 -0.00170 -0.00091 6.550 -0.7332 0.0000 -0.001361 0.000515 0.4800 0.005517 320.1 0.2682 0.5542 0.5542 0.4414 0.4213 0.7000 0.0014
    """)
class Bradley2013Volc(Bradley2013):
    """
    Variant of :class:`Bradley2013` for ruptures whose travel path crosses
    the Taupo Volcanic Zone (TVZ), where anelastic attenuation is stronger
    (rtvz is taken equal to rrup).

    Implements the GMPE developed by Brendan Bradley for Active Shallow
    Crust earthquakes in New Zealand, published as "A New Zealand-Specific
    Pseudospectral Acceleration Ground-Motion Prediction Equation for Active
    Shallow Crustal Earthquakes Based on Foreign Models" (2013, Bulletin of
    the Seismological Society of America, Volume 103, No. 3, pages
    1801-1822). The model adapts Chiou and Youngs (2008) to New Zealand
    conditions; specifically, the modifications concern:
    1) small magnitude scaling;
    2) scaling of short period ground motion from normal faulting events in
       volcanic crust;
    3) scaling of ground motions on very hard rock sites;
    4) anelastic attenuation in the New Zealand crust;
    5) the increased anelastic attenuation inside the Taupo Volcanic Zone.
    """
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.VOLCANIC

    def _get_tvz_path_distance(self, rrup):
        """
        Return the Taupo Volcanic Zone (TVZ) path distance.

        The New Zealand seismic hazard model implementation assumes
        rtvz == rrup, so the rupture distance is returned unchanged.
        """
        return rrup
| 55.045455 | 272 | 0.613047 |
320e8de89c10f0bccfbb8a67d626d4ab73dbf422 | 8,648 | py | Python | sorl/thumbnail/engines/pil_engine.py | ixc/sorl-thumbnail | d12a23492390234eed8a951a294ebc34f36dfd26 | [
"BSD-3-Clause"
] | null | null | null | sorl/thumbnail/engines/pil_engine.py | ixc/sorl-thumbnail | d12a23492390234eed8a951a294ebc34f36dfd26 | [
"BSD-3-Clause"
] | null | null | null | sorl/thumbnail/engines/pil_engine.py | ixc/sorl-thumbnail | d12a23492390234eed8a951a294ebc34f36dfd26 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
import math
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import BufferIO
try:
from PIL import Image, ImageFile, ImageDraw, ImageChops, ImageFilter
except ImportError:
import Image, ImageFile, ImageDraw, ImageChops
def round_corner(radius, fill):
    """Render the top-left quarter of a disc as an ``L``-mode mask tile."""
    mask = Image.new('L', (radius, radius), 0)
    pen = ImageDraw.Draw(mask)
    # A pie slice from 180 to 270 degrees fills exactly the top-left corner arc.
    pen.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)
    return mask
def round_rectangle(size, radius, fill):
    """Build an ``L``-mode rounded-rectangle mask of the given ``size``.

    Note: the corners are always drawn at full opacity (255); the ``fill``
    argument is accepted but not used, matching the historical behaviour.
    """
    width, height = size
    mask = Image.new('L', size, 255)
    corner = round_corner(radius, 255)
    # (rotation, paste position) for each of the four corners.
    placements = (
        (0, (0, 0)),
        (90, (0, height - radius)),
        (180, (width - radius, height - radius)),
        (270, (width - radius, 0)),
    )
    for angle, position in placements:
        tile = corner.rotate(angle) if angle else corner
        mask.paste(tile, position)
    return mask
class GaussianBlur(ImageFilter.Filter):
    # PIL filter wrapper applying a Gaussian blur with a configurable radius.
    # NOTE(review): presumably provided for PIL versions whose ImageFilter
    # lacks a parameterizable GaussianBlur — confirm against supported PIL.
    name = "GaussianBlur"
    def __init__(self, radius=2):
        # radius: blur radius, forwarded unchanged to PIL's C-level blur.
        self.radius = radius
    def filter(self, image):
        # Delegates to the ImagingCore gaussian_blur primitive.
        return image.gaussian_blur(self.radius)
class Engine(EngineBase):
    """Thumbnailing engine implemented on top of PIL / Pillow.

    Provides the concrete image operations required by ``EngineBase``:
    loading, validation, cropping, scaling, colorspace conversion,
    entropy-based smart cropping and serialization back to raw bytes.
    """

    def get_image(self, source):
        """Open ``source`` (an object with ``read()``) as a PIL image."""
        buffer = BufferIO(source.read())
        return Image.open(buffer)

    def get_image_size(self, image):
        """Return the image size as a ``(width, height)`` tuple."""
        return image.size

    def get_image_info(self, image):
        """Return the PIL metadata dict of the image (never ``None``)."""
        return image.info or {}

    def is_valid_image(self, raw_data):
        """Return ``True`` if ``raw_data`` parses and verifies as an image."""
        buffer = BufferIO(raw_data)
        try:
            trial_image = Image.open(buffer)
            trial_image.verify()
        except Exception:
            # PIL raises a variety of exception types on corrupt data.
            return False
        return True

    def _cropbox(self, image, x, y, x2, y2):
        """Crop to the absolute pixel box ``(x, y, x2, y2)``."""
        return image.crop((x, y, x2, y2))

    def _orientation(self, image):
        """Rotate/flip the image according to its EXIF orientation tag (0x0112)."""
        try:
            exif = image._getexif()
        except (AttributeError, IOError, KeyError, IndexError):
            exif = None
        if exif:
            orientation = exif.get(0x0112)
            if orientation == 2:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 3:
                image = image.rotate(180)
            elif orientation == 4:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            elif orientation == 5:
                image = image.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 6:
                image = image.rotate(-90)
            elif orientation == 7:
                image = image.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 8:
                image = image.rotate(90)
        return image

    def _colorspace(self, image, colorspace):
        """Convert to ``'RGB'`` or ``'GRAY'``; preserve alpha channels when present."""
        if colorspace == 'RGB':
            if image.mode == 'RGBA':
                return image  # RGBA is just RGB + Alpha
            if image.mode == 'LA' or (image.mode == 'P' and 'transparency' in image.info):
                return image.convert('RGBA')
            return image.convert('RGB')
        if colorspace == 'GRAY':
            return image.convert('L')
        return image

    def _remove_border(self, image, image_width, image_height):
        """Trim low-entropy (plain) borders from all four edges.

        Each edge is shaved in small slices while the slice's entropy stays
        below 2.0, but never more than ~1/3.5 of the image per edge.
        """
        borders = {
            'top': lambda iy, dy, y: (dy, dy + y),
            'right': lambda ix, dx, x: (ix - dx - x, ix - dx),
            'bottom': lambda iy, dy, y: (iy - dy - y, iy - dy),
            'left': lambda ix, dx, x: (dx, dx + x),
        }
        offset = {'top': 0, 'right': 0, 'bottom': 0, 'left': 0, }
        for border in ['top', 'bottom']:
            # Don't remove too much, the image may just be plain
            while offset[border] < image_height / 3.5:
                slice_size = min(image_width / 20, 10)
                y_range = borders[border](image_height, offset[border], slice_size)
                section = image.crop((0, y_range[0], image_width, y_range[1]))
                # If this section is below the threshold, remove it
                if self._get_image_entropy(section) < 2.0:
                    offset[border] += slice_size
                else:
                    break
        for border in ['left', 'right']:
            while offset[border] < image_width / 3.5:
                slice_size = min(image_height / 20, 10)
                x_range = borders[border](image_width, offset[border], slice_size)
                section = image.crop((x_range[0], 0, x_range[1], image_height))
                if self._get_image_entropy(section) < 2.0:
                    offset[border] += slice_size
                else:
                    break
        return image.crop(
            (offset['left'], offset['top'], image_width - offset['right'], image_height - offset['bottom']))

    # Credit to chrisopherhan https://github.com/christopherhan/pycrop
    # This is just a slight rework of pycrops implimentation
    def _entropy_crop(self, image, geometry_width, geometry_height, image_width, image_height):
        """Crop towards the requested aspect ratio, discarding the lower-entropy side."""
        geometry_ratio = geometry_width / geometry_height
        # The image is proportionally wider than it should be
        while image_width / image_height > geometry_ratio:
            slice_width = max(image_width - geometry_width, 10)
            right = image.crop((image_width - slice_width, 0, image_width, image_height))
            left = image.crop((0, 0, slice_width, image_height))
            if self._get_image_entropy(left) < self._get_image_entropy(right):
                image = image.crop((slice_width, 0, image_width, image_height))
            else:
                # BUGFIX: the kept region must span the image *width*; this
                # previously used image_height and over-cropped the image.
                image = image.crop((0, 0, image_width - slice_width, image_height))
            image_width -= slice_width
        # The image is proportionally taller than it should be
        while image_width / image_height < geometry_ratio:
            slice_height = min(image_height - geometry_height, 10)
            bottom = image.crop((0, image_height - slice_height, image_width, image_height))
            top = image.crop((0, 0, image_width, slice_height))
            if self._get_image_entropy(bottom) < self._get_image_entropy(top):
                image = image.crop((0, 0, image_width, image_height - slice_height))
            else:
                image = image.crop((0, slice_height, image_width, image_height))
            image_height -= slice_height
        return image

    def _scale(self, image, width, height):
        """Resize to exactly ``width`` x ``height`` with antialiasing."""
        return image.resize((width, height), resample=Image.ANTIALIAS)

    def _crop(self, image, width, height, x_offset, y_offset):
        """Crop a ``width`` x ``height`` window starting at the given offsets."""
        return image.crop((x_offset, y_offset,
                           width + x_offset, height + y_offset))

    def _rounded(self, image, r):
        """Apply a rounded-rectangle alpha mask with corner radius ``r``."""
        mask = round_rectangle(image.size, r, "notusedblack")
        image.putalpha(mask)
        return image

    def _blur(self, image, radius):
        """Gaussian-blur the image with the given radius."""
        return image.filter(GaussianBlur(radius))

    def _padding(self, image, geometry, options):
        """Center the image on a ``geometry``-sized canvas of ``padding_color``."""
        x_image, y_image = self.get_image_size(image)
        left = int((geometry[0] - x_image) / 2)
        top = int((geometry[1] - y_image) / 2)
        color = options.get('padding_color')
        im = Image.new(image.mode, geometry, color)
        im.paste(image, (left, top))
        return im

    def _get_raw_data(self, image, format_, quality, image_info=None, progressive=False):
        """Serialize ``image`` to raw bytes in ``format_`` at the given quality."""
        # Increase (but never decrease) PIL buffer size
        ImageFile.MAXBLOCK = max(ImageFile.MAXBLOCK, image.size[0] * image.size[1])
        bf = BufferIO()

        params = {
            'format': format_,
            'quality': quality,
            'optimize': 1,
        }

        # Keep the icc_profile when the caller supplied one.
        # BUGFIX: guard against image_info=None (the declared default), which
        # previously raised TypeError on the ``in`` membership test.
        if image_info and 'icc_profile' in image_info:
            params['icc_profile'] = image_info['icc_profile']

        if format_ == 'JPEG' and progressive:
            params['progressive'] = True
        try:
            try:
                # Do not save unnecessary exif data for smaller thumbnail size
                params.pop('exif', {})
                image.save(bf, **params)
            except (IOError, OSError):
                # Retry without optimization.
                params.pop('optimize')
                image.save(bf, **params)
            # BUGFIX: read the buffer on both save paths; this previously sat
            # in an ``else`` clause, so the retry path returned None.
            raw_data = bf.getvalue()
        finally:
            bf.close()
        return raw_data

    def _get_image_entropy(self, image):
        """calculate the entropy of an image"""
        hist = image.histogram()
        hist_size = sum(hist)
        hist = [float(h) / hist_size for h in hist]
        return -sum([p * math.log(p, 2) for p in hist if p != 0])
| 35.588477 | 108 | 0.584644 |
700e1599bbf4b0bddfd1d0af752b2b73f3f533e9 | 777 | py | Python | app/tool_results/read_stats/tests/test_module.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | app/tool_results/read_stats/tests/test_module.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | app/tool_results/read_stats/tests/test_module.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | """Test suite for Read Stats tool result model."""
from app.tool_results.read_stats import ReadStatsToolResult
from app.tool_results.tool_result_test_utils.tool_result_base_test import BaseToolResultTest
from .factory import create_values
class TestReadStatsModel(BaseToolResultTest):
    """Tests covering the ReadStats tool-result model and its upload path."""

    def test_add_read_stats(self):
        """The model built from fixture values should store correctly."""
        model = ReadStatsToolResult(**create_values())
        self.generic_add_sample_tool_test(model, 'read_stats')

    def test_upload_read_stats(self):
        """A raw fixture payload should upload successfully."""
        fixture = create_values()
        self.generic_test_upload_sample(fixture, 'read_stats')
| 37 | 92 | 0.754183 |
02726594f2761c15601904dfbf00d9ba30c38338 | 4,585 | py | Python | dev_interactive.py | bacarbland/dev_interactive | 030e76828deb269cc5f0b5303aee60ae82ec58de | [
"MIT"
] | null | null | null | dev_interactive.py | bacarbland/dev_interactive | 030e76828deb269cc5f0b5303aee60ae82ec58de | [
"MIT"
] | 2 | 2021-05-13T20:27:10.000Z | 2021-05-15T02:23:56.000Z | dev_interactive.py | bacarbland/dev_interactive | 030e76828deb269cc5f0b5303aee60ae82ec58de | [
"MIT"
] | null | null | null | import signal, subprocess, sys, time
'''
The objective is to make easier the development and debugging for interactive competitive programming problems.
To run it, the command is like the default interactive_runner.
I only know how to run programs with unbuffered stdout, stdin, stderr in Python.
1. python dev_interactive.py python3 -u testing_tool.py args -- python3 -u my_solution.py
Please try to open programs in unbuffered mode. Although I tried with a C++ buffered program and it worked fine.
2. python dev_interactive.py python3 -u testing_tool.py args -- ./my_solution
3. python dev_interactive.py python3 -u testing_tool.py args -- java solution
4. python dev_interactive.py python3 -u testing_tool.py args -- my_solution.exe
'''
class TimeoutExpired(Exception):
    """Raised when reading a line from a child pipe exceeds the time limit."""
def alarm_handler(signum, frame):
    """SIGALRM handler: turn the alarm signal into a TimeoutExpired exception."""
    raise TimeoutExpired()
def input_with_timeout(pipa, timeout):
    """Read one stripped line from ``pipa``, raising TimeoutExpired after
    ``timeout`` seconds. Unix only: relies on SIGALRM/setitimer."""
    signal.signal(signal.SIGALRM, alarm_handler)
    # Arm a real-time interval timer: SIGALRM fires after ``timeout`` seconds.
    signal.setitimer(signal.ITIMER_REAL, timeout)
    try:
        return pipa.readline().strip()
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)  # cancel the pending alarm
# If you wanna kill the program, you also have to kill the children
def interrupt_handler(signum, frame):
    """SIGINT handler: kill both child processes before propagating Ctrl-C."""
    # ``solution`` and ``judge`` are the module-level Popen objects below.
    solution.kill()
    judge.kill()
    raise KeyboardInterrupt
def processpipe(piparead, pipawrite, timeout, printformat='{}', prefix='', sufix=''):
    """Forward one line from ``piparead`` to ``pipawrite`` and echo it.

    Returns False when no line arrived within ``timeout`` seconds or the
    pipe is at EOF; True when a line was processed. Lines starting with
    ':' are treated as debug output: printed to the console (with the
    flag stripped) but NOT forwarded to the other process.
    """
    answer = ''
    try:
        # BUGFIX: honour the ``timeout`` parameter; this previously read the
        # module-level ``timelimit`` global instead.
        answer = input_with_timeout(piparead, timeout)
    except TimeoutExpired:
        return False
    if answer == '':
        # An empty read means EOF: the child closed its output.
        return False
    # You can change this next condition to fit any debug flag you want
    if answer[0] == ':':
        answer = answer[1:]  # Get rid of the flag; do not forward debug lines
    else:
        print(answer, file=pipawrite)
    answer = prefix + answer + sufix
    print(printformat.format(answer))
    return True
# --- Script entry: wire the judge and solution processes together. ---
signal.signal(signal.SIGINT, interrupt_handler)
# Command line: <judge command> -- <solution command>; exactly one separator.
if sys.argv.count("--") != 1:
    sys.exit("Expected one and only one '--'")
index2hyphen = sys.argv.index("--")
judgecomm = sys.argv[1:index2hyphen]
solutcomm = sys.argv[index2hyphen + 1:]
# Pipe configuration for the solution (sp*) and judge (jp*) processes.
spstdout = subprocess.PIPE
spstdin = subprocess.PIPE
spstderr = subprocess.PIPE # Could also be a file object if you want to keep stderr
jpstdout = subprocess.PIPE
jpstdin = subprocess.PIPE
jpstderr = subprocess.PIPE # Same as spstderr; remember to close the file in
# interrupt_handler if you swap a file in here.
timelimit = .002 # per-read timeout: two milliseconds
maxnoinput = 5 # consecutive silent reads before switching sides (~10 ms)
maxchanges = 5 # consecutive silent side-switches before assuming deadlock
# How the judge and solution output is going to be printed:
# judge left-aligned, solution right-aligned. Adjust widths to your console.
subprformat = ('{:<40}', '{:>40}')
# (prefix, suffix) pairs used to tag each side's console output.
suprefixes = (
    ('J:', ''),
    ('', ':S')
)
judge = subprocess.Popen(
    judgecomm,
    stdout = jpstdout,
    stdin = jpstdin,
    stderr = jpstderr,
    bufsize = 1,
    universal_newlines = True
)
solution = subprocess.Popen(
    solutcomm,
    stdout = spstdout,
    stdin = spstdin,
    stderr = spstderr,
    bufsize = 1,
    universal_newlines = True
)
noinputloop = 0
anyinput = False
control = False   # False: read judge -> solution; True: read solution -> judge
changes = 0
# Programs, especially interpreters, take time to load (especially Java).
time.sleep(.2)
# Main relay loop: runs until a child exits or both sides go silent.
while solution.poll() == None and judge.poll() == None and changes < maxchanges:
    # ``control`` (a bool) also indexes subprformat/suprefixes: 0 or 1.
    if control:
        anyinput = processpipe(solution.stdout, judge.stdin, timelimit, subprformat[control], *suprefixes[control])
    else:
        anyinput = processpipe(judge.stdout, solution.stdin, timelimit, subprformat[control], *suprefixes[control])
    noinputloop += 1
    # If there's any input, reset the silence counters
    if anyinput:
        noinputloop = 0
        changes = 0
    elif noinputloop >= maxnoinput:
        # This side stayed silent long enough; give the other side a turn.
        print()
        noinputloop = 0
        changes += 1
        control = not control
        anyinput = False
# There's no harm in making sure the subprocesses died.
solution.kill()
judge.kill() | 35.820313 | 115 | 0.631189 |
86841533682de4c82c111b9bb0d2624d18dadee8 | 6,157 | py | Python | datacoco_cloud/s3_to_s3_interaction.py | Phil-Ocone/datacoco-cloud | 182ba322a599a7436bbd0d834ed30c83cd82e01f | [
"MIT"
] | 1 | 2020-03-19T17:53:02.000Z | 2020-03-19T17:53:02.000Z | datacoco_cloud/s3_to_s3_interaction.py | Phil-Ocone/datacoco-cloud | 182ba322a599a7436bbd0d834ed30c83cd82e01f | [
"MIT"
] | 1 | 2021-03-25T12:56:31.000Z | 2021-03-25T13:02:24.000Z | datacoco_cloud/s3_to_s3_interaction.py | Phil-Ocone/datacoco-cloud | 182ba322a599a7436bbd0d834ed30c83cd82e01f | [
"MIT"
] | 2 | 2020-11-06T06:57:10.000Z | 2022-02-03T16:00:37.000Z | #!/usr/bin/env python
import boto3
import os
from datacoco_cloud import UNIT_TEST_KEY
import logging
class S3toS3Interaction(object):
    """
    Class to simplify S3 to S3 interactions using boto3.

    Holds one boto3 S3 client per account (source and target) and offers
    copy ("duplicate") and move operations between buckets.
    """

    def __init__(
        self,
        source_aws_key: str,
        source_aws_secret: str,
        target_aws_key: str,
        target_aws_secret: str,
        source_aws_region: str = "us-east-1",
        target_aws_region: str = "us-east-1",
    ):
        """Store credentials and build the two S3 clients.

        Client construction is skipped when the UNIT_TEST_KEY environment
        variable is set (test mode).
        """
        self.is_test = os.environ.get(UNIT_TEST_KEY, False)

        self.source_aws_key = source_aws_key
        self.source_aws_secret = source_aws_secret
        self.source_aws_region = source_aws_region
        self.target_aws_key = target_aws_key
        self.target_aws_secret = target_aws_secret
        self.target_aws_region = target_aws_region

        # Set up the S3 clients (skipped entirely under unit tests).
        if not self.is_test:
            self.s3_client_source = boto3.client(
                "s3",
                region_name=self.source_aws_region,
                aws_access_key_id=self.source_aws_key,
                aws_secret_access_key=self.source_aws_secret,
            )
            self.s3_client_target = boto3.client(
                "s3",
                region_name=self.target_aws_region,
                aws_access_key_id=self.target_aws_key,
                aws_secret_access_key=self.target_aws_secret,
            )

    def duplicate_objects(
        self,
        source_bucket: str,
        target_bucket: str,
        source_bucket_prefix: str,
        target_path: str,
        source_bucket_suffix: str = "",
    ):
        """Copy matching objects from the source bucket to the target bucket."""
        self.__do_transfer(
            source_bucket=source_bucket,
            target_bucket=target_bucket,
            source_bucket_prefix=source_bucket_prefix,
            target_path=target_path,
            source_bucket_suffix=source_bucket_suffix,
            isMove=False,
        )

    def move_objects(
        self,
        source_bucket: str,
        target_bucket: str,
        source_bucket_prefix: str,
        target_path: str,
        source_bucket_suffix: str = "",
    ):
        """Copy matching objects to the target bucket, then delete the originals."""
        self.__do_transfer(
            source_bucket=source_bucket,
            target_bucket=target_bucket,
            source_bucket_prefix=source_bucket_prefix,
            target_path=target_path,
            source_bucket_suffix=source_bucket_suffix,
            isMove=True,
        )

    def __do_transfer(
        self,
        source_bucket: str,
        target_bucket: str,
        source_bucket_prefix: str,
        target_path: str,
        source_bucket_suffix: str,
        isMove: bool = False,
    ):
        """Copy (and, when ``isMove``, delete) objects matching prefix+suffix.

        Any boto3/client error is logged and re-raised.
        """
        # Human-readable verb for log messages.
        operation = "move" if isMove else "copy"
        try:
            payload = self.s3_client_source.list_objects_v2(
                Bucket=source_bucket, Prefix=source_bucket_prefix
            )
            if payload["KeyCount"] == 0:
                logging.info(f"No files to {operation}.")
                return
            keyCount = 0
            for item in payload["Contents"]:
                filepath = item["Key"]
                # Only transfer files matching the requested suffix.
                if not filepath.endswith(source_bucket_suffix):
                    continue
                keyCount += 1
                # Strip the leading path segment to obtain the object name
                # relative to its first folder.
                if len(filepath.split("/")) > 1:
                    deductLength = len(filepath.split("/")[0]) + 1
                else:
                    deductLength = 0
                filename = filepath[deductLength:]
                logging.info(f"filename: {filename}")
                # BUGFIX: was ``filename is not ""`` — an identity test on a
                # string literal; use equality instead.
                if filename == "":
                    continue
                logging.info(
                    f"Sending file {source_bucket}/{filepath} "
                    f"to {target_bucket}/{target_path}/{filename}"
                )
                logging.info(f"filename to {operation}: {filename}")
                copy_source = {
                    "Bucket": source_bucket,
                    "Key": filepath,
                }
                if not self.is_test:
                    copy_response = self.s3_client_target.copy_object(
                        CopySource=copy_source,
                        Bucket=target_bucket,
                        # BUGFIX: the target key must contain the file name;
                        # it previously held a corrupted literal placeholder.
                        Key=f"{target_path}/{filename}",
                    )
                    logging.info(copy_response)
                    if copy_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
                        # BUGFIX: was ``{fileName}`` — undefined name
                        # (NameError) whenever this branch was reached.
                        logging.error(f"Failed to {operation}: {filename}")
                    if isMove:
                        delete_response = self.s3_client_source.delete_object(
                            Bucket=source_bucket, Key=filepath
                        )
                        logging.info(delete_response)
                        if delete_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
                            # BUGFIX: same undefined ``fileName`` here.
                            logging.error(f"Failed to delete: {filename}")
            if keyCount == 0:
                logging.info(f"No files to {operation}.")
        except Exception as e:
            logging.error(e)
            raise e
| 35.182857 | 118 | 0.450544 |
9155067295d5685c507942467d702f1866a8376e | 5,823 | py | Python | digsby/src/tests/testencodings.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | digsby/src/tests/testencodings.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | digsby/src/tests/testencodings.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | import util.auxencodings
from timeit import Timer
def main():
print Timer("assert s.encode('xml').decode('xml') == s",
'from __main__ import s').timeit(50)
s = '''<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"><title>Google</title><style>body,td,a,p,.h{font-family:arial,sans-serif}.h{font-size:20px}.h{color:#3366cc}.q{color:#00c}.ts td{padding:0}.ts{border-collapse:collapse}#gbar{float:left;font-weight:bold;height:22px;padding-left:2px}#gbh{border-top:1px solid #c9d7f1;font-size:0;height:0;position:absolute;right:0;top:24px;width:200%}#gbi{background:#fff;border:1px solid;border-color:#c9d7f1 #36c #36c #a2bae7;font-size:13px;top:24px;z-index:1000}#guser{padding-bottom:7px !important}#gbar,#guser{font-size:13px;padding-top:1px !important}@media all{.gb1,.gb3{height:22px;margin-right:.73em;vertical-align:top}}#gbi,.gb2{display:none;position:absolute;width:8em}.gb2{z-index:1001}#gbar a,#gbar a:active,#gbar a:visited{color:#00c;font-weight:normal}.gb2 a,.gb3 a{text-decoration:none}.gb2 a{display:block;padding:.2em .5em}#gbar .gb2 a:hover{background:#36c;color:#fff}</style><script>window.google={kEI:"9ayDR7uNOYmMeZGfwVE",kEXPI:"17259,17735",kHL:"en"};
function sf(){document.f.q.focus()}
window.rwt=function(b,d,e,g,h,f,i){var a=encodeURIComponent||escape,c=b.href.split("#");b.href="/url?sa=t"+(d?"&oi="+a(d):"")+(e?"&cad="+a(e):"")+"&ct="+a(g)+"&cd="+a(h)+"&url="+a(c[0]).replace(/\+/g,"%2B")+"&ei=9ayDR7uNOYmMeZGfwVE"+(f?"&usg="+f:"")+i+(c[1]?"#"+c[1]:"");b.onmousedown="";return true};
window.gbar={};(function(){;var g=window.gbar,a,l,d;function m(b,f,e){b.display=b.display=="block"?"none":"block";b.left=f+"px";b.top=e+"px"}g.tg=function(b){var f=0,e,c,h,i=0,j=window.navExtra;!l&&(l=document.getElementById("gbar"));!d&&(d=l.getElementsByTagName("span"));(b||window.event).cancelBubble=true;if(!a){a=document.createElement(Array.every||window.createPopup?"iframe":"div");a.frameBorder="0";a.id="gbi";a.scrolling="no";a.src="#";document.body.appendChild(a);if(j&&d[8])for(var n in j){var k=document.createElement("span");k.appendChild(j[n]);k.className="gb2";d[0].parentNode.insertBefore(k,d[8])}document.onclick=g.close}for(;d[i];i++){c=d[i];h=c.className;if(h=="gb3"){e=c.offsetLeft;while(c=c.offsetParent)e+=c.offsetLeft;m(a.style,e,24)}else if(h=="gb2"){m(c.style,e+1,25+f);f+=20}}a.style.height=f+"px"};g.close=function(b){a&&a.style.display=="block"&&g.tg(b)};})();</script></head><body bgcolor=#ffffff text=#000000 link=#0000cc vlink=#551a8b alink=#ff0000 onload="sf();if(document.images){new Image().src='/images/nav_logo3.png'}" topmargin=3 marginheight=3><div id=gbar><nobr><span class=gb1>Web</a></span> <span class=gb1><a href="http://images.google.com/imghp?hl=en&tab=wi">Images</a></span> <span class=gb1><a href="http://maps.google.com/maps?hl=en&tab=wl">Maps</a></span> <span class=gb1><a href="http://news.google.com/nwshp?hl=en&tab=wn">News</a></span> <span class=gb1><a href="http://www.google.com/prdhp?hl=en&tab=wf">Shopping</a></span> <span class=gb1><a href="http://mail.google.com/mail?hl=en&tab=wm">Gmail</a></span> <span class=gb3><a href="http://www.google.com/intl/en/options/" onclick="this.blur();gbar.tg(event);return false"><u>more</u> <span style=font-size:11px>▼</span></a></span> <span class=gb2><a href="http://blogsearch.google.com/?hl=en&tab=wb">Blogs</a></span> <span class=gb2><a href="http://books.google.com/bkshp?hl=en&tab=wp">Books</a></span> <span class=gb2><a href="http://www.google.com/calendar?hl=en&tab=wc">Calendar</a></span> <span 
class=gb2><a href="http://docs.google.com/?hl=en&tab=wo">Documents</a></span> <span class=gb2><a href="http://finance.google.com/finance?hl=en&tab=we">Finance</a></span> <span class=gb2><a href="http://groups.google.com/grphp?hl=en&tab=wg">Groups</a></span> <span class=gb2><a href="http://picasaweb.google.com/home?hl=en&tab=wq">Photos</a></span> <span class=gb2><a href="http://www.google.com/reader?hl=en&tab=wy">Reader</a></span> <span class=gb2><a href="http://scholar.google.com/schhp?hl=en&tab=ws">Scholar</a></span> <span class=gb2><a href="http://video.google.com/?hl=en&tab=wv">Video</a></span> <span class=gb2><a href="http://www.youtube.com/?hl=en&tab=w1">YouTube</a></span> <span class=gb2><a href="http://www.google.com/intl/en/options/">even more »</a></span> </nobr></div><div id=gbh></div><div align=right id=guser style="font-size:84%;padding:0 0 4px" width=100%><nobr><b>kevinwatters@gmail.com</b> | <a href="/url?sa=p&pref=ig&pval=3&q=http://www.google.com/ig%3Fhl%3Den&usg=AFQjCNEj49wK5T88bDwGcaZsW52jiYwJwg">iGoogle</a> | <a href="https://www.google.com/accounts/ManageAccount">My Account</a> | <a href="http://www.google.com/accounts/Logout?continue=http://www.google.com/">Sign out</a></nobr></div><center><br clear=all id=lgpd><img alt="Google" height=110 src="/intl/en_ALL/images/logo.gif" width=276><br><br><form action="/search" name=f><table cellpadding=0 cellspacing=0><tr valign=top><td width=25%> </td><td align=center nowrap><input name=hl type=hidden value=en><input maxlength=2048 name=q size=55 title="Google Search" value=""><br><input name=btnG type=submit value="Google Search"><input name=btnI type=submit value="I'm Feeling Lucky"></td><td nowrap width=25%><font size=-2> <a href=/advanced_search?hl=en>Advanced Search</a><br> <a href=/preferences?hl=en>Preferences</a><br> <a href=/language_tools?hl=en>Language Tools</a></font></td></tr></table></form><br><br><font size=-1><a href="/intl/en/ads/">Advertising Programs</a> - <a href="/services/">Business 
Solutions</a> - <a href="/intl/en/about.html">About Google</a></font><p><font size=-2>©2008 Google</font></p></center></body></html>
'''
if __name__ == '__main__':
print len(s)
main() | 342.529412 | 4,191 | 0.693972 |
43bda11954f16aad7f313c15e78fa36a458dee1a | 2,647 | py | Python | setup.py | redwardstern/python-pgm | dd2ffb36f97b0d6805db9c239e53a72f5fb0047f | [
"BSD-2-Clause"
] | null | null | null | setup.py | redwardstern/python-pgm | dd2ffb36f97b0d6805db9c239e53a72f5fb0047f | [
"BSD-2-Clause"
] | null | null | null | setup.py | redwardstern/python-pgm | dd2ffb36f97b0d6805db9c239e53a72f5fb0047f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup script."""
    path = join(dirname(__file__), *names)
    encoding = kwargs.get('encoding', 'utf8')
    with io.open(path, encoding=encoding) as handle:
        return handle.read()
# Package metadata: sources live under src/, modules exposed flat via py_modules.
setup(
    name='pgm',
    version='0.1.0',
    license='BSD',
    description='foray into structured prediction and pgm',
    # README with the badge block stripped, plus the changelog with
    # Sphinx roles (:role:`target`) reduced to plain double-backtick code.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='R. Edward Stern',
    author_email='ruth@sternshus.com',
    url='https://github.com/redwardstern/python-pgm',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        # eg: 'aspectlib==1.1.1', 'six>=1.7',
    ],
    extras_require={
        # eg:
        # 'rst': ['docutils>=0.11'],
        # ':python_version=="2.6"': ['argparse'],
    },
    entry_points={
        'console_scripts': [
            'pgm = pgm.cli:main',
        ]
    },
)
| 32.679012 | 96 | 0.599547 |
6a709de9c289f552622f1e6d62c84814fdb722a5 | 4,493 | py | Python | tests/tensorflow/quantization/test_statistics.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | tests/tensorflow/quantization/test_statistics.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | tests/tensorflow/quantization/test_statistics.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional, List
import pytest
from nncf.common.quantization.statistics import QuantizersCounter
from nncf.common.quantization.statistics import QuantizationStatistics
from tests.tensorflow import test_models
from tests.tensorflow.helpers import get_empty_config
from tests.tensorflow.helpers import create_compressed_model_and_algo_for_test
def _get_basic_quantization_config(mode: str,
                                   granularity: str,
                                   input_sample_sizes: Optional[List[int]] = None):
    """Build a minimal NNCF quantization config for the given mode/granularity."""
    config = get_empty_config(input_sample_sizes)
    # Both weight and activation quantizers share the same settings here.
    quantizer_params = {
        'mode': mode,
        'per_channel': granularity == 'per_channel',
    }
    config['compression'] = {
        'algorithm': 'quantization',
        'activations': dict(quantizer_params),
        'weights': dict(quantizer_params),
    }
    config['target_device'] = 'TRIAL'
    return config
class Case:
    """Bundles one model/quantization setup with the statistics it should yield."""

    def __init__(self,
                 model_name: str,
                 model_builder,
                 input_sample_sizes: List[int],
                 mode: str,
                 granularity: str,
                 expected: QuantizationStatistics):
        # Stored privately; read-only access is provided via properties.
        self._model_name = model_name
        self._model_builder = model_builder
        self._input_sample_sizes = input_sample_sizes
        self._mode = mode
        self._granularity = granularity
        self._expected = expected

    @property
    def model(self):
        """Build the model; the batch dimension is stripped from the sizes."""
        batchless_shape = tuple(self._input_sample_sizes[1:])
        return self._model_builder(input_shape=batchless_shape)

    @property
    def config(self):
        """NNCF config matching this case's mode and granularity."""
        return _get_basic_quantization_config(self._mode,
                                              self._granularity,
                                              self._input_sample_sizes)

    @property
    def expected(self):
        """Expected quantization statistics for this case."""
        return self._expected

    def get_id(self) -> str:
        """Human-readable pytest id for this case."""
        return f'{self._model_name}-{self._mode}-{self._granularity}'
# MobileNetV2 on 96x96 inputs under two quantizer setups; the expected counter
# values were collected from the reference NNCF quantization setup.
TEST_CASES = [
    Case(
        model_name='mobilenet_v2',
        model_builder=test_models.MobileNetV2,
        input_sample_sizes=[1, 96, 96, 3],
        mode='symmetric',
        granularity='per_tensor',
        expected=QuantizationStatistics(
            wq_counter=QuantizersCounter(53, 0, 53, 0, 53, 0, 53),
            aq_counter=QuantizersCounter(64, 0, 64, 0, 64, 0, 64),
            num_wq_per_bitwidth={8: 53},
            num_aq_per_bitwidth={8: 64},
            ratio_of_enabled_quantizations=100.0
        )
    ),
    Case(
        model_name='mobilenet_v2',
        model_builder=test_models.MobileNetV2,
        input_sample_sizes=[1, 96, 96, 3],
        mode='asymmetric',
        granularity='per_channel',
        expected=QuantizationStatistics(
            wq_counter=QuantizersCounter(0, 53, 53, 0, 0, 53, 53),
            aq_counter=QuantizersCounter(0, 64, 64, 0, 0, 64, 64),
            num_wq_per_bitwidth={8: 53},
            num_aq_per_bitwidth={8: 64},
            ratio_of_enabled_quantizations=100.0
        )
    ),
]
# Stable, readable pytest ids derived from each case's name/mode/granularity.
TEST_CASES_IDS = [test_case.get_id() for test_case in TEST_CASES]
@pytest.mark.parametrize('test_case', TEST_CASES, ids=TEST_CASES_IDS)
def test_quantization_statistics(test_case):
    """Collected quantization statistics must match the case's expectations."""
    _, compression_ctrl = create_compressed_model_and_algo_for_test(test_case.model,
                                                                    test_case.config,
                                                                    force_no_init=True)
    actual = compression_ctrl.statistics().quantization
    expected = test_case.expected
    # Counters are plain attribute bags; compare their attribute dicts.
    for counter_name in ('wq_counter', 'aq_counter'):
        assert vars(getattr(expected, counter_name)) == vars(getattr(actual, counter_name))
    assert expected.num_wq_per_bitwidth == actual.num_wq_per_bitwidth
    assert expected.num_aq_per_bitwidth == actual.num_aq_per_bitwidth
    assert expected.ratio_of_enabled_quantizations == actual.ratio_of_enabled_quantizations
| 35.944 | 102 | 0.655909 |
1efec286341ae44e0c97fcb8157cdc7319760c0a | 23,090 | py | Python | panoptic_mapping_utils/src/flat_dataset/flat_semantic_labelling.py | YuePanEdward/panoptic_mapping | 85212ed5e98b6552a75363117f7676a7676ac27f | [
"BSD-3-Clause"
] | 101 | 2021-10-19T13:05:32.000Z | 2022-03-31T13:04:07.000Z | panoptic_mapping_utils/src/flat_dataset/flat_semantic_labelling.py | YuePanEdward/panoptic_mapping | 85212ed5e98b6552a75363117f7676a7676ac27f | [
"BSD-3-Clause"
] | 4 | 2021-12-13T14:40:41.000Z | 2022-03-25T17:16:28.000Z | panoptic_mapping_utils/src/flat_dataset/flat_semantic_labelling.py | YuePanEdward/panoptic_mapping | 85212ed5e98b6552a75363117f7676a7676ac27f | [
"BSD-3-Clause"
] | 16 | 2021-12-21T10:36:34.000Z | 2022-03-22T05:40:00.000Z | #!/usr/bin/env python3
import csv
import airsim
import numpy as np
import imageio
def get_ir_ids(ir_correction_file):
    """Read the mesh-id to infrared-id mapping from a correction CSV.

    AirSim's infrared pixel values do not match the segmentation mesh ids
    directly, so a correction table (columns: MeshID, InfraRedID) is used to
    translate. Infrared ids are made monotonically non-decreasing: a row
    whose infrared value does not exceed the running maximum reuses that
    maximum instead.

    Returns:
        Tuple (mesh_ids, ir_ids) of two equal-length, order-aligned lists.
    """
    mesh_ids = []
    ir_ids = []
    highest_ir = -1
    with open(ir_correction_file) as csvfile:
        for row in csv.reader(csvfile, delimiter=',', quotechar='"'):
            if row[0] == 'MeshID':
                continue  # Skip the header row.
            highest_ir = max(highest_ir, int(row[1]))
            mesh_ids.append(int(row[0]))
            ir_ids.append(highest_ir)
    return mesh_ids, ir_ids
def create_label_ids_flat(ir_correction_file):
    """
    Create the class and instance labels for the objects in the flat dataset.

    Returns a list of label dicts with the keys
    {InstanceID, ClassID, PanopticID, MeshID, InfraredID, Name, Size}.
    Mesh/infrared ids are translated via the given correction file.
    """
    # labels for the flat test dataset
    labels = [
    ] # {InstanceID, ClassID, PanopticID, MeshID, InfraredID, Name, Size}
    mesh_ids, ir_ids = get_ir_ids(ir_correction_file)
    # NOTE: These are lists so they are mutable in set label.
    id_counter = [0]
    class_counter = [0]
    panotpic_id = 0
    # Suffixes appended to instance names: "<name>_a", "<name>_b", ...
    letters = [
        "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
        "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"
    ]
    def set_label(name, size="M", increment_class=True, count=1, new_id=True):
        # Emit `count` labels named "<name>_a" .. with consecutive instance
        # ids (unless new_id is False, which reuses the current id).
        for i in range(count):
            if i >= len(letters):
                print("Warning: can only write %i (request is %i) suffixes "
                      "using 'letters'." % (len(letters), i))
            full_name = name + "_" + letters[i]
            label = {}
            # Instance ID
            label_id = id_counter[0]
            if new_id:
                id_counter[0] = id_counter[0] + 1
            label["InstanceID"] = label_id
            label["Name"] = full_name
            label["Size"] = size
            # IR and mesh
            if label_id < len(mesh_ids):
                label["MeshID"] = mesh_ids[label_id]
                label["InfraredID"] = ir_ids[label_id]
            else:
                print("Warning: id '%i' is larger than the maximum supported "
                      "id count of %i." % (label_id, len(mesh_ids)))
            # Class and Panoptic
            # NOTE(review): the class counter is incremented on the FIRST
            # iteration only (increment_class is cleared below), so for
            # count > 1 the labels b..z read the already-incremented counter
            # and thus share their ClassID with the following call rather
            # than with label "a". This looks unintended but is kept as-is
            # because published dataset labels depend on it — confirm before
            # changing.
            label["ClassID"] = class_counter[0]
            if increment_class:
                class_counter[0] = class_counter[0] + 1
            label["PanopticID"] = panotpic_id
            increment_class = False # for multiple count
            labels.append(label)
    # Background classes
    panotpic_id = 0
    set_label("SM_Ceiling")
    set_label("SM_Floor")
    set_label("SM_Walls")
    set_label("SM_Windows_glass")
    set_label("SM_Windows_Windows")
    set_label("SM_TV_Wall")
    # Instances
    panotpic_id = 1
    set_label("SM_Bed", "L", count=2)
    set_label("SM_Bed_lamp", count=2)
    set_label("SM_Bed_table", count=2)
    set_label("SM_Ceiling_lamp", count=12)
    set_label("SM_Chair", count=2)
    set_label("SM_Office_Chair_base", increment_class=False, new_id=False)
    set_label("SM_Office_Chair_seat", increment_class=False)
    set_label("SM_Coffee_table")
    set_label("SM_Cup", "S", count=3)
    set_label("SM_Decor", "S", count=2)
    set_label("SM_Digital_Clock")
    set_label("SM_Dimmer", count=2)
    set_label("SM_Door", count=2)
    set_label("SM_Floor_Lamp")
    set_label("SM_Journal", "S", count=3)
    set_label("SM_Kitchen", "L")
    set_label("SM_Picture", count=7)
    set_label("SM_Plant", "S")
    set_label("SM_Plate", "S", count=3)
    set_label("SM_Remote", "S")
    set_label("SM_Sofa", "L")
    set_label("SM_Stack_of_Books", "S", count=6)
    set_label("SM_Table", count=2)
    set_label("SM_Table_Decor", "S")
    set_label("SM_Tumblr", "S", count=2)
    set_label("SM_TV")
    set_label("SM_Wall_Clock")
    set_label("SM_Coffee_Machine", "S")
    # NOTE(review): one instance id is deliberately skipped here before the
    # laptops reuse a single (non-incremented) id — confirm this matches the
    # rendered scene.
    id_counter[0] = id_counter[0] + 1
    set_label("SM_Laptop",
              size="S",
              count=2,
              increment_class=False,
              new_id=False)
    print("Created a labeling with %i instances and %i classes." %
          (id_counter[0], class_counter[0]))
    return labels
class Labler(object):
    """Incrementally builds instance labels and a mesh-name -> id mapping."""

    def __init__(self, ir_correction_file):
        # Current panoptic category: 0 = background, 1 = instance.
        self.panoptic_id = 0
        # Next free instance id.
        self.instance_counter = 0
        # Order-aligned lists translating instance ids to mesh/infrared ids.
        self.mesh_ids, self.ir_ids = get_ir_ids(ir_correction_file)
        # Accumulated label dicts:
        # {InstanceID, ClassID, PanopticID, MeshID, InfraredID, Name}.
        self.labels = []
        # Maps every unreal mesh name to the instance id it belongs to.
        self.meshes_to_label = {}  # {mesh_name: id}
        # Maps a class name to [class id, per-class instance counter].
        self.class_labels = {}
        # Next free class id.
        self.class_counter = 0

    def set_panoptic_label(self, label):
        # 0 = background, 1 = instance.
        self.panoptic_id = label

    def get_labels(self):
        return self.labels

    def get_meshes_to_label(self):
        return self.meshes_to_label

    def add_instance(self, mesh_names, class_name):
        """mesh_names: list of all mesh names forming this instance,
        class_name: string of the semantic class."""
        # Instance
        instance_id = self.instance_counter
        self.instance_counter += 1
        label = {"InstanceID": instance_id}
        # Class and Name: register the class or bump its instance counter.
        if class_name not in self.class_labels:
            self.class_labels[class_name] = [self.class_counter, 0]
            self.class_counter += 1
        else:
            self.class_labels[class_name][1] += 1
        class_id, class_instance = self.class_labels[class_name]
        label["Name"] = class_name + "_" + str(class_instance)
        label["ClassID"] = class_id
        # IR and mesh
        if instance_id < len(self.mesh_ids):
            label["MeshID"] = self.mesh_ids[instance_id]
            label["InfraredID"] = self.ir_ids[instance_id]
        else:
            print("Warning: id '%i' is larger than the maximum supported "
                  "id count of %i." % (instance_id, len(self.mesh_ids)))
            label["MeshID"] = -1
            label["InfraredID"] = -1
        # Panoptic
        label["PanopticID"] = self.panoptic_id
        # Write
        self.labels.append(label)
        for mesh_name in mesh_names:
            self.meshes_to_label[mesh_name] = instance_id
def create_label_ids_large_flat(ir_correction_file):
    """
    Create the class/instance labels for the large flat dataset.

    Every call to ``labler.add_instance`` groups one or more unreal mesh
    names into a single instance of the given semantic class; mesh ids and
    infrared ids are translated via the given correction file.

    Returns:
        Tuple (labels, meshes_to_label) where labels is a list of label
        dicts and meshes_to_label maps unreal mesh names to instance ids.
    """
    labler = Labler(ir_correction_file)
    # Background
    labler.add_instance([
        "Walls_StaticMesh", "SM_frame_door09_25", "SM_frame_door10",
        "SM_frame_door11", "SM_frame_door12", "SM_frame_door13",
        "SM_frame_door14", "SM_frame_door15", "SM_frame_door16",
        "SM_frame_door17"
    ], "wall")
    labler.add_instance(["Floor_StaticMesh"], "floor")
    labler.add_instance(["Ceiling_StaticMesh"], "ceiling")
    labler.add_instance(["pr_background_mountain_a_4_summer_16", "Ground_195"],
                        "landscape")
    labler.add_instance([
        "SM_window07_frame_14", "SM_window07_leaf01_22",
        "SM_window26_leaf01_8", "SM_window26_frame2_134", "SM_window26_frame3"
    ] + ["SM_window07_frame%i" % i for i in range(2, 8)] +
                        ["SM_window07_leaf%i" % i for i in range(2, 15)] +
                        ["SM_window26_leaf%i" % i
                         for i in range(2, 5)], "window")
    # Instances
    labler.set_panoptic_label(1)
    # NOTE(review): a few mesh lists below contain duplicated names (e.g.
    # "SM_Pillow_Sofa_4", "SM_BoxTiena_7", "SM_Marker_01_SM_Marker_01_Cap5");
    # harmless since meshes_to_label is a dict, but possibly typos for
    # differently numbered meshes — verify against the unreal scene.
    # Kitchen
    labler.add_instance([
        "SM_Kitchen_KNOXHULT_Exhaust_Hood3",
        "SM_Kitchen_KNOXHULT_FloorCabinet_Boxes3",
        "SM_Kitchen_KNOXHULT_FloorCabinet_Doors3",
        "SM_Kitchen_KNOXHULT_FloorCabinet_TableTop3",
        "SM_Kitchen_KNOXHULT_Lamps3", "SM_Kitchen_KNOXHULT_Sink3",
        "SM_Kitchen_KNOXHULT_Tile_4", "SM_Kitchen_KNOXHULT_WallCabinet_Boxes3",
        "SM_Kitchen_KNOXHULT_WallCabinet_Doors3"
    ], "kitchen")
    labler.add_instance(["SM_Refregerator_01_140"], "refregerator")
    labler.add_instance(
        ["SM_Stand3", "SM_Spice_jar11", "SM_Spice_jar12", "SM_Spice_jar13"],
        "spices")
    for i in range(11, 14):
        labler.add_instance(["SM_Jar_%i" % i], "jar")
    for i in range(14, 17):
        labler.add_instance(["SM_Box_%i" % i], "box")
    labler.add_instance(["SM_Plate_%i" % i for i in range(17, 21)], "plates")
    labler.add_instance(["SM_Tool_3"], "tool")
    labler.add_instance(["SM_Tools3"], "tool")
    labler.add_instance(["SM_Stand_3"], "stand")
    labler.add_instance(["SM_Cuttingboard_3"], "cuttingboard")
    labler.add_instance(["SM_Cuttingboard_4"], "cuttingboard")
    labler.add_instance(["SM_Cloth_3"], "cloth")
    labler.add_instance(["SM_Knives_Stand_Hivla_3"], "knives")
    labler.add_instance(["SM_Bowl_%i" % i for i in range(13, 17)], "bowls")
    labler.add_instance(["SM_Jar3", "SM_Spice_jar14", "SM_Spice_jar15"],
                        "spices")
    labler.add_instance(["SM_Plate_%i" % i for i in range(21, 25)], "plates")
    labler.add_instance(["SM_Cup_3"], "cup")
    labler.add_instance(["SM_Yukke_Watch3"], "clock")
    # Dining Room
    labler.add_instance(["SM_Table_Lisabo_01_26"], "table")
    for s in ["_8", "2_11", "4_17", "5_20", "6_23", 7]:
        labler.add_instance(["SM_Table_chair%s" % s], "chair")
    labler.add_instance(["SM_Light_Hektar_3"], "lamp")
    labler.add_instance(["SM_Light_Hektar_4"], "lamp")
    labler.add_instance(["SM_Cup_01_149"], "cup")
    labler.add_instance(["SM_Cup_02_143"], "cup")
    labler.add_instance(["ChineseFoodBox_A_141"], "food")
    labler.add_instance(["SM_Stack_of_Books_260"], "books")
    labler.add_instance(["SM_Cup_146"], "cup")
    # Living Room
    labler.add_instance(["SM_Carpet_5"], "carpet")
    labler.add_instance([
        "SM_Sofa_01_Fold_32", "SM_Pillow_Sofa_03_38", "SM_Pillow_Sofa_4",
        "SM_Pillow_Sofa_4"
    ], "sofa")
    labler.add_instance(["SM_Pillow_03_43"], "pillow")
    labler.add_instance(["SM_Pillow_04_49"], "pillow")
    labler.add_instance(["Armchair_dense_27"], "chair")
    labler.add_instance(["EdithLivingRoomTable_18"], "table")
    labler.add_instance(["SM_Remote_110"], "remote")
    labler.add_instance(["SM_Table_Decor_21"], "decor")
    labler.add_instance(["SM_Decor_24"], "decor")
    labler.add_instance(["SM_Picture_68"], "picture")
    labler.add_instance(["SM_Picture_71"], "picture")
    labler.add_instance(["SM_Wall_TV_2"], "tv")
    labler.add_instance([
        "SM_TVSet_SM_MailUnitBdvE4102", "SM_TVSet_SM_SurroundSpeakers5",
        "SM_TVSet_SM_SurroundSpeakers6", "SM_TVSet_SM_centerpeaker2"
    ], "soundsystem")
    labler.add_instance([
        "SM_RackKallax_2x5", "SM_RackKallax_2x6", "SM_RackKallax_2x7",
        "SM_BoxDoorKallax_12", "SM_BoxDoorKallax_14", "SM_BoxDoorKallax_23",
        "SM_BoxDoorKallax_24", "SM_BoxTiena_5", "SM_BoxDrena_9",
        "SM_BoxDrena_8", "SM_Box9", "SM_Box8_151"
    ], "shelf")
    # Entry Area
    labler.add_instance(["SM_MainDoor_30"], "door")
    labler.add_instance(["SM_Rack_4x2_92"], "shelf")
    labler.add_instance(["SM_Stack_of_Books_101"], "books")
    labler.add_instance(["SM_Stack_of_Books_98"], "books")
    labler.add_instance(["SM_Stack_of_Books_95"], "books")
    labler.add_instance(["SM_Plant_107"], "plant")
    labler.add_instance(["SM_Decor_104"], "decor")
    labler.add_instance(["Shoe_shelf_Hemnes3", "SM_Coat_hanger_2"] +
                        ["SM_Hanger_%i" % i for i in range(23, 27)],
                        "wardrobe")
    labler.add_instance(["SM_Picture_140x100_74"], "picture")
    # Toilet
    labler.add_instance(
        ["SM_Toilet_02_229", "SM_ToiletPaper_02_235", "SM_Toiletbrush_03_232"],
        "toilet")
    labler.add_instance(["SM_Washbasin_2", "SM_BathroomSinkMixer_5"], "basin")
    labler.add_instance(["SM_Mirror_06_8"], "mirror")
    # Washing Room
    labler.add_instance(["SM_WashingMachine_38"], "washingmachine")
    labler.add_instance(["DawnDresser_a_161"], "dresser")
    labler.add_instance(
        ["SM_Door_80_7", "SM_handleMetal11", "SM_handleMetal12"], "door")
    labler.add_instance(
        ["SM_Door_80_8", "SM_handleMetal13", "SM_handleMetal14"], "door")
    labler.add_instance(["Fiddle_a_144"], "fiddle")
    labler.add_instance(["WateringCan_152"], "wateringcan")
    # Floor
    labler.add_instance(["DresserWood_b_123"], "dresser")
    labler.add_instance(["DawnDresser_a2_167"], "dresser")
    labler.add_instance(["EdithPhoto_a_170"], "picture")
    labler.add_instance(["FloorBottle_c_179"], "decor")
    # Bath
    labler.add_instance(["SM_Toilet_03_68"], "toilet")
    labler.add_instance(["SM_Washbasin_03_188", "SM_BathroomSinkMixer_02_105"],
                        "basin")
    labler.add_instance(["SM_Mirror_01_76"], "mirror")
    labler.add_instance([
        "SM_Cosmetics_4_01_120", "SM_Cosmetics_04_01_111",
        "SM_Cosmetics_3_01_123"
    ], "cosmetics")
    labler.add_instance([
        "SM_ToothbrushHolder_02_126", "SM_Cosmetics_04_02_114",
        "SM_Cosmetics_1_01_117"
    ], "cosmetics")
    labler.add_instance(["SM_RecycleBin_01_108"], "bin")
    labler.add_instance(["SM_Branas_102"], "basket")
    labler.add_instance(["SM_BathCarpet_01_27"], "carpet")
    labler.add_instance(["SM_Bath_01_71", "SM_Shower_01_SM_Shower_01_79"],
                        "bath")
    labler.add_instance(["SM_Branas_01_98"], "basket")
    labler.add_instance(["SM_WashingMachine_01_92"], "washingmachine")
    # Bedroom
    labler.add_instance(["SM_P1sh12_%s" % i for i in [158, 174, 181, 184]] +
                        ["SM_Hanger_%i" % i for i in range(14, 23)], "shelf")
    labler.add_instance(["SM_Laundry_Basket_01_187"], "basket")
    labler.add_instance(["SM_Pillow_%i" % i for i in range(8, 12)], "pillows")
    labler.add_instance(["SM_Carpet_11"], "carpet")
    labler.add_instance(["SM_Bed_03_59"], "bed")
    labler.add_instance(["SM_Bed_table_62"], "table")
    labler.add_instance(["SM_Journal_263"], "books")
    labler.add_instance(["SM_WallLamp_01_58"], "lamp")
    labler.add_instance(["SM_Nightstand_01_197"], "nightstand")
    labler.add_instance(["SM_TableLamp_02_61"], "lamp")
    labler.add_instance(["SM_Digital_Clock_266"], "clock")
    labler.add_instance(["SM_Picture_194"], "picture")
    labler.add_instance([
        "SM_RackKallax_4x5_189", "SM_BoxDoorKallax_25", "SM_BoxFiella_6",
        "SM_BoxDoorKallax_26"
    ], "shelf")
    labler.add_instance(["SM_GrassBox_7", "SM_GrassBox_8"], "plant")
    labler.add_instance(["SM_Curtain_3"], "curtain")
    labler.add_instance(["SM_Curtain_02_44"], "curtain")
    # Guest Room
    labler.add_instance(["EdithRoomBed_a_129"], "bed")
    labler.add_instance(["SM_Nightstand_220"], "nightstand")
    labler.add_instance(["SM_Picture_120x120_138"], "picture")
    labler.add_instance(["SM_Sofa_3"], "chair")
    labler.add_instance(
        ["SM_Notebook_01_keyboard_223", "SM_Notebook_01_top_226"], "pc")
    labler.add_instance(
        ["SM_Door_80_6", "SM_handleMetal10", "SM_handleMetal9"], "door")
    labler.add_instance(["SM_Picture_140x101_117"], "picture")
    labler.add_instance(["SM_Metal_Rack_2"], "shelf")
    labler.add_instance(["SM_BoxFiella_7"], "box")
    labler.add_instance(["SM_BoxTiena_7", "SM_BoxTiena_7"], "box")
    labler.add_instance([
        "SM_Pen_Stand_Tiera_01_v4", "SM_Marker_01_SM_Marker_01_Cap5",
        "SM_Marker_01_SM_Marker_01_Cap5", "SM_Marker_01_SM_Marker_9",
        "SM_Marker_01_SM_Marker_01_Cap6", "SM_Marker_01_SM_Marker_7",
        "SM_Marker_01_SM_Marker_01_Cap4"
    ], "pens")
    labler.add_instance(["SM_Pen_Stand_Tiera_01_v5"], "pens")
    labler.add_instance(["SM_GrassBox_9"], "plant")
    # Office
    labler.add_instance(["SM_Door_80_5", "SM_handleMetal8", "SM_handleMetal7"],
                        "door")
    labler.add_instance(["SM_Picture_01_49"], "picture")
    labler.add_instance(["SM_Picture_62"], "picture")
    labler.add_instance(["SM_Table_3"], "table")
    labler.add_instance(["SM_Office_Chair_43"], "chair")
    labler.add_instance(["SM_Keyboard_01_213"], "keyboard")
    labler.add_instance(["SM_Printer_01_210"], "pc")
    labler.add_instance(["SM_Monitor_02_40"], "monitor")
    labler.add_instance(["SM_Floor_Lamp_207"], "lamp")
    labler.add_instance(["SM_Table_02_200"], "table")
    labler.add_instance(["SM_Office_Chair_01_46"], "chair")
    labler.add_instance(["SM_Monitor_01_37"], "monitor")
    labler.add_instance(["SM_Keyboard_2"], "keyboard")
    labler.add_instance(["SM_TableLamp_01_52"], "lamp")
    # Lamps
    for i in range(3, 16):
        labler.add_instance(["SM_Light_ALENG%i" % i], "lamp")
    for s in ["4_9", 5, 6, 7, 8]:
        labler.add_instance(["SM_Light_MEZOSPHERE%s" % s], "lamp")
    print("Created a labeling with %i instances and %i classes." %
          (len(labler.get_labels()), labler.class_counter))
    return labler.get_labels(), labler.get_meshes_to_label()
def apply_labels_flat(labels):
    """
    Push every label's segmentation id to the running unreal/AirSim instance,
    addressing meshes by the label's 'Name'.
    """
    client = airsim.MultirotorClient()
    client.confirmConnection()
    # Reset the labels of everything to invisible.
    client.simSetSegmentationObjectID(r"[\w]*", -1, True)
    num_applied = 0
    for label in labels:
        mesh_name = label["Name"]
        applied = client.simSetSegmentationObjectID(mesh_name,
                                                    label["MeshID"], False)
        if applied:
            num_applied = num_applied + 1
            print("Successfully set label for '%s'." % mesh_name)
        else:
            print("Failed to set label for '%s'." % mesh_name)
    print("Applied %i/%i labels." % (num_applied, len(labels)))
def apply_labels_large_flat(meshes_to_label):
    """
    Set the segmentation id for every mesh in unreal based on the mapping in
    'meshes_to_label'.

    Args:
        meshes_to_label: Dict mapping unreal mesh names to instance ids, as
            produced by create_label_ids_large_flat().
    """
    client = airsim.MultirotorClient()
    success = 0
    client.confirmConnection()
    # Reset the labels of everything to invisible.
    client.simSetSegmentationObjectID(r"[\w]*", -1, True)
    # Iterate name/id pairs directly instead of re-looking up every key.
    for mesh_name, mesh_id in meshes_to_label.items():
        if client.simSetSegmentationObjectID(mesh_name, mesh_id, False):
            success = success + 1
            print("Successfully set label for '%s'." % mesh_name)
        else:
            print("Failed to set label for '%s'." % mesh_name)
    print("Applied %i/%i labels." % (success, len(meshes_to_label)))
def get_available_meshes(comparison_labels=None):
    """
    Print the names of all meshes in the unreal world. If comparison_labels is
    set print how many of these are uniquely
    matched with the existing names.

    Args:
        comparison_labels: Optional list of label dicts (with a "Name" key)
            to match against the scene's mesh names.
    """
    client = airsim.MultirotorClient()
    client.confirmConnection()
    names = client.simListSceneObjects(r"[\w]*")
    names = [str(name) for name in names]
    counts = []
    print("Available mesh names: ")
    for name in sorted(names):
        print(str(name))
    if comparison_labels is not None:
        # For each label, count how many scene meshes match its name exactly;
        # a count of 1 means the label is uniquely resolvable.
        for label in comparison_labels:
            matches = [name == label["Name"] for name in names]
            counts.append(np.sum(np.array(matches)))
        # TODO(schmluk): These last parts are not cleaned up, change these if
        # the function is needed.
        print("Comparison Label names found in the scene: ", counts)
        print("Unique labels matched: %.1f percent" %
              (np.mean(np.array(counts) == 1) * 100))
def get_infrared_correction(target_file):
    """
    Compute the transfer dynamics from segmentation id in unreal and the value
    in the infrared image and save it to file.

    Every possible segmentation id (0-255) is applied to all meshes, a single
    infrared image is rendered, and the first pixel's value is recorded as the
    infrared id AirSim actually produces for that mesh id.

    Args:
        target_file: Path of the CSV file ('MeshID,InfraRedID') to write.
    """
    client = airsim.MultirotorClient()
    client.confirmConnection()
    counter = 0
    print("Computing infrared corrections ...")
    with open(target_file, 'w') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["MeshID", "InfraRedID"])
        for i in range(256):
            client.simSetSegmentationObjectID(r"[\w]*", i, True)
            responses = client.simGetImages([
                airsim.ImageRequest("Id_cam", airsim.ImageType.Infrared, False,
                                    False)
            ])
            response = responses[0]
            # np.fromstring is deprecated (and removed for binary input in
            # newer numpy); np.frombuffer is the drop-in replacement.
            img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
            writer.writerow([i, img1d[0]])
            # Print progress in roughly 5 percent steps.
            if i * 100 / 256 > counter:
                counter = counter + 5
                print("%i percent done ..." % counter)
    print("Saved infrared corrections in '%s'." % target_file)
def export_labels(labels, out_file_name, color_palette_file=None):
    """
    Save label data to file.

    Writes one CSV row per unique InstanceID containing the ids, the RGB
    color AirSim assigns to the label's MeshID, and the name/size.

    Args:
        labels: List of label dicts as produced by the labeling functions.
        out_file_name: Path of the CSV file to write.
        color_palette_file: Optional path to AirSim's segmentation color
            palette image. Defaults to the original hard-coded location for
            backward compatibility.
    """
    if color_palette_file is None:
        # NOTE(review): machine-specific default kept for backward
        # compatibility; pass color_palette_file explicitly on other setups.
        color_palette_file = (
            "/home/lukas/programs/AirSim/Unreal/Plugins/AirSim"
            "/Content/HUDAssets/seg_color_pallet.png")
    color_palette = imageio.imread(color_palette_file)
    with open(out_file_name, 'w') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow([
            "InstanceID", "ClassID", "PanopticID", "MeshID", "InfraredID", "R",
            "G", "B", "Name", "Size"
        ])
        previous_id = None
        for label in labels:
            if label["InstanceID"] == previous_id:
                # Only export the first entry per instance id.
                continue
            previous_id = label["InstanceID"]
            mesh_id = label["MeshID"]
            # The palette stores one color every 4 pixels in its first row.
            # NOTE(review): a MeshID of -1 (set by Labler on overflow) indexes
            # from the end of the palette — confirm whether -1 ids can occur
            # in exported data.
            color = color_palette[0, mesh_id * 4]
            writer.writerow([
                label["InstanceID"], label["ClassID"], label["PanopticID"],
                mesh_id, label["InfraredID"], color[0], color[1], color[2],
                label["Name"],
                # Labels from create_label_ids_large_flat carry no "Size"
                # field; write an empty string instead of raising a KeyError.
                label.get("Size", "")
            ])
    print("Saved %i labels in '%s'." % (len(labels), out_file_name))
def main_flat(get_ir_corrections, apply_mesh_labels, export_mesh_labels):
    """Run the labeling pipeline for the (small) flat dataset.

    Args:
        get_ir_corrections: Recompute the infrared correction table first.
        apply_mesh_labels: Push the labels to the running AirSim instance.
        export_mesh_labels: Write the label table to the dataset's labels.csv.
    """
    dataset_dir = "/home/lukas/Documents/Datasets/flat_dataset/"
    ir_file = dataset_dir + "infrared_corrections.csv"
    label_file = dataset_dir + "labels.csv"
    if get_ir_corrections:
        get_infrared_correction(ir_file)
    flat_labels = create_label_ids_flat(ir_file)
    if apply_mesh_labels:
        apply_labels_flat(flat_labels)
    if export_mesh_labels:
        export_labels(flat_labels, label_file)
def main_large_flat(get_ir_corrections, apply_mesh_labels, export_mesh_labels):
    """Run the labeling pipeline for the large flat dataset.

    Args:
        get_ir_corrections: Recompute the infrared correction table first.
        apply_mesh_labels: Push the labels to the running AirSim instance.
        export_mesh_labels: Write the label table to the dataset's labels.csv.
    """
    dataset_dir = "/home/lukas/Documents/Datasets/large_flat_dataset/"
    ir_file = dataset_dir + "infrared_corrections.csv"
    label_file = dataset_dir + "labels.csv"
    if get_ir_corrections:
        get_infrared_correction(ir_file)
    flat_labels, mesh_mapping = create_label_ids_large_flat(ir_file)
    if apply_mesh_labels:
        apply_labels_large_flat(mesh_mapping)
    if export_mesh_labels:
        export_labels(flat_labels, label_file)
if __name__ == "__main__":
    # Args.
    get_ir_corrections = False  # Recompute the infrared correction table.
    apply_mesh_labels = True  # Push labels to the running AirSim instance.
    export_mesh_labels = True  # Write the resulting labels.csv.
    # Run (switch the call below to process the large flat dataset instead).
    main_flat(get_ir_corrections, apply_mesh_labels, export_mesh_labels)
    # main_large_flat(get_ir_corrections, apply_mesh_labels, export_mesh_labels)
    # Tools.
    # get_available_meshes(f_labels)
| 40.156522 | 80 | 0.632655 |
b29305712fc2dd320ca526395c92ecac7de67926 | 1,527 | py | Python | static_res/info/prepare_resources.py | rrozek/common-objects | 614d25b4283485ebdedd512d7a0985ab9611a8a7 | [
"MIT"
] | null | null | null | static_res/info/prepare_resources.py | rrozek/common-objects | 614d25b4283485ebdedd512d7a0985ab9611a8a7 | [
"MIT"
] | null | null | null | static_res/info/prepare_resources.py | rrozek/common-objects | 614d25b4283485ebdedd512d7a0985ab9611a8a7 | [
"MIT"
] | null | null | null |
import glob
import sys
import hashlib
import os
import json
# The dataset root is passed as the first command-line argument; resources
# live under <root>/res and the generated description under <root>/info.
root_dir = os.path.abspath(sys.argv[1])
work_dir = os.path.join(root_dir, "res/")
result_dir = os.path.join(root_dir, "info")
print ("root: " + root_dir)
print ("work: " + work_dir)
print ("result: " + result_dir)
def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
    """Feed every chunk from *bytesiter* into *hasher* and return the digest.

    Returns the hex digest string when *ashexstr* is true, raw digest bytes
    otherwise.
    """
    for chunk in bytesiter:
        hasher.update(chunk)
    if ashexstr:
        return hasher.hexdigest()
    return hasher.digest()
def file_as_blockiter(afile, blocksize=65536):
    """Yield successive *blocksize* chunks of *afile*, closing it when the
    generator is exhausted (the file is used as its own context manager)."""
    with afile:
        while True:
            chunk = afile.read(blocksize)
            if not chunk:
                break
            yield chunk
# Collect one entry per file under work_dir: a path-derived id, the relative
# path, and the first 16 hex characters of the file's sha256.
resources_file = {}
resources_array = []
for filename in glob.iglob(work_dir + '**/*', recursive=True):
    if os.path.isfile(filename):
        # Hash in 64 KiB blocks; file_as_blockiter closes the handle when the
        # iterator is exhausted.
        hexSha256Half = hash_bytestr_iter(file_as_blockiter(open(os.path.abspath(filename), 'rb')), hashlib.sha256(), True)[:16]
        item = {}
        # Id: relative path with separators replaced by '_' and everything
        # after the first '.' dropped.
        item['id'] = os.path.relpath(filename, root_dir).replace(os.sep, '_').split('.')[0]
        item['path'] = os.path.relpath(filename, root_dir)
        item['checksum']=hexSha256Half
        resources_array.append(item)
        print(item)
resources_file['data'] = resources_array
# Overall checksum over the canonical (sorted-key) JSON of all entries.
hasher = hashlib.sha256()
hasher.update(json.dumps(resources_array, sort_keys=True).encode('utf-8'))
resources_file['checksum'] = hasher.hexdigest()[:16]
with open(os.path.join(result_dir, "description.json"), 'wt') as destFile:
    destFile.write(json.dumps(resources_file, sort_keys=True))
| 31.163265 | 128 | 0.680419 |
0bcd5ca2bcd13552fc7eaaefbae797adf5f0f1ea | 25,766 | py | Python | optuna/multi_objective/samplers/_motpe.py | srijan-deepsource/optuna | 2a83adf1e5104a4cde2f8f275788dc1aaf246097 | [
"MIT"
] | 1 | 2021-10-09T16:06:45.000Z | 2021-10-09T16:06:45.000Z | optuna/multi_objective/samplers/_motpe.py | srijan-deepsource/optuna | 2a83adf1e5104a4cde2f8f275788dc1aaf246097 | [
"MIT"
] | 2 | 2020-11-09T13:56:48.000Z | 2021-02-10T08:20:31.000Z | optuna/multi_objective/samplers/_motpe.py | crcrpar/optuna | d25c7ee8e103575207978ec09a14ad9a4fefa53d | [
"MIT"
] | 1 | 2020-12-25T03:27:49.000Z | 2020-12-25T03:27:49.000Z | import math
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import optuna
from optuna import distributions
from optuna import multi_objective
from optuna._deprecated import deprecated
from optuna.distributions import BaseDistribution
from optuna.multi_objective import _hypervolume
from optuna.multi_objective.samplers import BaseMultiObjectiveSampler
from optuna.multi_objective.samplers._random import RandomMultiObjectiveSampler
from optuna.samplers import TPESampler
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimator
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimatorParameters
from optuna.study import StudyDirection
EPS = 1e-12
_SPLITCACHE_KEY = "multi_objective:motpe:splitcache"
_WEIGHTS_BELOW_KEY = "multi_objective:motpe:weights_below"
def default_gamma(x: int) -> int:
    """Default split point: the floor of 10% of the observation count."""
    fraction = 0.1 * x
    return int(np.floor(fraction))
def _default_weights_above(x: int) -> np.ndarray:
return np.ones(x)
@deprecated("2.4.0", "4.0.0")
class MOTPEMultiObjectiveSampler(TPESampler, BaseMultiObjectiveSampler):
"""Multi-objective sampler using the MOTPE algorithm.
This sampler is a multiobjective version of :class:`~optuna.samplers.TPESampler`.
For further information about MOTPE algorithm, please refer to the following paper:
- `Multiobjective tree-structured parzen estimator for computationally expensive optimization
problems <https://dl.acm.org/doi/abs/10.1145/3377930.3389817>`_
Args:
consider_prior:
Enhance the stability of Parzen estimator by imposing a Gaussian prior when
:obj:`True`. The prior is only effective if the sampling distribution is
either :class:`~optuna.distributions.UniformDistribution`,
:class:`~optuna.distributions.DiscreteUniformDistribution`,
:class:`~optuna.distributions.LogUniformDistribution`,
:class:`~optuna.distributions.IntUniformDistribution`,
or :class:`~optuna.distributions.IntLogUniformDistribution`.
prior_weight:
The weight of the prior. This argument is used in
:class:`~optuna.distributions.UniformDistribution`,
:class:`~optuna.distributions.DiscreteUniformDistribution`,
:class:`~optuna.distributions.LogUniformDistribution`,
:class:`~optuna.distributions.IntUniformDistribution`,
:class:`~optuna.distributions.IntLogUniformDistribution`, and
:class:`~optuna.distributions.CategoricalDistribution`.
consider_magic_clip:
Enable a heuristic to limit the smallest variances of Gaussians used in
the Parzen estimator.
consider_endpoints:
Take endpoints of domains into account when calculating variances of Gaussians
in Parzen estimator. See the original paper for details on the heuristics
to calculate the variances.
n_startup_trials:
The random sampling is used instead of the MOTPE algorithm until the given number
of trials finish in the same study. 11 * number of variables - 1 is recommended in the
original paper.
n_ehvi_candidates:
Number of candidate samples used to calculate the expected hypervolume improvement.
gamma:
A function that takes the number of finished trials and returns the number of trials to
form a density function for samples with low grains. See the original paper for more
details.
weights_above:
A function that takes the number of finished trials and returns a weight for them. As
default, weights are automatically calculated by the MOTPE's default strategy.
seed:
Seed for random number generator.
.. note::
Initialization with Latin hypercube sampling may improve optimization performance.
However, the current implementation only supports initialization with random sampling.
Example:
.. testcode::
import optuna
seed = 128
num_variables = 9
n_startup_trials = 11 * num_variables - 1
def objective(trial):
x = []
for i in range(1, num_variables + 1):
x.append(trial.suggest_float(f"x{i}", 0.0, 2.0 * i))
return x
sampler = optuna.multi_objective.samplers.MOTPEMultiObjectiveSampler(
n_startup_trials=n_startup_trials, n_ehvi_candidates=24, seed=seed
)
study = optuna.multi_objective.create_study(
["minimize"] * num_variables, sampler=sampler
)
study.optimize(objective, n_trials=250)
"""
    def __init__(
        self,
        consider_prior: bool = True,
        prior_weight: float = 1.0,
        consider_magic_clip: bool = True,
        consider_endpoints: bool = True,
        n_startup_trials: int = 10,
        n_ehvi_candidates: int = 24,
        gamma: Callable[[int], int] = default_gamma,
        weights_above: Callable[[int], np.ndarray] = _default_weights_above,
        seed: Optional[int] = None,
    ) -> None:
        # Reuse the single-objective TPE machinery: ``n_ehvi_candidates``
        # maps onto TPE's ``n_ei_candidates`` and ``weights_above`` onto its
        # ``weights`` argument.
        super().__init__(
            consider_prior=consider_prior,
            prior_weight=prior_weight,
            consider_magic_clip=consider_magic_clip,
            consider_endpoints=consider_endpoints,
            n_startup_trials=n_startup_trials,
            n_ei_candidates=n_ehvi_candidates,
            gamma=gamma,
            weights=weights_above,
            seed=seed,
        )
        self._n_ehvi_candidates = n_ehvi_candidates
        # Fallback sampler used until ``n_startup_trials`` trials finished.
        self._mo_random_sampler = RandomMultiObjectiveSampler(seed=seed)
    def reseed_rng(self) -> None:
        # Replace the RNG with a fresh, OS-seeded one and also reseed the
        # random sampler used during the startup phase.
        self._rng = np.random.RandomState()
        self._mo_random_sampler.reseed_rng()
    def infer_relative_search_space(
        self,
        study: Union[optuna.study.Study, "multi_objective.study.MultiObjectiveStudy"],
        trial: Union[optuna.trial.FrozenTrial, "multi_objective.trial.FrozenMultiObjectiveTrial"],
    ) -> Dict[str, BaseDistribution]:
        # MOTPE samples every parameter independently, so no relative search
        # space is used.
        return {}
    def sample_relative(
        self,
        study: Union[optuna.study.Study, "multi_objective.study.MultiObjectiveStudy"],
        trial: Union[optuna.trial.FrozenTrial, "multi_objective.trial.FrozenMultiObjectiveTrial"],
        search_space: Dict[str, BaseDistribution],
    ) -> Dict[str, Any]:
        # All sampling happens in sample_independent; nothing to do here.
        return {}
    def sample_independent(
        self,
        study: Union[optuna.study.Study, "multi_objective.study.MultiObjectiveStudy"],
        trial: Union[optuna.trial.FrozenTrial, "multi_objective.trial.FrozenMultiObjectiveTrial"],
        param_name: str,
        param_distribution: BaseDistribution,
    ) -> Any:
        """Sample one parameter value with the MOTPE strategy.

        Falls back to random sampling until ``n_startup_trials`` trials
        finished; afterwards past observations are split into a "below"
        (good) and "above" (rest) set and the distribution-specific MOTPE
        sampler is dispatched on the distribution type.
        """
        assert isinstance(study, multi_objective.study.MultiObjectiveStudy)
        assert isinstance(trial, multi_objective.trial.FrozenMultiObjectiveTrial)
        if len(study.directions) < 2:
            raise ValueError(
                "Number of objectives must be >= 2. "
                "Please use optuna.samplers.TPESampler for single-objective optimization."
            ) from None
        values, scores = _get_observation_pairs(study, param_name)
        n = len(values)
        # Random sampling during the startup phase.
        if n < self._n_startup_trials:
            return self._mo_random_sampler.sample_independent(
                study, trial, param_name, param_distribution
            )
        below_param_values, above_param_values = self._split_mo_observation_pairs(
            study, trial, values, scores
        )
        # Dispatch on the concrete distribution type.
        if isinstance(param_distribution, distributions.UniformDistribution):
            return self._sample_mo_uniform(
                study, trial, param_distribution, below_param_values, above_param_values
            )
        elif isinstance(param_distribution, distributions.LogUniformDistribution):
            return self._sample_mo_loguniform(
                study, trial, param_distribution, below_param_values, above_param_values
            )
        elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
            return self._sample_mo_discrete_uniform(
                study, trial, param_distribution, below_param_values, above_param_values
            )
        elif isinstance(param_distribution, distributions.IntUniformDistribution):
            return self._sample_mo_int(
                study, trial, param_distribution, below_param_values, above_param_values
            )
        elif isinstance(param_distribution, distributions.IntLogUniformDistribution):
            return self._sample_mo_int_loguniform(
                study, trial, param_distribution, below_param_values, above_param_values
            )
        elif isinstance(param_distribution, distributions.CategoricalDistribution):
            index = self._sample_mo_categorical_index(
                study, trial, param_distribution, below_param_values, above_param_values
            )
            return param_distribution.choices[index]
        else:
            distribution_list = [
                distributions.UniformDistribution.__name__,
                distributions.LogUniformDistribution.__name__,
                distributions.DiscreteUniformDistribution.__name__,
                distributions.IntUniformDistribution.__name__,
                distributions.IntLogUniformDistribution.__name__,
                distributions.CategoricalDistribution.__name__,
            ]
            raise NotImplementedError(
                "The distribution {} is not implemented. "
                "The parameter distribution should be one of the {}".format(
                    param_distribution, distribution_list
                )
            )
    def _split_mo_observation_pairs(
        self,
        study: "multi_objective.study.MultiObjectiveStudy",
        trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
        config_vals: List[Optional[float]],
        loss_vals: List[List[float]],
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Split observations into observations for l(x) and g(x) with the ratio of gamma:1-gamma.

        Weights for l(x) are also calculated in this method.
        This splitting strategy consists of the following two steps:
        1. Nondonation rank-based selection
        2. Hypervolume subset selection problem (HSSP)-based selection
        Please refer to the `original paper <https://dl.acm.org/doi/abs/10.1145/3377930.3389817>`_
        for more details.
        """
        cvals = np.asarray(config_vals)
        lvals = np.asarray(loss_vals)
        # Solving HSSP for variables number of times is a waste of time.
        # We cache the result of splitting.
        if _SPLITCACHE_KEY in trial.system_attrs:
            split_cache = trial.system_attrs[_SPLITCACHE_KEY]
            indices_below = np.asarray(split_cache["indices_below"])
            weights_below = np.asarray(split_cache["weights_below"])
            indices_above = np.asarray(split_cache["indices_above"])
        else:
            nondomination_ranks = _calculate_nondomination_rank(lvals)
            n_below = self._gamma(len(lvals))
            assert 0 <= n_below <= len(lvals)
            indices = np.array(range(len(lvals)))
            indices_below = np.array([], dtype=int)
            # Nondomination rank-based selection: take whole fronts while
            # they still fit into the "below" budget.
            i = 0
            while len(indices_below) + sum(nondomination_ranks == i) <= n_below:
                indices_below = np.append(indices_below, indices[nondomination_ranks == i])
                i += 1
            # Hypervolume subset selection problem (HSSP)-based selection:
            # greedily pick from the first front that did not fully fit.
            subset_size = n_below - len(indices_below)
            if subset_size > 0:
                rank_i_lvals = lvals[nondomination_ranks == i]
                rank_i_indices = indices[nondomination_ranks == i]
                worst_point = np.max(rank_i_lvals, axis=0)
                # Reference point slightly beyond the worst observed point
                # (works for both signs of the coordinate values).
                reference_point = np.maximum(1.1 * worst_point, 0.9 * worst_point)
                reference_point[reference_point == 0] = EPS
                selected_indices = self._solve_hssp(
                    rank_i_lvals, rank_i_indices, subset_size, reference_point
                )
                indices_below = np.append(indices_below, selected_indices)
            assert len(indices_below) == n_below
            indices_above = np.setdiff1d(indices, indices_below)
            # Persist the split (and weights) on the trial so subsequent
            # parameters of the same trial reuse it.
            attrs = {
                "indices_below": indices_below.tolist(),
                "indices_above": indices_above.tolist(),
            }
            weights_below = self._calculate_weights_below(lvals, indices_below)
            attrs["weights_below"] = weights_below.tolist()
            study._storage.set_trial_system_attr(trial._trial_id, _SPLITCACHE_KEY, attrs)
        below = cvals[indices_below]
        # Store only the weights of observations that actually have a value
        # for this parameter (None entries are dropped below).
        study._storage.set_trial_system_attr(
            trial._trial_id,
            _WEIGHTS_BELOW_KEY,
            [w for w, v in zip(weights_below, below) if v is not None],
        )
        below = np.asarray([v for v in below if v is not None], dtype=float)
        above = cvals[indices_above]
        above = np.asarray([v for v in above if v is not None], dtype=float)
        return below, above
def _sample_mo_uniform(
self,
study: "multi_objective.study.MultiObjectiveStudy",
trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
distribution: distributions.UniformDistribution,
below: np.ndarray,
above: np.ndarray,
) -> float:
low = distribution.low
high = distribution.high
return self._sample_mo_numerical(study, trial, low, high, below, above)
def _sample_mo_loguniform(
self,
study: "multi_objective.study.MultiObjectiveStudy",
trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
distribution: distributions.LogUniformDistribution,
below: np.ndarray,
above: np.ndarray,
) -> float:
low = distribution.low
high = distribution.high
return self._sample_mo_numerical(study, trial, low, high, below, above, is_log=True)
def _sample_mo_discrete_uniform(
self,
study: "multi_objective.study.MultiObjectiveStudy",
trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
distribution: distributions.DiscreteUniformDistribution,
below: np.ndarray,
above: np.ndarray,
) -> float:
q = distribution.q
r = distribution.high - distribution.low
# [low, high] is shifted to [0, r] to align sampled values at regular intervals.
low = 0 - 0.5 * q
high = r + 0.5 * q
# Shift below and above to [0, r]
above -= distribution.low
below -= distribution.low
best_sample = (
self._sample_mo_numerical(study, trial, low, high, below, above, q=q)
+ distribution.low
)
return min(max(best_sample, distribution.low), distribution.high)
def _sample_mo_int(
self,
study: "multi_objective.study.MultiObjectiveStudy",
trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
distribution: distributions.IntUniformDistribution,
below: np.ndarray,
above: np.ndarray,
) -> int:
d = distributions.DiscreteUniformDistribution(
low=distribution.low, high=distribution.high, q=distribution.step
)
return int(self._sample_mo_discrete_uniform(study, trial, d, below, above))
def _sample_mo_int_loguniform(
self,
study: "multi_objective.study.MultiObjectiveStudy",
trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
distribution: distributions.IntLogUniformDistribution,
below: np.ndarray,
above: np.ndarray,
) -> int:
low = distribution.low - 0.5
high = distribution.high + 0.5
sample = self._sample_mo_numerical(study, trial, low, high, below, above, is_log=True)
best_sample = np.round(sample)
return int(min(max(best_sample, distribution.low), distribution.high))
    def _sample_mo_numerical(
        self,
        study: "multi_objective.study.MultiObjectiveStudy",
        trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
        low: float,
        high: float,
        below: np.ndarray,
        above: np.ndarray,
        q: Optional[float] = None,
        is_log: bool = False,
    ) -> float:
        """Draw a numerical parameter value via the TPE criterion.

        Fits one Parzen estimator to the "below" (good) observations and one
        to the "above" (bad) observations, samples candidates from the
        "below" model, and returns the candidate that maximizes l(x)/g(x).

        Args:
            low/high: Domain bounds (before the log transform when ``is_log``).
            below/above: Observed parameter values in each group.
            q: Quantization step for discretized domains, or None.
            is_log: Whether to work in log space.
        """
        if is_log:
            # Sample in log space; the result is exponentiated on return.
            low = np.log(low)
            high = np.log(high)
            below = np.log(below)
            above = np.log(above)
        size = (self._n_ehvi_candidates,)
        # The "below" group is weighted by the hypervolume-contribution
        # weights cached on the trial by the split step (lazy lookup; the
        # estimator calls this with the observation count, which is ignored).
        weights_below: Callable[[int], np.ndarray]
        weights_below = lambda _: np.asarray(  # NOQA
            study._storage.get_trial(trial._trial_id).system_attrs[_WEIGHTS_BELOW_KEY],
            dtype=float,
        )
        parzen_estimator_parameters_below = _ParzenEstimatorParameters(
            self._parzen_estimator_parameters.consider_prior,
            self._parzen_estimator_parameters.prior_weight,
            self._parzen_estimator_parameters.consider_magic_clip,
            self._parzen_estimator_parameters.consider_endpoints,
            weights_below,
        )
        parzen_estimator_below = _ParzenEstimator(
            mus=below, low=low, high=high, parameters=parzen_estimator_parameters_below
        )
        samples_below = self._sample_from_gmm(
            parzen_estimator=parzen_estimator_below,
            low=low,
            high=high,
            q=q,
            size=size,
        )
        log_likelihoods_below = self._gmm_log_pdf(
            samples=samples_below,
            parzen_estimator=parzen_estimator_below,
            low=low,
            high=high,
            q=q,
        )
        # The "above" group uses the sampler's default weighting.
        weights_above = self._weights
        parzen_estimator_parameters_above = _ParzenEstimatorParameters(
            self._parzen_estimator_parameters.consider_prior,
            self._parzen_estimator_parameters.prior_weight,
            self._parzen_estimator_parameters.consider_magic_clip,
            self._parzen_estimator_parameters.consider_endpoints,
            weights_above,
        )
        parzen_estimator_above = _ParzenEstimator(
            mus=above, low=low, high=high, parameters=parzen_estimator_parameters_above
        )
        # Candidates are drawn only from the "below" model; the "above" model
        # just scores the same candidates.
        log_likelihoods_above = self._gmm_log_pdf(
            samples=samples_below,
            parzen_estimator=parzen_estimator_above,
            low=low,
            high=high,
            q=q,
        )
        ret = float(
            TPESampler._compare(
                samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above
            )[0]
        )
        return math.exp(ret) if is_log else ret
    def _sample_mo_categorical_index(
        self,
        study: "multi_objective.study.MultiObjectiveStudy",
        trial: "multi_objective.trial.FrozenMultiObjectiveTrial",
        distribution: distributions.CategoricalDistribution,
        below: np.ndarray,
        above: np.ndarray,
    ) -> int:
        """Pick a categorical choice index via the TPE criterion.

        Builds weighted, prior-smoothed category histograms for the "below"
        and "above" groups, samples candidate indices from the "below"
        histogram, and returns the candidate maximizing l(x)/g(x).
        """
        choices = distribution.choices
        below = list(map(int, below))
        above = list(map(int, above))
        upper = len(choices)
        size = (self._n_ehvi_candidates,)
        # "Below" histogram uses the cached hypervolume-contribution weights.
        weights_below = study._storage.get_trial(trial._trial_id).system_attrs[_WEIGHTS_BELOW_KEY]
        counts_below = np.bincount(below, minlength=upper, weights=weights_below)
        weighted_below = counts_below + self._prior_weight
        weighted_below /= weighted_below.sum()
        samples_below = self._sample_from_categorical_dist(weighted_below, size)
        log_likelihoods_below = TPESampler._categorical_log_pdf(samples_below, weighted_below)
        # "Above" histogram uses the sampler's default weighting.
        weights_above = self._weights(len(above))
        counts_above = np.bincount(above, minlength=upper, weights=weights_above)
        weighted_above = counts_above + self._prior_weight
        weighted_above /= weighted_above.sum()
        log_likelihoods_above = TPESampler._categorical_log_pdf(samples_below, weighted_above)
        return int(
            TPESampler._compare(
                samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above
            )[0]
        )
    @staticmethod
    def _compute_hypervolume(solution_set: np.ndarray, reference_point: np.ndarray) -> float:
        # Hypervolume dominated by ``solution_set`` with respect to
        # ``reference_point``, computed with the WFG algorithm.
        return _hypervolume.WFG().compute(solution_set, reference_point)
    def _solve_hssp(
        self,
        rank_i_loss_vals: np.ndarray,
        rank_i_indices: np.ndarray,
        subset_size: int,
        reference_point: np.ndarray,
    ) -> np.ndarray:
        """Solve a hypervolume subset selection problem (HSSP) via a greedy algorithm.
        This method is a 1-1/e approximation algorithm to solve HSSP.
        For further information about algorithms to solve HSSP, please refer to the following
        paper:
        - `Greedy Hypervolume Subset Selection in Low Dimensions
        <https://ieeexplore.ieee.org/document/7570501>`_
        """
        selected_vecs = []  # type: List[np.ndarray]
        selected_indices = []  # type: List[int]
        # contributions[j] starts as the hypervolume of point j alone and is
        # decremented as selected points absorb part of that volume.
        contributions = [
            self._compute_hypervolume(np.asarray([v]), reference_point) for v in rank_i_loss_vals
        ]
        hv_selected = 0.0
        while len(selected_indices) < subset_size:
            # Greedily take the point with the largest remaining contribution.
            max_index = np.argmax(contributions)
            # NOTE(review): -1 doubles as the "already selected" sentinel below;
            # a remaining contribution could in principle hit exactly -1 after
            # repeated subtraction — a -inf sentinel would be unambiguous. Confirm.
            contributions[max_index] = -1  # mark as selected
            selected_index = rank_i_indices[max_index]
            selected_vec = rank_i_loss_vals[max_index]
            for j, v in enumerate(rank_i_loss_vals):
                if contributions[j] == -1:
                    continue
                # p is the component-wise worst of the selected point and v:
                # the part of v's box already covered by the selection.
                p = np.max([selected_vec, v], axis=0)
                contributions[j] -= (
                    self._compute_hypervolume(np.asarray(selected_vecs + [p]), reference_point)
                    - hv_selected
                )
            selected_vecs += [selected_vec]
            selected_indices += [selected_index]
            hv_selected = self._compute_hypervolume(np.asarray(selected_vecs), reference_point)
        return np.asarray(selected_indices, dtype=int)
def _calculate_weights_below(
self,
lvals: np.ndarray,
indices_below: np.ndarray,
) -> np.ndarray:
# Calculate weights based on hypervolume contributions.
n_below = len(indices_below)
if n_below == 0:
return np.asarray([])
elif n_below == 1:
return np.asarray([1.0])
else:
lvals_below = lvals[indices_below].tolist()
worst_point = np.max(lvals_below, axis=0)
reference_point = np.maximum(1.1 * worst_point, 0.9 * worst_point)
reference_point[reference_point == 0] = EPS
hv = self._compute_hypervolume(np.asarray(lvals_below), reference_point)
contributions = np.asarray(
[
hv
- self._compute_hypervolume(
np.asarray(lvals_below[:i] + lvals_below[i + 1 :]), reference_point
)
for i in range(len(lvals))
]
)
weights_below = np.clip(contributions / np.max(contributions), 0, 1)
return weights_below
def _calculate_nondomination_rank(loss_vals: np.ndarray) -> np.ndarray:
vecs = loss_vals.copy()
# Normalize values
lb = vecs.min(axis=0, keepdims=True)
ub = vecs.max(axis=0, keepdims=True)
vecs = (vecs - lb) / (ub - lb)
ranks = np.zeros(len(vecs))
num_unranked = len(vecs)
rank = 0
while num_unranked > 0:
extended = np.tile(vecs, (vecs.shape[0], 1, 1))
counts = np.sum(
np.logical_and(
np.all(extended <= np.swapaxes(extended, 0, 1), axis=2),
np.any(extended < np.swapaxes(extended, 0, 1), axis=2),
),
axis=1,
)
vecs[counts == 0] = 1.1 # mark as ranked
ranks[counts == 0] = rank
rank += 1
num_unranked -= np.sum(counts == 0)
return ranks
def _get_observation_pairs(
    study: "multi_objective.study.MultiObjectiveStudy",
    param_name: str,
) -> Tuple[List[Optional[float]], List[List[float]]]:
    """Get observation pairs from the study.
    This function collects observation pairs from the complete trials of the study.
    Pruning is currently not supported.
    The values for trials that don't contain the parameter named ``param_name`` are set to None.
    Objective values are negated if their directions are maximization and all objectives are
    treated as minimization in the MOTPE algorithm.
    """
    trials = [
        multi_objective.trial.FrozenMultiObjectiveTrial(study.n_objectives, trial)
        for trial in study._storage.get_all_trials(study._study_id, deepcopy=False)
    ]
    values = []
    scores = []
    for trial in trials:
        # Skip incomplete trials and trials with any missing objective value.
        if trial.state != optuna.trial.TrialState.COMPLETE or None in trial.values:
            continue
        param_value = None  # type: Optional[float]
        if param_name in trial.params:
            # Parameter values are stored in their internal (float) representation.
            distribution = trial.distributions[param_name]
            param_value = distribution.to_internal_repr(trial.params[param_name])
        # Convert all objectives to minimization
        score = [
            cast(float, v) if d == StudyDirection.MINIMIZE else -cast(float, v)
            for d, v in zip(study.directions, trial.values)
        ]
        values.append(param_value)
        scores.append(score)
    return values, scores
| 40.259375 | 99 | 0.645308 |
9eaee0eb4acf1d9ea9dd71ca7263509237fbd3b9 | 3,042 | py | Python | tfcgp/ga.py | d9w/TFCGP | 641d968d6b4d46a91acb6a5d3e00d3aaa5bc227e | [
"Apache-2.0"
] | 3 | 2019-05-29T07:27:04.000Z | 2021-06-23T03:36:20.000Z | tfcgp/ga.py | d9w/TFCGP | 641d968d6b4d46a91acb6a5d3e00d3aaa5bc227e | [
"Apache-2.0"
] | null | null | null | tfcgp/ga.py | d9w/TFCGP | 641d968d6b4d46a91acb6a5d3e00d3aaa5bc227e | [
"Apache-2.0"
] | null | null | null | from tfcgp.problem import Problem
from tfcgp.chromosome import Chromosome
import numpy as np
import os
class GA:
    """Simple generational genetic algorithm over CGP chromosomes.

    Keeps a fixed-size population, evaluates fitness through ``problem``,
    and evolves by elitism, three-way tournament selection, and per-gene
    mutation.  Improvements in best-ever fitness are appended to
    ``<root_dir>/logs/<logname>.log``.
    """
    def __init__(self, problem, config, logname='test', root_dir='.'):
        # problem supplies nin/nout and get_fitness(); config.cfg holds the
        # GA hyperparameters (population size, rates, elitism fraction).
        self.config = config
        self.problem = problem
        self.max_fit = 0.0
        self.population = []
        for i in range(self.config.cfg["ga_population"]):
            ch = Chromosome(self.problem.nin, self.problem.nout)
            ch.random(config)
            self.population += [ch]
        # Fitness cache; -inf marks "not yet evaluated".
        self.fits = -np.inf*np.ones(self.config.cfg["ga_population"])
        self.logfile = os.path.join(root_dir, 'logs', logname+'.log')
        self.logname = logname
        self.generation = 0
    def run(self, n_steps):
        # Advance the GA by n_steps generations.
        for i in range(n_steps):
            self.step()
    def mutate(self, chromosome):
        # Return a new child whose genes are independently resampled with
        # probability config.cfg["mutation_rate"].
        child_genes = np.copy(chromosome.genes)
        change = np.random.rand(len(child_genes)) < self.config.cfg["mutation_rate"]
        child_genes[change] = np.random.rand(np.sum(change))
        child = Chromosome(self.problem.nin, self.problem.nout)
        child.from_genes(child_genes, self.config)
        return child
    def select(self):
        # return the winner of a random three-way tournament
        inds = np.arange(len(self.population))
        np.random.shuffle(inds)
        winner = inds[np.argmax(self.fits[inds[:3]])]
        return winner
    def step(self):
        # Evaluate any individuals that do not yet have a cached fitness.
        for i in range(len(self.population)):
            if self.fits[i] == -np.inf:
                fitness, history = self.problem.get_fitness(self.population[i])
                self.fits[i] = fitness
        # Log whenever the best-ever fitness improves.
        if np.max(self.fits) > self.max_fit:
            self.max_fit = np.max(self.fits)
            with open(self.logfile, 'a') as f:
                f.write('E,%s,%d,%d,%d,%d,%0.10f,%0.10f,%0.10f\n' %
                        (self.logname, self.generation, self.problem.eval_count, 0,
                         self.problem.epochs, 0.0, 0.0, self.max_fit))
        self.generation += 1
        # Build the next generation: elites, mutated children, then copies.
        new_pop = []
        new_fits = -np.inf*np.ones(self.config.cfg["ga_population"])
        n_elites = int(round(self.config.cfg["ga_population"]*
                             self.config.cfg["ga_elitism"]))
        elites = np.argsort(self.fits)[::-1]
        # Elites carry over unchanged, keeping their cached fitness.
        for i in range(n_elites):
            new_pop += [self.population[elites[i]]]
            new_fits[i] = self.fits[elites[i]]
        n_mutate = int(round(self.config.cfg["ga_population"]*
                             self.config.cfg["ga_mutation"]))
        # Mutated children must be re-evaluated next step (-inf fitness).
        for i in range(n_mutate):
            pid = self.select()
            child = self.mutate(self.population[pid])
            new_pop += [child]
            new_fits[n_elites+i] = -np.inf
        n_rest = self.config.cfg["ga_population"] - n_elites - n_mutate
        # Remaining slots are filled with tournament-selected copies.
        for i in range(n_elites + n_mutate, self.config.cfg["ga_population"]):
            pid = self.select()
            new_pop += [self.population[pid]]
            new_fits[i] = self.fits[pid]
        self.population = new_pop
        self.fits = new_fits
| 36.650602 | 84 | 0.574951 |
487df6b081936a292ef50ddbcf4f3d96b9cf151f | 82 | py | Python | Python-Fundamentals-June-2019/03_lists/09_sort_numbers.py | marinakolova/Python-Courses | eb95c782307be561b5026c5adafaa001b04caf4f | [
"MIT"
] | null | null | null | Python-Fundamentals-June-2019/03_lists/09_sort_numbers.py | marinakolova/Python-Courses | eb95c782307be561b5026c5adafaa001b04caf4f | [
"MIT"
] | null | null | null | Python-Fundamentals-June-2019/03_lists/09_sort_numbers.py | marinakolova/Python-Courses | eb95c782307be561b5026c5adafaa001b04caf4f | [
"MIT"
] | null | null | null | print(' <= '.join([str(n) for n in sorted([int(n) for n in input().split(' ')])])) | 82 | 82 | 0.54878 |
26bd303fad54d5d379c3b7a9de332f1cbc596526 | 650 | py | Python | opencensus/trace/ext/mysql/__init__.py | bshaffer/opencensus-python | c624558c6829982d3464a5df29b48952f1fe23bc | [
"Apache-2.0"
] | null | null | null | opencensus/trace/ext/mysql/__init__.py | bshaffer/opencensus-python | c624558c6829982d3464a5df29b48952f1fe23bc | [
"Apache-2.0"
] | 1 | 2021-06-10T23:59:36.000Z | 2021-06-10T23:59:36.000Z | opencensus/trace/ext/mysql/__init__.py | bshaffer/opencensus-python | c624558c6829982d3464a5df29b48952f1fe23bc | [
"Apache-2.0"
] | 1 | 2019-09-01T06:00:13.000Z | 2019-09-01T06:00:13.000Z | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opencensus.trace.ext.mysql import trace
__all__ = ['trace']
| 36.111111 | 74 | 0.763077 |
ccfa5c0d0be7112453e707fc3f4dcbc9c0d2fa75 | 127 | py | Python | regularexpression/password regular ex.py | poojavaibhavsahu/Pooja_Python | 58122bfa8586883145042b11fe1cc013c803ab4f | [
"bzip2-1.0.6"
] | null | null | null | regularexpression/password regular ex.py | poojavaibhavsahu/Pooja_Python | 58122bfa8586883145042b11fe1cc013c803ab4f | [
"bzip2-1.0.6"
] | null | null | null | regularexpression/password regular ex.py | poojavaibhavsahu/Pooja_Python | 58122bfa8586883145042b11fe1cc013c803ab4f | [
"bzip2-1.0.6"
] | null | null | null | import re
import re

# Demo: report "valid" when the candidate password starts with at least 8
# characters drawn from letters, digits, '.' and '@'.
# Fix: the original bound the string to ``str``, shadowing the builtin.
password = "pooja.sahoo123@"
match = re.search(r"(^[A-Za-z0-9.@]{8})", password)
if match:
    print("valid")
else:
    print("not valid")
| 15.875 | 39 | 0.582677 |
f2b9d40e29e46e525303714ee67f97a953240129 | 2,875 | py | Python | Events/dukes_archives_constants.py | grimrhapsody/StJudeSouls | c6123ceff260faa76997c1eba3c5832d75d52ece | [
"MIT"
] | null | null | null | Events/dukes_archives_constants.py | grimrhapsody/StJudeSouls | c6123ceff260faa76997c1eba3c5832d75d52ece | [
"MIT"
] | null | null | null | Events/dukes_archives_constants.py | grimrhapsody/StJudeSouls | c6123ceff260faa76997c1eba3c5832d75d52ece | [
"MIT"
] | null | null | null | import soulstruct.emevd.ds_types as dt
"""
Mimics info:
- They have special effect 5421 when they are closed and waiting to eat you.
- Warp player to Mimic model point 100 and force animation 7521.
- Issue Mimic AI command 10 (slot 0), replan, wait 0.1 seconds, and revert to command -1 (slot 0).
- This should work if the Mimic is hostile to whoever is opening it.
- Make Mimic invincible so it can't be disturbed.
"""
"""
Looter positions:
1. Patrols on ground floor of first room. Mimic is next to rotating stairs pillar.
2. Patrols on ground floor of second room. Chest is somewhere.
3. Patrols on upper floors of first room. Chest is at one end.
4. Patrols on upper floors of second room. Chest is at one end.
5. Patrols along route to prison. Mimic is in the little room in the middle.
6. Patrols in dark part near stairs to exterior. Chest is in the dark room.
"""
class CHR(dt.Character):
    """Character entity IDs referenced by the Duke's Archives event scripts."""

    Logan = 6030
    LoganTalk = 6032
    Havel = 6580
    NakedHavel = 6581
    Crow = 1700675
    # Looters 1-6 patrol the routes described in the module notes above.
    Looter1 = 1700600
    Looter2 = 1700601
    Looter3 = 1700602
    Looter4 = 1700603
    Looter5 = 1700604
    Looter6 = 1700605
    LoganMimic1 = 1700410
    LoganMimic2 = 1700411
    LoganMimic3 = 1700412
    LoganMimic4 = 1700413
    LoganMimic5 = 1700414
    LoganMimic6 = 1700415
    HungryMimic1 = 1700400
    HungryMimic2 = 1700401
    HungryMimic3 = 1700402
    HungryMimic4 = 1700403
    HungryMimic5 = 1700404
    HungryMimic6 = 1700405
class FLAG(dt.Flag):
    """Event flag IDs tracking quest and world state in the Duke's Archives."""

    LoganRecruited = 1904
    HavelRecruited = 1905
    MimicRingGiven = 11702003
    HavelGearReturned = 11702007
    PisacasAllDead = 11702005
    GiantCellOpened = 11700320
    GiantCellIsOpen = 61700320
    Mimic1Fed = 11702020
    Mimic2Fed = 11702021
    Mimic3Fed = 11702022
    Mimic4Fed = 11702023
    Mimic5Fed = 11702024
    Mimic6Fed = 11702025
class OBJECT(dt.Object):
    """Object entity IDs (chests and doors) in the Duke's Archives map."""

    Chest1 = 1701650
    Chest2 = 1701651
    Chest3 = 1701652
    Chest4 = 1701653
    Chest5 = 1701654
    Chest6 = 1701655
    GiantCellDoor = 1701506
class WEAPON(dt.Weapon):
    """Weapon item IDs used by these events (Havel's equipment)."""

    DragonTooth = 854000
    HavelsGreatshield = 1505000
class ARMOR(dt.Armor):
    """Armor item IDs for Havel's equipment set."""

    HavelsHelm = 440000
    HavelsArmor = 441000
    HavelsGauntlets = 442000
    HavelsLeggings = 443000
class ITEMLOT(dt.ItemLot):
    """Item lot IDs awarded by these events."""

    MimicRing = 4620
class REGION(dt.Region):
    """Region (trigger volume) IDs, one per Mimic location."""

    Mimic1 = 1702740
    Mimic2 = 1702741
    Mimic3 = 1702742
    Mimic4 = 1702743
    Mimic5 = 1702744
    Mimic6 = 1702745
class TEXT(dt.Text):
    """Dialog/message text IDs shown to the player by these events."""

    ReturnToFirelink = 10010140
    Talk = 10010200
    SummonMimic = 10010350
    MimicSatiated = 10010351
    LoganInstructions = 10010352
    ReturnHavelGear = 10010353
    MissingHavelGear = 10010354
    Caught = 10010355
class SPEFFECT(dt.IntEnum):
    """Special-effect IDs applied by these events."""

    MimicRing = 4706
    MimicRingStealth = 4707
    HavelHealth = 4716
    LoganReward = 4804
    HavelReward = 4805
| 23.185484 | 102 | 0.688696 |
1edb62d78a8b27de5a20e8d7187cd7b0aea6bcde | 23,040 | py | Python | cheapglk/casemap.py | matthewturk/zvm | d9ac7935a047cba05eaaf15384ff6f14e9aa1307 | [
"BSD-3-Clause"
] | 27 | 2016-01-02T09:00:54.000Z | 2022-02-05T20:28:28.000Z | cheapglk/casemap.py | agentcox/glknode | 1f87f3bc9bd66d6d14a7b15e330c06cff91cfcf1 | [
"MIT"
] | 3 | 2019-08-26T13:18:59.000Z | 2021-04-11T15:58:07.000Z | cheapglk/casemap.py | agentcox/glknode | 1f87f3bc9bd66d6d14a7b15e330c06cff91cfcf1 | [
"MIT"
] | 11 | 2016-06-19T08:59:40.000Z | 2021-04-11T10:54:41.000Z | #!/usr/bin/python
# Moderately dumb script to generate the Glk library tables for Unicode
# case-mapping and normalization.
#
# python casemap.py /path/to/unicode/directory > cgunigen.c
# or
# python casemap.py --js /path/to/unicode/directory > unicodemap.js
#
# The argument should be a directory which contains UnicodeData.txt
# and SpecialCasing.txt. These files can be found at
# <http://www.unicode.org/Public/4.0-Update1/>, which is version 4.0.1
# of the Unicode spec. This script has only been tested with that version
# (and the included files are from that version). It is not current.
import sys
import os
import re
output = 'c'
args = sys.argv[ 1 : ]
if ('--js' in args):
output = 'js'
args.remove('--js')
if ('--c' in args):
output = 'c'
args.remove('--c')
if ('--none' in args):
output = None
args.remove('--none')
if (len(args) != 1):
print 'Usage: casemap.py [ --js | --c | --none ] /path/to/unicode/directory'
sys.exit(1)
unicode_dir = args[0]
unicode_version = '???'
try:
ucdfl = open(os.path.join(unicode_dir, 'UnicodeData.txt'))
specfl = open(os.path.join(unicode_dir, 'SpecialCasing.txt'))
except IOError:
print unicode_dir, 'must contain the files UnicodeData.txt and SpecialCasing.txt.'
sys.exit(1)
# parse UnicodeData.txt
combintable = {}
decomptable = {}
recdecomptable = {}
compotable = {}
casetable = {}
upcasetable = {}
downcasetable = {}
titlecasetable = {}
totalchars = 0
titleablechars = 0
totalspecialcases = 0
specialtable = {}
while 1:
ln = ucdfl.readline()
if (not ln):
break
ln = ln.strip()
pos = ln.find('#')
if (pos >= 0):
ln = ln[ : pos]
ls = ln.split(';')
if ((not ls) or (not ls[0])):
continue
val = int(ls[0], 16)
totalchars = totalchars+1
if (len(ls) > 3 and ls[3]):
combin = int(ls[3])
if (combin):
combintable[val] = combin
if (len(ls) > 5 and ls[5]):
decomp = ls[5]
if not decomp.startswith('<'):
ent = [ int(el, 16) for el in decomp.split(' ') ]
recdecomptable[val] = ent
upcase = val
downcase = val
titlecase = val
if (len(ls) > 12 and ls[12]):
upcase = int(ls[12], 16)
if (val != upcase):
upcasetable[val] = [upcase]
if (len(ls) > 13 and ls[13]):
downcase = int(ls[13], 16)
if (val != downcase):
downcasetable[val] = [downcase]
if (len(ls) > 14 and ls[14]):
titlecase = int(ls[14], 16)
if (val != titlecase):
titlecasetable[val] = [titlecase]
if (val == upcase and val == downcase and val == titlecase):
continue
if (upcase != titlecase):
titleablechars = titleablechars+1
specialtable[val] = ([upcase], [downcase], [titlecase])
casetable[val] = (upcase, downcase, titlecase)
while 1:
ln = specfl.readline()
if (not ln):
break
if ln.startswith('# SpecialCasing'):
match = re.search('SpecialCasing-([0-9.]+).txt', ln)
if (match):
unicode_version = match.group(1)
continue
ln = ln.strip()
pos = ln.find('#')
if (pos >= 0):
ln = ln[ : pos]
ls = ln.split(';')
ls = [st.strip() for st in ls]
if ((not ls) or (not ls[0])):
continue
val = int(ls[0], 16)
if (len(ls) > 4 and ls[4]):
# conditional case, ignore
continue
totalspecialcases = totalspecialcases+1
upcase = [ int(st, 16) for st in ls[3].split(' ') ]
downcase = [ int(st, 16) for st in ls[1].split(' ') ]
titlecase = [ int(st, 16) for st in ls[2].split(' ') ]
if (upcase != [val]):
upcasetable[val] = upcase
if (downcase != [val]):
downcasetable[val] = downcase
if (titlecase != [val]):
titlecasetable[val] = titlecase
speccase = ( upcase, downcase, titlecase )
casetable[val] = (val, val, val) # placeholder
specialtable[val] = speccase
# The decomposition data we have extracted is recursive; a character can
# decompose to more decomposable characters. We now expand that into
# flat lists. (It only takes a little more space, because most characters
# aren't recursive that way.)
def try_decompose(val):
    # Return the fully expanded (flat) decomposition of val, memoizing the
    # result in the module-level decomptable.  Characters with no entry in
    # recdecomptable decompose to themselves.
    if val in decomptable:
        return decomptable[val]
    recursive = recdecomptable.get(val)
    if not recursive:
        result = [ val ]
    else:
        result = []
        for sub in recursive:
            result = result + try_decompose(sub)
    decomptable[val] = result
    return result
for val in recdecomptable.keys():
try_decompose(val)
for val in decomptable.keys():
if decomptable[val] == [ val ]:
decomptable.pop(val)
if (len(recdecomptable) != len(decomptable)):
raise Exception('Decomposition table changed length in expansion!')
# Generate the composition mapping, which is roughly the inverse of the
# (recursive) decomposition data. It only includes decompositions into
# *two* characters, though. (Decompositions of one character to one
# character are actually canonicalizations, and we don't want to reverse
# those.)
for (val, ls) in recdecomptable.items():
if len(ls) not in [1, 2]:
raise Exception('Character %x has decomposition %s' % (val, ls))
head = ls[0]
if len(ls) == 2:
map = compotable.get(head)
if (map is None):
map = {}
compotable[head] = map
map[ls[1]] = val
max_decompose_length = max([ len(ls) for ls in decomptable.values() ])
sys.stderr.write(str(totalchars) + ' characters in the Unicode database\n')
sys.stderr.write(str(len(combintable)) + ' characters with combining classes\n')
sys.stderr.write(str(len(decomptable)) + ' characters with decompositions (max length ' + str(max_decompose_length) + ')\n')
sys.stderr.write(str(len(compotable)) + ' character compositions\n')
sys.stderr.write(str(len(casetable)) + ' characters which can change case\n')
sys.stderr.write(str(titleablechars) + ' characters with a distinct title-case\n')
sys.stderr.write(str(totalspecialcases) + ' characters with length changes\n')
sys.stderr.write(str(len(specialtable)) + ' special-case characters\n')
# This semi-clever function takes a (sorted) list of integers, and
# divides it into a list of arithmetic runs, and a list of leftovers:
#
# ([ (start, end, jump), (start, end, jump), ...], [ ... ])
#
# In the worst case, you get back ([], ls) -- no runs, and the entire
# original list as leftovers. The minlength argument tunes the results;
# you get no runs shorter than minlength.
#
def find_runs(ls, minlength=3, jumpone=False):
    # Split a sorted list of distinct integers into arithmetic runs plus
    # leftovers, returning (runs, extras) where each run is an inclusive
    # (start, end, jump) triple of length >= minlength (floored at 2).
    # With jumpone true, only runs with step exactly 1 are kept.
    # Worst case is ([], ls): no runs, everything left over.
    # Raises Exception if two adjacent values are equal.
    runs = []
    extras = []
    threshold = max(minlength, 2)
    total = len(ls)
    pos = 0
    while total - pos >= threshold:
        start = ls[pos]
        jump = ls[pos + 1] - start
        if jump == 0:
            raise Exception("Repeated value")
        # Scan forward while the list keeps following the arithmetic pattern.
        scan = pos
        expect = start
        while scan < total and ls[scan] == expect:
            scan += 1
            expect += jump
        if scan - pos >= threshold and (jump == 1 or not jumpone):
            runs.append((start, expect - jump, jump))
            pos = scan
        else:
            extras.append(start)
            pos += 1
    extras.extend(ls[pos:])
    return (runs, extras)
# Produce the output, in whichever form was requested.
if (output == 'c'):
# C code output
blocktable = {}
for val in casetable.keys():
(upcase, downcase, titlecase) = casetable[val]
blocknum = val >> 8
if (not blocktable.has_key(blocknum)):
block = [ None ] * 256
blocktable[blocknum] = block
else:
block = blocktable[blocknum]
block[val & 0xFF] = (upcase, downcase)
print '/* This file was generated by casemap.py. */'
print '/* Derived from Unicode data files, Unicode version %s. */' % (unicode_version,)
print '/* This does not get compiled into a cgunigen.o file; it\'s'
print ' * #included in cgunicod.c. */'
print
# The case-folding tables.
blockkeys = blocktable.keys()
blockkeys.sort()
for blocknum in blockkeys:
print 'static gli_case_block_t unigen_case_block_' + hex(blocknum) + '[256] = {'
block = blocktable[blocknum]
for ix in range(256):
ch = blocknum * 0x100 + ix
res = block[ix]
if (res == None):
upcase = ch
downcase = ch
else:
(upcase, downcase) = res
if (specialtable.has_key(ch)):
print ' { 0xFFFFFFFF, 0xFFFFFFFF },'
else:
if (upcase != downcase):
if (upcase == ch):
comment = ' /* upper */'
elif (downcase == ch):
comment = ' /* lower */'
else:
comment = ' /* different */'
else:
comment = ''
print ' { ' + hex(upcase) + ', ' + hex(downcase) + ' },' + comment
print '};'
print
print '#define GET_CASE_BLOCK(ch, blockptr) \\'
print 'switch ((glui32)(ch) >> 8) { \\'
for blocknum in blockkeys:
print ' case ' + hex(blocknum) + ': \\'
print ' *blockptr = unigen_case_block_' + hex(blocknum) + '; \\'
print ' break; \\'
print ' default: \\'
print ' *blockptr = NULL; \\'
print '}'
specialkeys = specialtable.keys()
specialkeys.sort()
pos = 0
specialstructs = []
print 'static glui32 unigen_special_array[] = {'
for val in specialkeys:
speccase = specialtable[val]
(upcasel, downcasel, titlecasel) = speccase
comment = ' /* ' + hex(val) + ' upcase */'
strarr = ', '.join([hex(st) for st in upcasel])
print ' ' + str(len(upcasel)) + ', ' + strarr + ',' + comment
pos0 = pos
pos = pos + len(upcasel) + 1
comment = ' /* ' + hex(val) + ' downcase */'
strarr = ', '.join([hex(st) for st in downcasel])
print ' ' + str(len(downcasel)) + ', ' + strarr + ',' + comment
pos1 = pos
pos = pos + len(downcasel) + 1
comment = ' /* ' + hex(val) + ' titlecase */'
strarr = ', '.join([hex(st) for st in titlecasel])
print ' ' + str(len(titlecasel)) + ', ' + strarr + ',' + comment
pos2 = pos
pos = pos + len(titlecasel) + 1
specialstructs.append( (val, pos0, pos1, pos2) )
print '};'
print
for (val, pos0, pos1, pos2) in specialstructs:
print 'static gli_case_special_t unigen_special_' + hex(val) + ' = { ' + str(pos0) + ', ' + str(pos1) + ', ' + str(pos2) + ' };'
print
print '#define GET_CASE_SPECIAL(ch, specptr) \\'
print 'switch (ch) { \\'
for (val, pos0, pos1, pos2) in specialstructs:
print ' case ' + hex(val) + ': \\'
print ' *specptr = unigen_special_' + hex(val) + '; \\'
print ' break; \\'
print ' default: \\'
print ' *specptr = NULL; \\'
print '}'
print
# The combining-class table.
usetable = {}
for (val, ent) in combintable.items():
blocknum = val >> 8
if not usetable.has_key(blocknum):
usetable[blocknum] = {}
if not usetable[blocknum].has_key(ent):
usetable[blocknum][ent] = []
usetable[blocknum][ent].append(val)
usels = usetable.keys()
usels.sort()
print '#define RETURN_COMBINING_CLASS(ch) \\'
print 'switch ((glui32)(ch) >> 8) { \\'
for blocknum in usels:
print ' case %d: \\' % (blocknum,)
print ' switch (ch) { \\'
entls = usetable[blocknum].keys()
entls.sort()
for ent in entls:
valls = usetable[blocknum][ent]
valls.sort()
for val in valls:
print ' case %d: \\' % (val,)
print ' return %d; \\' % (ent,)
print ' } \\'
print ' return 0; \\'
print '} \\'
print 'return 0;'
print
# The composition tables.
usetable = {}
for (val, map) in compotable.items():
blocknum = val >> 8
if not usetable.has_key(blocknum):
usetable[blocknum] = {}
usetable[blocknum][val] = map
usels = usetable.keys()
usels.sort()
print '#define RETURN_COMPOSITION(ch1, ch2) \\'
print 'switch ((glui32)(ch1) >> 8) { \\'
for blocknum in usels:
print ' case %d: \\' % (blocknum,)
print ' switch (ch1) { \\'
map = usetable[blocknum]
ls = map.keys()
ls.sort()
for val in ls:
print ' case %d: \\' % (val,)
print ' switch (ch2) { \\'
subls = map[val].items()
subls.sort()
for (val2, ent) in subls:
print ' case %d: return %d; \\' % (val2, ent)
print ' } \\'
print ' return 0; \\'
print ' } \\'
print ' return 0; \\'
print '} \\'
print 'return 0;'
print
# The decomposition tables.
usetable = {}
for val in decomptable.keys():
blocknum = val >> 8
usetable[blocknum] = 1 + usetable.get(blocknum, 0)
for (blocknum, count) in usetable.items():
if (count < 30):
usetable[blocknum] = None
blocktable = {}
extratable = {}
ls = decomptable.keys()
ls.sort()
offsets = []
for val in ls:
pos = len(offsets)
ent = decomptable[val]
if (type(ent) == list):
offsets.extend(ent)
count = len(ent)
else:
offsets.append(ent)
count = 1
blocknum = val >> 8
if not usetable[blocknum]:
extratable[val] = (count, pos)
else:
if (not blocktable.has_key(blocknum)):
block = [ None ] * 256
blocktable[blocknum] = block
else:
block = blocktable[blocknum]
block[val & 0xFF] = (count, pos)
print 'static glui32 unigen_decomp_data[%d] = {' % (len(offsets),)
rowcount = 0
for val in offsets:
if (rowcount >= 8):
print
rowcount = 0
print '%s,' % (hex(val)),
rowcount += 1
print '};'
print
blockkeys = blocktable.keys()
blockkeys.sort()
for blocknum in blockkeys:
print 'static gli_decomp_block_t unigen_decomp_block_%s[256] = {' % (hex(blocknum),)
block = blocktable[blocknum]
for ix in range(256):
ch = blocknum * 0x100 + ix
res = block[ix]
if (res == None):
count = 0
pos = 0
else:
(count, pos) = res
print ' { %s, %s },' % (str(count), str(pos))
print '};'
print
print '#define GET_DECOMP_BLOCK(ch, blockptr) \\'
print 'switch ((glui32)(ch) >> 8) { \\'
for blocknum in blockkeys:
print ' case ' + hex(blocknum) + ': \\'
print ' *blockptr = unigen_decomp_block_' + hex(blocknum) + '; \\'
print ' break; \\'
print ' default: \\'
print ' *blockptr = NULL; \\'
print '}'
print
extrakeys = extratable.keys()
extrakeys.sort()
print '#define GET_DECOMP_SPECIAL(ch, countptr, posptr) \\'
print 'switch (ch) { \\'
for val in extrakeys:
(count, pos) = extratable[val]
print ' case ' + hex(val) + ': \\'
print ' *countptr = ' + str(count) + '; *posptr = ' + str(pos) + '; \\'
print ' break; \\'
print ' default: \\'
print ' *countptr = 0; \\'
print '}'
print
# Some helper functions for generating the Javascript data tables. We
# have separate functions for the case tables and the decomp tables,
# because their particular structures are amenable to different
# optimizations. (The case tables have long runs of "N => N+K",
# whereas the decomp tables have long runs of arbitrary values.)
def generate_js_table_case(label, pairs, offsets):
special_offsets = dict([ (key, offsets[key]) for key in offsets.keys()
if offsets[key] >= 16 ])
offmaps = {}
for offset in special_offsets.keys():
offmaps[offset] = []
print '/* list all the special cases in unicode_%s_table */' % (label,)
print 'var unicode_%s_table = {' % (label,)
outls = []
for (key, val) in pairs:
if (type(val) == list):
ls = val
ls = [ str(val) for val in ls ]
outls.append('%s: [ %s ]' % (str(key), ','.join(ls)))
continue
offset = key-val
if (offmaps.has_key(offset)):
offmaps[offset].append(key)
continue
outls.append('%s: %s' % (str(key), str(val)))
rowcount = 0
for ix in range(len(outls)):
val = outls[ix]
islast = (ix == len(outls)-1)
if (rowcount >= 5):
print
rowcount = 0
print val+('' if islast else ','),
rowcount += 1
print
print '};'
if (not offmaps):
print
return
print '/* add all the regular cases to unicode_%s_table */' % (label,)
print '(function() {'
print ' var ls, ix, val;'
print ' var map = unicode_%s_table;' % (label,)
ls = offmaps.keys()
ls.sort()
for offset in ls:
if (offset < 0):
op = '+' + str(-offset)
else:
op = '-' + str(offset)
# Divide the list of values into a list of runs (which we can
# do with a simple for loop) and a list of leftovers (which
# we have to do one by one).
# The minlength value of 16 is about optimal (by experiment)
(runs, extras) = find_runs(offmaps[offset], 16)
for (start, end, jump) in runs:
print ' for (val=%s; val<=%s; val+=%s) {' % (str(start), str(end), str(jump))
print ' map[val] = val%s;' % (op,)
print ' }'
if (extras and len(extras) < 3):
# It's more efficient to dump a few extras as single lines.
for val in extras:
print ' map[%d] = %d;' % (val, val-offset)
elif (extras):
# But if we have a lot of extras, we should loop over an array.
print ' ls = ['
rowcount = 0
for val in extras:
if (rowcount >= 8):
print
rowcount = 0
print '%s,' % (str(val)),
rowcount += 1
print
print ' ];'
print ' for (ix=0; ix<%d; ix++) {' % (len(extras),)
print ' val = ls[ix];'
print ' map[val] = val%s;' % (op,)
print ' }'
print '})();'
print
def generate_js_table_decomp(label, table, runmin=16):
keys = table.keys()
keys.sort()
(runs, extras) = find_runs(keys, runmin, True)
print '/* list all the special cases in unicode_%s_table */' % (label,)
print 'var unicode_%s_table = {' % (label,)
outls = []
for key in extras:
val = table[key]
if (type(val) == list):
ls = val
ls = [ str(val) for val in ls ]
outls.append('%s: [ %s ]' % (str(key), ','.join(ls)))
continue
outls.append('%s: %s' % (str(key), str(val)))
rowcount = 0
for ix in range(len(outls)):
val = outls[ix]
islast = (ix == len(outls)-1)
if (rowcount >= 5):
print
rowcount = 0
print val+('' if islast else ','),
rowcount += 1
print
print '};'
if (not runs):
print
return
print '/* add all the regular cases to unicode_%s_table */' % (label,)
print '(function() {'
print ' var ls, ix, val;'
print ' var map = unicode_%s_table;' % (label,)
for (start, end, jump) in runs:
print ' ls = ['
rowcount = 0
for ix in range(start, end+1):
val = table[ix]
if (rowcount >= 8):
print
rowcount = 0
if (type(val) == list):
val = [ str(ent) for ent in val ]
ent = '[' + ','.join(val) + ']'
else:
ent = str(val)
print '%s,' % (ent),
rowcount += 1
print
print ' ];'
print ' for (ix=0; ix<%d; ix++) {' % (end-start+1,)
print ' val = ls[ix];'
print ' map[ix+%d] = val;' % (start,)
print ' }'
print '})();'
print
if (output == 'js'):
# javascript code output
print '/* These tables were generated by casemap.py. */'
print '/* Derived from Unicode data files, Unicode version %s. */' % (unicode_version,)
print
tablelist = [ (upcasetable, 'upper'),
(downcasetable, 'lower') ]
for (map, label) in tablelist:
keys = map.keys()
keys.sort()
pairs = []
offsets = {}
for key in keys:
if (not map.has_key(key)):
continue
ls = map[key]
if (len(ls) != 1):
pairs.append( (key, ls) )
continue
val = ls[0]
offset = key-val
offsets[offset] = offsets.get(offset, 0) + 1
pairs.append( (key, val) )
generate_js_table_case(label, pairs, offsets)
map = {}
for key in upcasetable.keys():
if (not titlecasetable.has_key(key)):
map[key] = key
for key in titlecasetable.keys():
if (titlecasetable[key] != upcasetable.get(key)):
val = titlecasetable[key]
if (len(val) == 1):
val = val[0]
map[key] = val
generate_js_table_decomp('title', map)
map = {}
for (key, val) in decomptable.items():
if (len(val) == 1):
val = val[0]
map[key] = val
generate_js_table_decomp('decomp', map, 16)
generate_js_table_decomp('combin', combintable, 100)
print '/* list all of unicode_compo_table */'
print 'var unicode_compo_table = {'
ls = compotable.keys()
ls.sort()
for key in ls:
islast = (key == ls[-1])
subls = compotable[key].items()
subls.sort()
val = ', '.join([ '%d:%d' % (subkey, subval) for (subkey, subval) in subls ])
print ' %d: { %s }%s' % (key, val, ('' if islast else ','))
print '};'
print '/* End of tables generated by casemap.py. */'
| 30.275953 | 136 | 0.51888 |
9dc01ad9a365fb00096f1aa4f89e4292f6933acc | 4,436 | py | Python | cogs/misc.py | bdon-htb/richard_bot | e5b9271a60ccf4e95844d637e82b51e76c3470ae | [
"MIT"
] | null | null | null | cogs/misc.py | bdon-htb/richard_bot | e5b9271a60ccf4e95844d637e82b51e76c3470ae | [
"MIT"
] | null | null | null | cogs/misc.py | bdon-htb/richard_bot | e5b9271a60ccf4e95844d637e82b51e76c3470ae | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import random
# Custom imports
import cfg
class Misc(commands.Cog):
    """A class containing miscellaneous command implementations.
    """

    # NOTE(review): the original file's leading indentation (and possibly
    # blank lines) was lost in extraction; the structure below is
    # reconstructed from the control flow.  Command docstrings are runtime
    # data (read back by the `help` command), so their text is unchanged.

    def __init__(self, client):
        # Keep a reference to the running bot instance.
        self.client = client

    async def _clear(self, ctx, n, is_valid):
        """Removes n number of messages. Counter only goes up when message
        passes the is_valid function.

        Precondition: n > 0
        """
        # Walk the channel history (newest first), deleting matching
        # messages one at a time until n have been removed.
        async for message in ctx.channel.history():
            if n <= 0:
                break
            elif is_valid(message):
                await message.delete()
                n -= 1

    async def _bulk_clear(self, ctx, n, is_valid):
        """Does a bulk clear of messages using purge.
        Favours speed over accuracy.
        """
        # NOTE(review): purge's `limit` counts messages *scanned*, not
        # deleted, so fewer than n matching messages may actually be
        # removed -- hence "favours speed over accuracy".
        await ctx.channel.purge(limit=n, check=is_valid, bulk=True)

    async def _wipe_helper(self, ctx, amount, args=()):
        """Handles the removal of messages for r!wipe.
        """
        # (fixed: the default was a mutable list literal `[]`; an empty
        # tuple behaves identically for the membership tests below and
        # avoids the shared-mutable-default pitfall)
        if '-a' in args:  # Remove the wipe message call too.
            message_check = lambda m: m.author == ctx.author
        elif '-b' in args:  # Remove only bot messages of specified amount.
            message_check = lambda m: m.author.bot
        else:  # Remove specified amount of user messagess NOT including the wipe message.
            message_check = lambda m: m != ctx.message and m.author == ctx.author
        # Large requests take the fast (bulk) path.
        if amount > 25:
            await self._bulk_clear(ctx, amount, message_check)
        else:
            await self._clear(ctx, amount, message_check)

    # Events
    @commands.Cog.listener()
    async def on_ready(self):
        print('Bot running')

    # Commands
    @commands.command()
    async def help(self, ctx, c=''):
        """Dynamically construct a help message containing commands and
        their usage. If a valid command is specified then the bot will
        send a help message of the specific command using the method's
        docstring.

        Usage: {PREFIX}help [c: str]
        [c] is the name of any bot command.
        """
        # (renamed local from `commands` to avoid shadowing the
        # discord.ext.commands module imported at the top of the file)
        cog_commands = self.get_commands()
        com_names = [com.name for com in cog_commands]
        if c in com_names:
            # A specific command was requested: show its docstring, with
            # the configured prefix substituted for {PREFIX}.
            method = cog_commands[com_names.index(c)].callback
            docstring = method.__doc__.replace('{PREFIX}', cfg.PREFIX)
            message = f'```===Documentation for {c}===\n'
            for line in docstring.split('\n'):
                message += line.lstrip() + '\n'
            message += '```'
            await ctx.send(message)
        else:
            # No (valid) command given: list everything this cog offers.
            message = '```===List of Commands===\n'
            for name in com_names:
                message += cfg.PREFIX + name + '\n'
            message += f'\nIf you need help with using a particular command\nadd the name of it to {cfg.PREFIX}help.\ni.e. {cfg.PREFIX}help wipe```'
            await ctx.send(message)

    @commands.command()
    async def wipe(self, ctx, amount=0, *args):
        """Delete message(s) from the server.

        Usage: {PREFIX}wipe [amount: int] [args]
        [amount] is the number of messages to remove.
        The command message is not included by default.

        Note:
        If [amount] is greater than 25 the bot will fallback to a bulk
        delete implementations which won't be as accurate.

        Accepted arguments:
        -a include the command message in deletion.
        -b wipe only the last [amount] messages by bots.
        -s stops the bot from sending the success message.
        """
        channel = ctx.channel.name
        valid_channels = cfg.SETTINGS['wipe_channel']
        if '-a' in args:
            # The invoking message is deleted too, so bump the count.
            amount += 1
        # An empty whitelist means wiping is allowed in every channel.
        if amount > 0 and (valid_channels == [] or channel in valid_channels):
            message = random.choice(cfg.WORDS['success']) + ' ' + ctx.author.name
            message += f'. Purged {amount} messages'
            await self._wipe_helper(ctx, amount, args)
        else:
            message = random.choice(cfg.WORDS['failed']) + ' ' + ctx.author.name
            if amount <= 0:
                message += f'. I can\'t delete {amount} messages.'
            else:
                message += '. I\'ve been told not to delete messages in this channel.'
        if '-s' not in args:
            if '-a' in args:
                # Presumably with -a the invoking message was deleted, so
                # reply() would fail; fall back to a plain send -- confirm.
                await ctx.send(message)
            else:
                await ctx.reply(message)
def setup(client):
    """Standard discord.py extension entry point: register the Misc cog."""
    client.add_cog(Misc(client))
| 34.929134 | 148 | 0.580027 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.