hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73ec8a3d647d6148fda25e3c11b5c3edea0ccce | 3,464 | py | Python | python/GafferUI/PathParameterValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/PathParameterValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/PathParameterValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import IECore
import Gaffer
import GafferUI
class PathParameterValueWidget( GafferUI.ParameterValueWidget ) :
    """A ParameterValueWidget that edits PathParameters using a PathPlugValueWidget."""

    def __init__( self, parameterHandler, **kw ) :

        pathWidget = GafferUI.PathPlugValueWidget(
            parameterHandler.plug(),
            self._path(),
            pathChooserDialogueKeywords = Gaffer.WeakMethod( self._pathChooserDialogueKeywords ),
        )
        self.__pathWidget = pathWidget

        GafferUI.ParameterValueWidget.__init__( self, pathWidget, parameterHandler, **kw )

    def _path( self ) :
        """Returns the FileSystemPath edited by the widget."""

        return Gaffer.FileSystemPath( "/", filter = self._filter() )

    def _filter( self ) :
        """Returns the filter applied to the path."""

        return Gaffer.FileSystemPath.createStandardFilter()

    def _pathChooserDialogueKeywords( self ) :
        """Returns the keyword arguments to be passed to the path chooser dialogue."""

        bookmarksCategory = None
        with IECore.IgnoredExceptions( KeyError ) :
            bookmarksCategory = self.parameter().userData()["UI"]["bookmarksCategory"].value

        # Sometimes parameter widgets are used with nodes which are parented to an
        # application but where the window isn't, and sometimes with nodes with no
        # application but where the window does belong to one. So we hedge our bets
        # and use both the widget and the plug to try to find bookmarks for the
        # application. FileSystemPath is used deliberately rather than
        # _path().__class__ so that file sequences share the same set of bookmarks
        # as files.
        bookmarks = GafferUI.Bookmarks.acquire(
            ( self, self.plug() ),
            pathType = Gaffer.FileSystemPath,
            category = bookmarksCategory,
        )

        return { "bookmarks" : bookmarks }

GafferUI.ParameterValueWidget.registerType( IECore.PathParameter, PathParameterValueWidget )
| 37.247312 | 92 | 0.706409 | true | true | |
f73ec953ced26c903337e19ca86ba26fc4a6046e | 2,484 | py | Python | authentik/policies/migrations/0005_binding_group.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/policies/migrations/0005_binding_group.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/policies/migrations/0005_binding_group.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | # Generated by Django 3.1.6 on 2021-02-08 18:36
import django.db.models.deletion
from django.apps.registry import Apps
from django.conf import settings
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
import authentik.lib.models
def migrate_from_groupmembership(apps: Apps, schema_editor: BaseDatabaseSchemaEditor):
    """Fold legacy GroupMembershipPolicy objects into PolicyBinding.group.

    Every PolicyBinding that pointed at a GroupMembershipPolicy is rewritten to
    bind the policy's group directly (clearing the policy reference), after
    which the now-redundant policy object is deleted. Does nothing when the
    group-membership app is not installed.
    """
    try:
        membership_model = apps.get_model(
            "authentik_policies_group_membership", "GroupMembershipPolicy"
        )
    except LookupError:
        # GroupMembership app isn't installed, ignore migration
        return
    binding_model = apps.get_model("authentik_policies", "PolicyBinding")
    alias = schema_editor.connection.alias
    for membership in membership_model.objects.using(alias).all():
        related_bindings = binding_model.objects.using(alias).filter(policy=membership)
        for binding in related_bindings:
            binding.group = membership.group
            binding.policy = None
            binding.save()
        membership.delete()
class Migration(migrations.Migration):
    # Schema + data migration: PolicyBinding gains optional "group" and "user"
    # foreign keys, its "policy" foreign key becomes nullable, and existing
    # GroupMembershipPolicy data is folded into the new "group" field by
    # migrate_from_groupmembership (see above).

    dependencies = [
        ("authentik_core", "0017_managed"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("authentik_policies", "0004_policy_execution_logging"),
    ]

    operations = [
        migrations.AddField(
            model_name="policybinding",
            name="group",
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="authentik_core.group",
            ),
        ),
        migrations.AddField(
            model_name="policybinding",
            name="user",
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        # "policy" is now optional; InheritanceForeignKey is the project's FK
        # for policies, and related_name="+" disables the reverse accessor.
        migrations.AlterField(
            model_name="policybinding",
            name="policy",
            field=authentik.lib.models.InheritanceForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="+",
                to="authentik_policies.policy",
            ),
        ),
        migrations.RunPython(migrate_from_groupmembership),
    ]
| 32.25974 | 87 | 0.610306 |
import django.db.models.deletion
from django.apps.registry import Apps
from django.conf import settings
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
import authentik.lib.models
def migrate_from_groupmembership(apps: Apps, schema_editor: BaseDatabaseSchemaEditor):
try:
GroupMembershipPolicy = apps.get_model(
"authentik_policies_group_membership", "GroupMembershipPolicy"
)
except LookupError:
return
PolicyBinding = apps.get_model("authentik_policies", "PolicyBinding")
db_alias = schema_editor.connection.alias
for membership in GroupMembershipPolicy.objects.using(db_alias).all():
for binding in PolicyBinding.objects.using(db_alias).filter(policy=membership):
binding.group = membership.group
binding.policy = None
binding.save()
membership.delete()
class Migration(migrations.Migration):
dependencies = [
("authentik_core", "0017_managed"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("authentik_policies", "0004_policy_execution_logging"),
]
operations = [
migrations.AddField(
model_name="policybinding",
name="group",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="authentik_core.group",
),
),
migrations.AddField(
model_name="policybinding",
name="user",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="policybinding",
name="policy",
field=authentik.lib.models.InheritanceForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="authentik_policies.policy",
),
),
migrations.RunPython(migrate_from_groupmembership),
]
| true | true |
f73ecacbe4cb787e06512814164b21187d010431 | 1,347 | py | Python | alluvian/commands/cmd/look.py | rparker-indeed/mudserver | 70a6227af3e977ecda2fc4a1752dd4703f206778 | [
"MIT"
] | 2 | 2020-09-22T23:02:44.000Z | 2021-07-12T02:50:55.000Z | alluvian/commands/cmd/look.py | rparker-indeed/mudserver | 70a6227af3e977ecda2fc4a1752dd4703f206778 | [
"MIT"
] | 24 | 2020-09-08T19:15:24.000Z | 2020-09-20T22:05:32.000Z | alluvian/commands/cmd/look.py | rparker-indeed/alluvian-engine | 70a6227af3e977ecda2fc4a1752dd4703f206778 | [
"MIT"
] | null | null | null | from alluvian.commands.mud_command import MudCommand
import alluvian.globals as glob
from util.colors import Colors
from util.asciimap import show_map
class Look(MudCommand):
    """Describe the actor's current room.

    Sends the room name and description, an ASCII map, every other player
    standing in the room, and the list of obvious exits together with the
    names of the rooms they lead to.
    """

    key = 'look'
    aliases = ['l', 'loo']

    def execute(self):
        user = glob.sessions[self.actor]
        msg = f'{Colors.fg.BCYAN}{user.player.room.name}{Colors.style.RESET_ALL}\r\n' \
              f'{Colors.fg.CYAN}{user.player.room.description}{Colors.style.RESET_ALL}\r\n'
        msg += show_map(self.room) + '\r\n'

        # Get all players that are not the current player.
        for connection_id, player in self.get_players_in_room().items():
            if connection_id != self.actor:
                msg += f'{Colors.fg.BGREEN}{player.name} is standing here.{Colors.style.RESET_ALL}\r\n'

        # Get Exits
        msg += Colors.fg.BBLUE
        msg += 'Obvious Exits\r\n'
        if not self.room.has_exits():
            msg += f'{Colors.fg.BWHITE}None.'
        else:
            # Exits are stored as "exit_<direction>" attributes on the room; a
            # truthy value keys into glob.rooms for the destination room.
            exit_attrs = [att for att in dir(self.room) if att.startswith('exit_')]
            for exit_attr in exit_attrs:  # renamed from "exit": shadowed the builtin
                destination = getattr(self.room, exit_attr)
                if destination:
                    direction = exit_attr.replace("exit_", "")
                    msg += f'{direction}\t - {glob.rooms[destination].name}\r\n'
        msg += Colors.style.RESET_ALL
        self.msg(msg)
| 34.538462 | 103 | 0.593912 | from alluvian.commands.mud_command import MudCommand
import alluvian.globals as glob
from util.colors import Colors
from util.asciimap import show_map
class Look(MudCommand):
key = 'look'
aliases = ['l', 'loo']
def execute(self):
user = glob.sessions[self.actor]
msg = f'{Colors.fg.BCYAN}{user.player.room.name}{Colors.style.RESET_ALL}\r\n' \
f'{Colors.fg.CYAN}{user.player.room.description}{Colors.style.RESET_ALL}\r\n'
msg += show_map(self.room) + '\r\n'
for connection_id, player in self.get_players_in_room().items():
if connection_id != self.actor:
msg += f'{Colors.fg.BGREEN}{player.name} is standing here.{Colors.style.RESET_ALL}\r\n'
msg += Colors.fg.BBLUE
msg += 'Obvious Exits\r\n'
if not self.room.has_exits():
msg += f'{Colors.fg.BWHITE}None.'
else:
exits = [att for att in dir(self.room) if att.startswith('exit_')]
for exit in exits:
if getattr(self.room, exit):
direction = exit.replace("exit_", "")
msg += f'{direction}\t - {glob.rooms[getattr(self.room, exit)].name}\r\n'
msg += Colors.style.RESET_ALL
self.msg(msg)
| true | true |
f73ecad5273b1dcbd0d8a76ed4c009ba8b1403bf | 120 | py | Python | nodenet/python/nodenet/interface/console/__init__.py | NOOXY-research/NodeNet | 8bf7e0c2fd0e4fae4a51b2900014004728f3c935 | [
"Apache-2.0"
] | 2 | 2018-01-31T05:52:23.000Z | 2020-08-07T19:14:18.000Z | nodenet/python/nodenet/interface/console/__init__.py | NOOXY-research/NodeNet | 8bf7e0c2fd0e4fae4a51b2900014004728f3c935 | [
"Apache-2.0"
] | 1 | 2017-11-22T09:39:50.000Z | 2017-11-22T09:39:50.000Z | nodenet/python/nodenet/interface/console/__init__.py | magneticchen/NodeNet | 8bf7e0c2fd0e4fae4a51b2900014004728f3c935 | [
"Apache-2.0"
] | null | null | null | # Create alias
from nodenet.interface.console.commons import *
from nodenet.interface.console.neuralneteditor import *
| 24 | 55 | 0.825 |
from nodenet.interface.console.commons import *
from nodenet.interface.console.neuralneteditor import *
| true | true |
f73ecb27ebebf2c25ca51870ce539bc4c932b442 | 221 | py | Python | angola_erp/rent_a_car/doctype/vehicle_lastmile/test_vehicle_lastmile.py | smehata/angola_erp | 51614992709476e353aef1c03099d78f2a7cedb2 | [
"MIT"
] | 4 | 2019-06-12T06:54:10.000Z | 2021-08-28T06:07:42.000Z | angola_erp/rent_a_car/doctype/vehicle_lastmile/test_vehicle_lastmile.py | proenterprise/angola_erp | 1c171362b132e567390cf702e6ebd72577297cdf | [
"MIT"
] | 4 | 2017-08-24T17:33:45.000Z | 2017-09-24T16:54:01.000Z | angola_erp/rent_a_car/doctype/vehicle_lastmile/test_vehicle_lastmile.py | proenterprise/angola_erp | 1c171362b132e567390cf702e6ebd72577297cdf | [
"MIT"
] | 4 | 2018-02-10T21:08:10.000Z | 2021-08-28T06:08:11.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Helio de Jesus and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestVehicle_lastmile(unittest.TestCase):
    # Placeholder test case for the Vehicle_lastmile doctype; no tests defined yet.
    pass
| 20.090909 | 53 | 0.773756 |
from __future__ import unicode_literals
import frappe
import unittest
class TestVehicle_lastmile(unittest.TestCase):
pass
| true | true |
f73eccca1f6451c4287340924638a57533a180b5 | 5,938 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/opaque_f86f4a4822065deb46f8e3927f1f473f.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/opaque_f86f4a4822065deb46f8e3927f1f473f.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/opaque_f86f4a4822065deb46f8e3927f1f473f.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Opaque(Base):
    """
    The Opaque class encapsulates a list of opaque resources that are managed by the system.
    A list of resources can be retrieved from the server using the Opaque.find() method.
    """

    __slots__ = ()
    _SDM_NAME = 'opaque'
    _SDM_ATT_MAP = {
        'EnableRouterTlv': 'enableRouterTlv',
    }
    _SDM_ENUM_MAP = {}

    def __init__(self, parent, list_op=False):
        super(Opaque, self).__init__(parent, list_op)

    @property
    def LinkTlv(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linktlv_452cab99b16a3494d6169df873b31fc6.LinkTlv): An instance of the LinkTlv class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linktlv_452cab99b16a3494d6169df873b31fc6 import LinkTlv
        # Reuse a previously constructed child accessor when one is cached.
        cached = self._properties.get('LinkTlv')
        if cached is None:
            return LinkTlv(self)
        return cached

    @property
    def RouterTlv(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routertlv_7bd1e801f928228f94fc1e60463de9a3.RouterTlv): An instance of the RouterTlv class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routertlv_7bd1e801f928228f94fc1e60463de9a3 import RouterTlv
        cached = self._properties.get('RouterTlv')
        if cached is None:
            return RouterTlv(self)
        return cached

    @property
    def EnableRouterTlv(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool:
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableRouterTlv'])

    @EnableRouterTlv.setter
    def EnableRouterTlv(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['EnableRouterTlv'], value)

    def update(self, EnableRouterTlv=None):
        # type: (bool) -> Opaque
        """Updates opaque resource on the server.

        Args
        ----
        - EnableRouterTlv (bool):

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, EnableRouterTlv=None):
        # type: (bool) -> Opaque
        """Adds a new opaque resource on the json, only valid with config assistant.

        Args
        ----
        - EnableRouterTlv (bool):

        Returns
        -------
        - self: This instance with all currently retrieved opaque resources using find and the newly added opaque resources available through an iterator or index

        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, EnableRouterTlv=None):
        # type: (bool) -> Opaque
        """Finds and retrieves opaque resources from the server.

        All named parameters are evaluated on the server using regex, so they can be
        used to selectively retrieve opaque resources. For an exact match, start the
        parameter value with ^ and end it with $. With no parameters, every opaque
        resource on the server is retrieved.

        Args
        ----
        - EnableRouterTlv (bool):

        Returns
        -------
        - self: This instance with matching opaque resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of opaque data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the opaque resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 36.429448 | 168 | 0.665712 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Opaque(Base):
__slots__ = ()
_SDM_NAME = 'opaque'
_SDM_ATT_MAP = {
'EnableRouterTlv': 'enableRouterTlv',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Opaque, self).__init__(parent, list_op)
@property
def LinkTlv(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linktlv_452cab99b16a3494d6169df873b31fc6 import LinkTlv
if self._properties.get('LinkTlv', None) is not None:
return self._properties.get('LinkTlv')
else:
return LinkTlv(self)
@property
def RouterTlv(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routertlv_7bd1e801f928228f94fc1e60463de9a3 import RouterTlv
if self._properties.get('RouterTlv', None) is not None:
return self._properties.get('RouterTlv')
else:
return RouterTlv(self)
@property
def EnableRouterTlv(self):
return self._get_attribute(self._SDM_ATT_MAP['EnableRouterTlv'])
@EnableRouterTlv.setter
def EnableRouterTlv(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableRouterTlv'], value)
def update(self, EnableRouterTlv=None):
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, EnableRouterTlv=None):
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, EnableRouterTlv=None):
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
return self._read(href)
| true | true |
f73ecdd06eb906b293865cb023a4ce0a095ae660 | 1,624 | py | Python | 3. data_structures/linked_list/linked_lists_circular.py | sourcery-ai-bot/udacity-datastructures-algorithms | c3dfdb8db744540c03a69f4abf833a0fa1f0a345 | [
"MIT"
] | 3 | 2021-05-30T17:21:17.000Z | 2021-09-12T15:18:17.000Z | 3. data_structures/linked_list/linked_lists_circular.py | sourcery-ai-bot/udacity-datastructures-algorithms | c3dfdb8db744540c03a69f4abf833a0fa1f0a345 | [
"MIT"
] | null | null | null | 3. data_structures/linked_list/linked_lists_circular.py | sourcery-ai-bot/udacity-datastructures-algorithms | c3dfdb8db744540c03a69f4abf833a0fa1f0a345 | [
"MIT"
] | 1 | 2021-09-12T15:18:18.000Z | 2021-09-12T15:18:18.000Z | class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, init_list: list = None):
self.head = None
if init_list:
for value in init_list:
self.append(value)
def append(self, value):
if self.head is None:
self.head = Node(value)
return
# Move to the tail (the last node)
node = self.head
while node.next:
node = node.next
node.next = Node(value)
return
list_with_loop = LinkedList([2, -1, 3, 0, 5])
# Creating a loop where the last node points back to the second node
loop_start = list_with_loop.head.next
node = list_with_loop.head
while node.next:
node = node.next
node.next = loop_start
def iscircular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
"""
if linked_list.head is None:
return False
slow = linked_list.head
fast = linked_list.head
# doing just while fast, you can reach the end and hit an attribution error
while fast and fast.next:
# slow pointer moves one node
slow = slow.next
# fast pointer moves two nodes
fast = fast.next.next
if slow == fast:
return True
# If we get to a node where fast doesn't have a next node or doesn't exist itself,
# the list has an end and isn't circular
return False
| 24.606061 | 86 | 0.616379 | class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, init_list: list = None):
self.head = None
if init_list:
for value in init_list:
self.append(value)
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next:
node = node.next
node.next = Node(value)
return
list_with_loop = LinkedList([2, -1, 3, 0, 5])
loop_start = list_with_loop.head.next
node = list_with_loop.head
while node.next:
node = node.next
node.next = loop_start
def iscircular(linked_list):
if linked_list.head is None:
return False
slow = linked_list.head
fast = linked_list.head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return True
return False
| true | true |
f73ece18807f3472e1a42d321949f3a118b450c1 | 7,057 | py | Python | lamb/nascell.py | jkkummerfeld/lamb | 769adaa76394dc74746c2fd8d31afe9c3ca7895b | [
"Apache-2.0"
] | 130 | 2020-01-29T13:50:34.000Z | 2022-02-21T01:24:28.000Z | lamb/nascell.py | jkkummerfeld/lamb | 769adaa76394dc74746c2fd8d31afe9c3ca7895b | [
"Apache-2.0"
] | 10 | 2020-02-06T17:15:00.000Z | 2021-06-13T07:25:55.000Z | lamb/nascell.py | jkkummerfeld/lamb | 769adaa76394dc74746c2fd8d31afe9c3ca7895b | [
"Apache-2.0"
] | 25 | 2020-01-30T13:20:20.000Z | 2022-03-07T07:17:55.000Z | # Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""rnn_cell.NASCell adapted to support transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class NASCell(tf.nn.rnn_cell.RNNCell):
  """Neural Architecture Search (NAS) recurrent network cell.

  This implements the recurrent cell from the paper:

    https://arxiv.org/abs/1611.01578

  Barret Zoph and Quoc V. Le.
  "Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.

  The class uses an optional projection layer.
  """

  def __init__(self, num_units, num_proj=None,
               use_biases=False, reuse=None,
               initializer=None,
               input_transform=None,
               state_transform=None,
               update_transform=None):
    """Initialize the parameters for a NAS cell.

    Args:
      num_units: int, The number of units in the NAS cell
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      use_biases: (optional) bool, If True then use biases within the cell. This
        is False by default.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
      initializer: Initializer for the variables.
      input_transform: None, or a function of one argument that
        massages the input in some way. For example, variational
        dropout can be implemented by passing a Dropout object here.
      state_transform: Similar to input_transform, this is
        applied to the recurrent state.
      update_transform: Similar to input_transform, this is
        applied to the proposed update ('j'). Must be None: update
        transforms are not supported by this cell (asserted below).
    """
    super(NASCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._num_proj = num_proj
    self._use_biases = use_biases
    self._reuse = reuse

    if num_proj is not None:
      # With a projection, the recurrent state `m` lives in the projected
      # space while the cell state `c` keeps the full num_units width.
      self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)
      self._output_size = num_proj
    else:
      self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)
      self._output_size = num_units
    self._initializer = initializer
    self._input_transform = input_transform
    self._state_transform = state_transform
    # Update transforms are not supported by this cell.
    assert update_transform is None

  @property
  def state_size(self):
    # LSTMStateTuple of (c, m) sizes; see __init__ for projection handling.
    return self._state_size

  @property
  def output_size(self):
    # num_proj if a projection was configured, num_units otherwise.
    return self._output_size

  def call(self, inputs, state):
    """Run one step of NAS Cell.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: This must be a tuple of state Tensors, both `2-D`, with column
        sizes `c_state` and `m_state`.

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        NAS Cell after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of NAS Cell after reading `inputs`
        when the previous state was `state`.  Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = tf.sigmoid
    tanh = tf.tanh
    relu = tf.nn.relu

    num_proj = self._num_units if self._num_proj is None else self._num_proj

    def maybe_transform(transform, x):
      # Apply an optional transform (e.g. variational dropout); identity
      # when no transform was configured.
      if transform is None:
        return x
      else:
        return transform(x)

    (c_prev, m_prev) = state
    m_prev = maybe_transform(self._state_transform, m_prev)

    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    inputs = maybe_transform(self._input_transform, inputs)
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")

    # Variables for the NAS cell. W_m is all matrices multiplying the
    # hiddenstate and W_inputs is all matrices multiplying the inputs.
    # Both project to 8 * num_units so a single matmul feeds all 8 branches.
    concat_w_m = tf.get_variable(
        "recurrent_kernel", [num_proj, 8 * self._num_units],
        initializer=self._initializer, dtype=dtype)
    concat_w_inputs = tf.get_variable(
        "kernel", [input_size.value, 8 * self._num_units],
        initializer=self._initializer, dtype=dtype)

    m_matrix = tf.matmul(m_prev, concat_w_m)
    inputs_matrix = tf.matmul(inputs, concat_w_inputs)

    if self._use_biases:
      b = tf.get_variable(
          "bias",
          shape=[8 * self._num_units],
          initializer=tf.zeros_initializer(),
          dtype=dtype)
      m_matrix = tf.nn.bias_add(m_matrix, b)

    # The NAS cell branches into 8 different splits for both the hiddenstate
    # and the input
    m_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
                               value=m_matrix)
    inputs_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
                                    value=inputs_matrix)

    # First layer: combine each input split with its hiddenstate split.
    layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
    layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
    layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
    layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
    layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
    layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
    layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
    layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])

    # Second layer: pair up the 8 first-layer branches into 4 outputs.
    l2_0 = tanh(layer1_0 * layer1_1)
    l2_1 = tanh(layer1_2 + layer1_3)
    l2_2 = tanh(layer1_4 * layer1_5)
    l2_3 = sigmoid(layer1_6 + layer1_7)

    # Inject the cell: mix the previous cell state into the first branch.
    l2_0 = tanh(l2_0 + c_prev)

    # Third layer
    l3_0_pre = l2_0 * l2_1
    new_c = l3_0_pre  # create new cell
    l3_0 = l3_0_pre
    l3_1 = tanh(l2_2 + l2_3)

    # Final layer
    new_m = tanh(l3_0 * l3_1)

    # Projection layer if specified
    if self._num_proj is not None:
      concat_w_proj = tf.get_variable(
          "projection_weights", [self._num_units, self._num_proj],
          dtype)
      new_m = tf.matmul(new_m, concat_w_proj)

    new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_m)
    return new_m, new_state
| 36.189744 | 80 | 0.680176 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class NASCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, num_units, num_proj=None,
use_biases=False, reuse=None,
initializer=None,
input_transform=None,
state_transform=None,
update_transform=None):
super(NASCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._num_proj = num_proj
self._use_biases = use_biases
self._reuse = reuse
if num_proj is not None:
self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._initializer = initializer
self._input_transform = input_transform
self._state_transform = state_transform
assert update_transform is None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
sigmoid = tf.sigmoid
tanh = tf.tanh
relu = tf.nn.relu
num_proj = self._num_units if self._num_proj is None else self._num_proj
def maybe_transform(transform, x):
if transform is None:
return x
else:
return transform(x)
(c_prev, m_prev) = state
m_prev = maybe_transform(self._state_transform, m_prev)
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
inputs = maybe_transform(self._input_transform, inputs)
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w_m = tf.get_variable(
"recurrent_kernel", [num_proj, 8 * self._num_units],
initializer=self._initializer, dtype=dtype)
concat_w_inputs = tf.get_variable(
"kernel", [input_size.value, 8 * self._num_units],
initializer=self._initializer, dtype=dtype)
m_matrix = tf.matmul(m_prev, concat_w_m)
inputs_matrix = tf.matmul(inputs, concat_w_inputs)
if self._use_biases:
b = tf.get_variable(
"bias",
shape=[8 * self._num_units],
initializer=tf.zeros_initializer(),
dtype=dtype)
m_matrix = tf.nn.bias_add(m_matrix, b)
m_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
value=m_matrix)
inputs_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
value=inputs_matrix)
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
l2_0 = tanh(l2_0 + c_prev)
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
new_m = tanh(l3_0 * l3_1)
if self._num_proj is not None:
concat_w_proj = tf.get_variable(
"projection_weights", [self._num_units, self._num_proj],
dtype)
new_m = tf.matmul(new_m, concat_w_proj)
new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_m)
return new_m, new_state
| true | true |
f73ece756e563abcdc1083081570082d648bf77e | 304 | py | Python | main/routes/__init__.py | ohioh/ohioh_Framework_Cluster_1_Flask | 7e4343d48e84fb98593da4baacbcae595722f766 | [
"Apache-2.0"
] | 1 | 2020-08-11T18:37:35.000Z | 2020-08-11T18:37:35.000Z | main/routes/__init__.py | ohioh/ohioh_Framework_Cluster_1_Flask | 7e4343d48e84fb98593da4baacbcae595722f766 | [
"Apache-2.0"
] | null | null | null | main/routes/__init__.py | ohioh/ohioh_Framework_Cluster_1_Flask | 7e4343d48e84fb98593da4baacbcae595722f766 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
from .users import UserList, User
from flask_restful import Api
from .beat import Beat
api_bp = Blueprint('api', __name__)
api = Api(api_bp, prefix='/ohioh/api/v1')
api.add_resource(Beat, '/')
api.add_resource(UserList, '/users')
api.add_resource(User, '/users/<user_id>')
| 23.384615 | 42 | 0.746711 | from flask import Blueprint
from .users import UserList, User
from flask_restful import Api
from .beat import Beat
api_bp = Blueprint('api', __name__)
api = Api(api_bp, prefix='/ohioh/api/v1')
api.add_resource(Beat, '/')
api.add_resource(UserList, '/users')
api.add_resource(User, '/users/<user_id>')
| true | true |
f73ed01fac9c6ea3af473a0a56e63ee296ecd3a6 | 978 | py | Python | ivy/functional/backends/tensorflow/array_api/linear_algebra.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/tensorflow/array_api/linear_algebra.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/tensorflow/array_api/linear_algebra.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | null | null | null | # global
import tensorflow as tf
from tensorflow.python.types.core import Tensor
from typing import Union, Optional, Tuple, Literal
# local
from ivy import inf
# noinspection PyUnusedLocal,PyShadowingBuiltins
def vector_norm(x: Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, - inf]] = 2)\
-> Tensor:
if ord == -float('inf'):
tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)
elif ord == -1:
tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)
elif ord == 0:
tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()
else:
tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)
if tn_normalized_vector.shape == tuple():
return tf.expand_dims(tn_normalized_vector, 0)
return tn_normalized_vector
| 31.548387 | 96 | 0.643149 |
import tensorflow as tf
from tensorflow.python.types.core import Tensor
from typing import Union, Optional, Tuple, Literal
from ivy import inf
def vector_norm(x: Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, - inf]] = 2)\
-> Tensor:
if ord == -float('inf'):
tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)
elif ord == -1:
tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)
elif ord == 0:
tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()
else:
tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)
if tn_normalized_vector.shape == tuple():
return tf.expand_dims(tn_normalized_vector, 0)
return tn_normalized_vector
| true | true |
f73ed21e930460ee4ea180f65f931421c418acab | 23,530 | py | Python | cart_venv/Lib/site-packages/tensorboard/compat/proto/event_pb2.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | cart_venv/Lib/site-packages/tensorboard/compat/proto/event_pb2.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | cart_venv/Lib/site-packages/tensorboard/compat/proto/event_pb2.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorboard/compat/proto/event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorboard.compat.proto import summary_pb2 as tensorboard_dot_compat_dot_proto_dot_summary__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorboard/compat/proto/event.proto',
package='tensorboard',
syntax='proto3',
serialized_options=_b('\n\023org.tensorflow.utilB\013EventProtosP\001\370\001\001'),
serialized_pb=_b('\n$tensorboard/compat/proto/event.proto\x12\x0btensorboard\x1a&tensorboard/compat/proto/summary.proto\"\xbf\x02\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x01(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x16\n\x0c\x66ile_version\x18\x03 \x01(\tH\x00\x12\x13\n\tgraph_def\x18\x04 \x01(\x0cH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.tensorboard.SummaryH\x00\x12.\n\x0blog_message\x18\x06 \x01(\x0b\x32\x17.tensorboard.LogMessageH\x00\x12.\n\x0bsession_log\x18\x07 \x01(\x0b\x32\x17.tensorboard.SessionLogH\x00\x12=\n\x13tagged_run_metadata\x18\x08 \x01(\x0b\x32\x1e.tensorboard.TaggedRunMetadataH\x00\x12\x18\n\x0emeta_graph_def\x18\t \x01(\x0cH\x00\x42\x06\n\x04what\"\x9a\x01\n\nLogMessage\x12,\n\x05level\x18\x01 \x01(\x0e\x32\x1d.tensorboard.LogMessage.Level\x12\x0f\n\x07message\x18\x02 \x01(\t\"M\n\x05Level\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tDEBUGGING\x10\n\x12\x08\n\x04INFO\x10\x14\x12\x08\n\x04WARN\x10\x1e\x12\t\n\x05\x45RROR\x10(\x12\t\n\x05\x46\x41TAL\x10\x32\"\xb7\x01\n\nSessionLog\x12\x35\n\x06status\x18\x01 \x01(\x0e\x32%.tensorboard.SessionLog.SessionStatus\x12\x17\n\x0f\x63heckpoint_path\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t\"L\n\rSessionStatus\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\t\n\x05START\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\x0e\n\nCHECKPOINT\x10\x03\"6\n\x11TaggedRunMetadata\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x14\n\x0crun_metadata\x18\x02 \x01(\x0c\"$\n\x0eWatchdogConfig\x12\x12\n\ntimeout_ms\x18\x01 \x01(\x03\"\x86\x01\n\x16WorkerHeartbeatRequest\x12\x36\n\rshutdown_mode\x18\x01 \x01(\x0e\x32\x1f.tensorboard.WorkerShutdownMode\x12\x34\n\x0fwatchdog_config\x18\x02 \x01(\x0b\x32\x1b.tensorboard.WatchdogConfig\"\x85\x01\n\x17WorkerHeartbeatResponse\x12\x30\n\rhealth_status\x18\x01 \x01(\x0e\x32\x19.tensorboard.WorkerHealth\x12&\n\nworker_log\x18\x02 \x03(\x0b\x32\x12.tensorboard.Event\x12\x10\n\x08hostname\x18\x03 
\x01(\t*[\n\x0cWorkerHealth\x12\x06\n\x02OK\x10\x00\x12\x1c\n\x18RECEIVED_SHUTDOWN_SIGNAL\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x11\n\rSHUTTING_DOWN\x10\x03*k\n\x12WorkerShutdownMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x12\n\x0eNOT_CONFIGURED\x10\x01\x12\x18\n\x14WAIT_FOR_COORDINATOR\x10\x02\x12\x1a\n\x16SHUTDOWN_AFTER_TIMEOUT\x10\x03\x42\'\n\x13org.tensorflow.utilB\x0b\x45ventProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorboard_dot_compat_dot_proto_dot_summary__pb2.DESCRIPTOR,])
_WORKERHEALTH = _descriptor.EnumDescriptor(
name='WorkerHealth',
full_name='tensorboard.WorkerHealth',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECEIVED_SHUTDOWN_SIGNAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTTING_DOWN', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1125,
serialized_end=1216,
)
_sym_db.RegisterEnumDescriptor(_WORKERHEALTH)
WorkerHealth = enum_type_wrapper.EnumTypeWrapper(_WORKERHEALTH)
_WORKERSHUTDOWNMODE = _descriptor.EnumDescriptor(
name='WorkerShutdownMode',
full_name='tensorboard.WorkerShutdownMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_CONFIGURED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WAIT_FOR_COORDINATOR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTDOWN_AFTER_TIMEOUT', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1218,
serialized_end=1325,
)
_sym_db.RegisterEnumDescriptor(_WORKERSHUTDOWNMODE)
WorkerShutdownMode = enum_type_wrapper.EnumTypeWrapper(_WORKERSHUTDOWNMODE)
OK = 0
RECEIVED_SHUTDOWN_SIGNAL = 1
INTERNAL_ERROR = 2
SHUTTING_DOWN = 3
DEFAULT = 0
NOT_CONFIGURED = 1
WAIT_FOR_COORDINATOR = 2
SHUTDOWN_AFTER_TIMEOUT = 3
_LOGMESSAGE_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='tensorboard.LogMessage.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEBUGGING', index=1, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFO', index=2, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WARN', index=3, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL', index=5, number=50,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=493,
serialized_end=570,
)
_sym_db.RegisterEnumDescriptor(_LOGMESSAGE_LEVEL)
_SESSIONLOG_SESSIONSTATUS = _descriptor.EnumDescriptor(
name='SessionStatus',
full_name='tensorboard.SessionLog.SessionStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='START', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOP', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHECKPOINT', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=680,
serialized_end=756,
)
_sym_db.RegisterEnumDescriptor(_SESSIONLOG_SESSIONSTATUS)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='tensorboard.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='tensorboard.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='tensorboard.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file_version', full_name='tensorboard.Event.file_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='graph_def', full_name='tensorboard.Event.graph_def', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorboard.Event.summary', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='log_message', full_name='tensorboard.Event.log_message', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='session_log', full_name='tensorboard.Event.session_log', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tagged_run_metadata', full_name='tensorboard.Event.tagged_run_metadata', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meta_graph_def', full_name='tensorboard.Event.meta_graph_def', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='tensorboard.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=94,
serialized_end=413,
)
_LOGMESSAGE = _descriptor.Descriptor(
name='LogMessage',
full_name='tensorboard.LogMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level', full_name='tensorboard.LogMessage.level', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='tensorboard.LogMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOGMESSAGE_LEVEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=416,
serialized_end=570,
)
_SESSIONLOG = _descriptor.Descriptor(
name='SessionLog',
full_name='tensorboard.SessionLog',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='tensorboard.SessionLog.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_path', full_name='tensorboard.SessionLog.checkpoint_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='tensorboard.SessionLog.msg', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SESSIONLOG_SESSIONSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=573,
serialized_end=756,
)
_TAGGEDRUNMETADATA = _descriptor.Descriptor(
name='TaggedRunMetadata',
full_name='tensorboard.TaggedRunMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='tensorboard.TaggedRunMetadata.tag', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='run_metadata', full_name='tensorboard.TaggedRunMetadata.run_metadata', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=758,
serialized_end=812,
)
_WATCHDOGCONFIG = _descriptor.Descriptor(
name='WatchdogConfig',
full_name='tensorboard.WatchdogConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timeout_ms', full_name='tensorboard.WatchdogConfig.timeout_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=814,
serialized_end=850,
)
_WORKERHEARTBEATREQUEST = _descriptor.Descriptor(
name='WorkerHeartbeatRequest',
full_name='tensorboard.WorkerHeartbeatRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shutdown_mode', full_name='tensorboard.WorkerHeartbeatRequest.shutdown_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='watchdog_config', full_name='tensorboard.WorkerHeartbeatRequest.watchdog_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=853,
serialized_end=987,
)
_WORKERHEARTBEATRESPONSE = _descriptor.Descriptor(
name='WorkerHeartbeatResponse',
full_name='tensorboard.WorkerHeartbeatResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='health_status', full_name='tensorboard.WorkerHeartbeatResponse.health_status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_log', full_name='tensorboard.WorkerHeartbeatResponse.worker_log', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostname', full_name='tensorboard.WorkerHeartbeatResponse.hostname', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=990,
serialized_end=1123,
)
_EVENT.fields_by_name['summary'].message_type = tensorboard_dot_compat_dot_proto_dot_summary__pb2._SUMMARY
_EVENT.fields_by_name['log_message'].message_type = _LOGMESSAGE
_EVENT.fields_by_name['session_log'].message_type = _SESSIONLOG
_EVENT.fields_by_name['tagged_run_metadata'].message_type = _TAGGEDRUNMETADATA
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['file_version'])
_EVENT.fields_by_name['file_version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['graph_def'])
_EVENT.fields_by_name['graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['log_message'])
_EVENT.fields_by_name['log_message'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['session_log'])
_EVENT.fields_by_name['session_log'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['tagged_run_metadata'])
_EVENT.fields_by_name['tagged_run_metadata'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['meta_graph_def'])
_EVENT.fields_by_name['meta_graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_LOGMESSAGE.fields_by_name['level'].enum_type = _LOGMESSAGE_LEVEL
_LOGMESSAGE_LEVEL.containing_type = _LOGMESSAGE
_SESSIONLOG.fields_by_name['status'].enum_type = _SESSIONLOG_SESSIONSTATUS
_SESSIONLOG_SESSIONSTATUS.containing_type = _SESSIONLOG
_WORKERHEARTBEATREQUEST.fields_by_name['shutdown_mode'].enum_type = _WORKERSHUTDOWNMODE
_WORKERHEARTBEATREQUEST.fields_by_name['watchdog_config'].message_type = _WATCHDOGCONFIG
_WORKERHEARTBEATRESPONSE.fields_by_name['health_status'].enum_type = _WORKERHEALTH
_WORKERHEARTBEATRESPONSE.fields_by_name['worker_log'].message_type = _EVENT
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['LogMessage'] = _LOGMESSAGE
DESCRIPTOR.message_types_by_name['SessionLog'] = _SESSIONLOG
DESCRIPTOR.message_types_by_name['TaggedRunMetadata'] = _TAGGEDRUNMETADATA
DESCRIPTOR.message_types_by_name['WatchdogConfig'] = _WATCHDOGCONFIG
DESCRIPTOR.message_types_by_name['WorkerHeartbeatRequest'] = _WORKERHEARTBEATREQUEST
DESCRIPTOR.message_types_by_name['WorkerHeartbeatResponse'] = _WORKERHEARTBEATRESPONSE
DESCRIPTOR.enum_types_by_name['WorkerHealth'] = _WORKERHEALTH
DESCRIPTOR.enum_types_by_name['WorkerShutdownMode'] = _WORKERSHUTDOWNMODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.Event)
})
_sym_db.RegisterMessage(Event)
LogMessage = _reflection.GeneratedProtocolMessageType('LogMessage', (_message.Message,), {
'DESCRIPTOR' : _LOGMESSAGE,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.LogMessage)
})
_sym_db.RegisterMessage(LogMessage)
SessionLog = _reflection.GeneratedProtocolMessageType('SessionLog', (_message.Message,), {
'DESCRIPTOR' : _SESSIONLOG,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.SessionLog)
})
_sym_db.RegisterMessage(SessionLog)
TaggedRunMetadata = _reflection.GeneratedProtocolMessageType('TaggedRunMetadata', (_message.Message,), {
'DESCRIPTOR' : _TAGGEDRUNMETADATA,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.TaggedRunMetadata)
})
_sym_db.RegisterMessage(TaggedRunMetadata)
WatchdogConfig = _reflection.GeneratedProtocolMessageType('WatchdogConfig', (_message.Message,), {
'DESCRIPTOR' : _WATCHDOGCONFIG,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WatchdogConfig)
})
_sym_db.RegisterMessage(WatchdogConfig)
WorkerHeartbeatRequest = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatRequest', (_message.Message,), {
'DESCRIPTOR' : _WORKERHEARTBEATREQUEST,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatRequest)
})
_sym_db.RegisterMessage(WorkerHeartbeatRequest)
WorkerHeartbeatResponse = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatResponse', (_message.Message,), {
'DESCRIPTOR' : _WORKERHEARTBEATRESPONSE,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatResponse)
})
_sym_db.RegisterMessage(WorkerHeartbeatResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.746622 | 2,343 | 0.755164 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from tensorboard.compat.proto import summary_pb2 as tensorboard_dot_compat_dot_proto_dot_summary__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorboard/compat/proto/event.proto',
package='tensorboard',
syntax='proto3',
serialized_options=_b('\n\023org.tensorflow.utilB\013EventProtosP\001\370\001\001'),
serialized_pb=_b('\n$tensorboard/compat/proto/event.proto\x12\x0btensorboard\x1a&tensorboard/compat/proto/summary.proto\"\xbf\x02\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x01(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x16\n\x0c\x66ile_version\x18\x03 \x01(\tH\x00\x12\x13\n\tgraph_def\x18\x04 \x01(\x0cH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.tensorboard.SummaryH\x00\x12.\n\x0blog_message\x18\x06 \x01(\x0b\x32\x17.tensorboard.LogMessageH\x00\x12.\n\x0bsession_log\x18\x07 \x01(\x0b\x32\x17.tensorboard.SessionLogH\x00\x12=\n\x13tagged_run_metadata\x18\x08 \x01(\x0b\x32\x1e.tensorboard.TaggedRunMetadataH\x00\x12\x18\n\x0emeta_graph_def\x18\t \x01(\x0cH\x00\x42\x06\n\x04what\"\x9a\x01\n\nLogMessage\x12,\n\x05level\x18\x01 \x01(\x0e\x32\x1d.tensorboard.LogMessage.Level\x12\x0f\n\x07message\x18\x02 \x01(\t\"M\n\x05Level\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tDEBUGGING\x10\n\x12\x08\n\x04INFO\x10\x14\x12\x08\n\x04WARN\x10\x1e\x12\t\n\x05\x45RROR\x10(\x12\t\n\x05\x46\x41TAL\x10\x32\"\xb7\x01\n\nSessionLog\x12\x35\n\x06status\x18\x01 \x01(\x0e\x32%.tensorboard.SessionLog.SessionStatus\x12\x17\n\x0f\x63heckpoint_path\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t\"L\n\rSessionStatus\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\t\n\x05START\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\x0e\n\nCHECKPOINT\x10\x03\"6\n\x11TaggedRunMetadata\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x14\n\x0crun_metadata\x18\x02 \x01(\x0c\"$\n\x0eWatchdogConfig\x12\x12\n\ntimeout_ms\x18\x01 \x01(\x03\"\x86\x01\n\x16WorkerHeartbeatRequest\x12\x36\n\rshutdown_mode\x18\x01 \x01(\x0e\x32\x1f.tensorboard.WorkerShutdownMode\x12\x34\n\x0fwatchdog_config\x18\x02 \x01(\x0b\x32\x1b.tensorboard.WatchdogConfig\"\x85\x01\n\x17WorkerHeartbeatResponse\x12\x30\n\rhealth_status\x18\x01 \x01(\x0e\x32\x19.tensorboard.WorkerHealth\x12&\n\nworker_log\x18\x02 \x03(\x0b\x32\x12.tensorboard.Event\x12\x10\n\x08hostname\x18\x03 
\x01(\t*[\n\x0cWorkerHealth\x12\x06\n\x02OK\x10\x00\x12\x1c\n\x18RECEIVED_SHUTDOWN_SIGNAL\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x11\n\rSHUTTING_DOWN\x10\x03*k\n\x12WorkerShutdownMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x12\n\x0eNOT_CONFIGURED\x10\x01\x12\x18\n\x14WAIT_FOR_COORDINATOR\x10\x02\x12\x1a\n\x16SHUTDOWN_AFTER_TIMEOUT\x10\x03\x42\'\n\x13org.tensorflow.utilB\x0b\x45ventProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorboard_dot_compat_dot_proto_dot_summary__pb2.DESCRIPTOR,])
_WORKERHEALTH = _descriptor.EnumDescriptor(
name='WorkerHealth',
full_name='tensorboard.WorkerHealth',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECEIVED_SHUTDOWN_SIGNAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTTING_DOWN', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1125,
serialized_end=1216,
)
_sym_db.RegisterEnumDescriptor(_WORKERHEALTH)
WorkerHealth = enum_type_wrapper.EnumTypeWrapper(_WORKERHEALTH)
_WORKERSHUTDOWNMODE = _descriptor.EnumDescriptor(
name='WorkerShutdownMode',
full_name='tensorboard.WorkerShutdownMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_CONFIGURED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WAIT_FOR_COORDINATOR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTDOWN_AFTER_TIMEOUT', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1218,
serialized_end=1325,
)
_sym_db.RegisterEnumDescriptor(_WORKERSHUTDOWNMODE)
WorkerShutdownMode = enum_type_wrapper.EnumTypeWrapper(_WORKERSHUTDOWNMODE)
OK = 0
RECEIVED_SHUTDOWN_SIGNAL = 1
INTERNAL_ERROR = 2
SHUTTING_DOWN = 3
DEFAULT = 0
NOT_CONFIGURED = 1
WAIT_FOR_COORDINATOR = 2
SHUTDOWN_AFTER_TIMEOUT = 3
_LOGMESSAGE_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='tensorboard.LogMessage.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEBUGGING', index=1, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFO', index=2, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WARN', index=3, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL', index=5, number=50,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=493,
serialized_end=570,
)
_sym_db.RegisterEnumDescriptor(_LOGMESSAGE_LEVEL)
_SESSIONLOG_SESSIONSTATUS = _descriptor.EnumDescriptor(
name='SessionStatus',
full_name='tensorboard.SessionLog.SessionStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='START', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOP', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHECKPOINT', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=680,
serialized_end=756,
)
_sym_db.RegisterEnumDescriptor(_SESSIONLOG_SESSIONSTATUS)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='tensorboard.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='tensorboard.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='tensorboard.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file_version', full_name='tensorboard.Event.file_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='graph_def', full_name='tensorboard.Event.graph_def', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorboard.Event.summary', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='log_message', full_name='tensorboard.Event.log_message', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='session_log', full_name='tensorboard.Event.session_log', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tagged_run_metadata', full_name='tensorboard.Event.tagged_run_metadata', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meta_graph_def', full_name='tensorboard.Event.meta_graph_def', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='tensorboard.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=94,
serialized_end=413,
)
_LOGMESSAGE = _descriptor.Descriptor(
name='LogMessage',
full_name='tensorboard.LogMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level', full_name='tensorboard.LogMessage.level', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='tensorboard.LogMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOGMESSAGE_LEVEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=416,
serialized_end=570,
)
_SESSIONLOG = _descriptor.Descriptor(
name='SessionLog',
full_name='tensorboard.SessionLog',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='tensorboard.SessionLog.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_path', full_name='tensorboard.SessionLog.checkpoint_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='tensorboard.SessionLog.msg', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SESSIONLOG_SESSIONSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=573,
serialized_end=756,
)
_TAGGEDRUNMETADATA = _descriptor.Descriptor(
name='TaggedRunMetadata',
full_name='tensorboard.TaggedRunMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='tensorboard.TaggedRunMetadata.tag', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='run_metadata', full_name='tensorboard.TaggedRunMetadata.run_metadata', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=758,
serialized_end=812,
)
_WATCHDOGCONFIG = _descriptor.Descriptor(
name='WatchdogConfig',
full_name='tensorboard.WatchdogConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timeout_ms', full_name='tensorboard.WatchdogConfig.timeout_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=814,
serialized_end=850,
)
_WORKERHEARTBEATREQUEST = _descriptor.Descriptor(
name='WorkerHeartbeatRequest',
full_name='tensorboard.WorkerHeartbeatRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shutdown_mode', full_name='tensorboard.WorkerHeartbeatRequest.shutdown_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='watchdog_config', full_name='tensorboard.WorkerHeartbeatRequest.watchdog_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=853,
serialized_end=987,
)
_WORKERHEARTBEATRESPONSE = _descriptor.Descriptor(
name='WorkerHeartbeatResponse',
full_name='tensorboard.WorkerHeartbeatResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='health_status', full_name='tensorboard.WorkerHeartbeatResponse.health_status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_log', full_name='tensorboard.WorkerHeartbeatResponse.worker_log', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostname', full_name='tensorboard.WorkerHeartbeatResponse.hostname', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=990,
serialized_end=1123,
)
_EVENT.fields_by_name['summary'].message_type = tensorboard_dot_compat_dot_proto_dot_summary__pb2._SUMMARY
_EVENT.fields_by_name['log_message'].message_type = _LOGMESSAGE
_EVENT.fields_by_name['session_log'].message_type = _SESSIONLOG
_EVENT.fields_by_name['tagged_run_metadata'].message_type = _TAGGEDRUNMETADATA
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['file_version'])
_EVENT.fields_by_name['file_version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['graph_def'])
_EVENT.fields_by_name['graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['log_message'])
_EVENT.fields_by_name['log_message'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['session_log'])
_EVENT.fields_by_name['session_log'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['tagged_run_metadata'])
_EVENT.fields_by_name['tagged_run_metadata'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['meta_graph_def'])
_EVENT.fields_by_name['meta_graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_LOGMESSAGE.fields_by_name['level'].enum_type = _LOGMESSAGE_LEVEL
_LOGMESSAGE_LEVEL.containing_type = _LOGMESSAGE
_SESSIONLOG.fields_by_name['status'].enum_type = _SESSIONLOG_SESSIONSTATUS
_SESSIONLOG_SESSIONSTATUS.containing_type = _SESSIONLOG
_WORKERHEARTBEATREQUEST.fields_by_name['shutdown_mode'].enum_type = _WORKERSHUTDOWNMODE
_WORKERHEARTBEATREQUEST.fields_by_name['watchdog_config'].message_type = _WATCHDOGCONFIG
_WORKERHEARTBEATRESPONSE.fields_by_name['health_status'].enum_type = _WORKERHEALTH
_WORKERHEARTBEATRESPONSE.fields_by_name['worker_log'].message_type = _EVENT
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['LogMessage'] = _LOGMESSAGE
DESCRIPTOR.message_types_by_name['SessionLog'] = _SESSIONLOG
DESCRIPTOR.message_types_by_name['TaggedRunMetadata'] = _TAGGEDRUNMETADATA
DESCRIPTOR.message_types_by_name['WatchdogConfig'] = _WATCHDOGCONFIG
DESCRIPTOR.message_types_by_name['WorkerHeartbeatRequest'] = _WORKERHEARTBEATREQUEST
DESCRIPTOR.message_types_by_name['WorkerHeartbeatResponse'] = _WORKERHEARTBEATRESPONSE
DESCRIPTOR.enum_types_by_name['WorkerHealth'] = _WORKERHEALTH
DESCRIPTOR.enum_types_by_name['WorkerShutdownMode'] = _WORKERSHUTDOWNMODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.Event)
})
_sym_db.RegisterMessage(Event)
LogMessage = _reflection.GeneratedProtocolMessageType('LogMessage', (_message.Message,), {
'DESCRIPTOR' : _LOGMESSAGE,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.LogMessage)
})
_sym_db.RegisterMessage(LogMessage)
SessionLog = _reflection.GeneratedProtocolMessageType('SessionLog', (_message.Message,), {
'DESCRIPTOR' : _SESSIONLOG,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.SessionLog)
})
_sym_db.RegisterMessage(SessionLog)
TaggedRunMetadata = _reflection.GeneratedProtocolMessageType('TaggedRunMetadata', (_message.Message,), {
'DESCRIPTOR' : _TAGGEDRUNMETADATA,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.TaggedRunMetadata)
})
_sym_db.RegisterMessage(TaggedRunMetadata)
WatchdogConfig = _reflection.GeneratedProtocolMessageType('WatchdogConfig', (_message.Message,), {
'DESCRIPTOR' : _WATCHDOGCONFIG,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WatchdogConfig)
})
_sym_db.RegisterMessage(WatchdogConfig)
WorkerHeartbeatRequest = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatRequest', (_message.Message,), {
'DESCRIPTOR' : _WORKERHEARTBEATREQUEST,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatRequest)
})
_sym_db.RegisterMessage(WorkerHeartbeatRequest)
WorkerHeartbeatResponse = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatResponse', (_message.Message,), {
'DESCRIPTOR' : _WORKERHEARTBEATRESPONSE,
'__module__' : 'tensorboard.compat.proto.event_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatResponse)
})
_sym_db.RegisterMessage(WorkerHeartbeatResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f73ed3c3be2ef1739d2732a5f01a67362bcf79c0 | 6,410 | py | Python | feature_extraction.py | ddboline/kaggle_predict_west_nile | b4dbb3eed450beaf2b73d2a772e0fb3266926418 | [
"MIT"
] | null | null | null | feature_extraction.py | ddboline/kaggle_predict_west_nile | b4dbb3eed450beaf2b73d2a772e0fb3266926418 | [
"MIT"
] | null | null | null | feature_extraction.py | ddboline/kaggle_predict_west_nile | b4dbb3eed450beaf2b73d2a772e0fb3266926418 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 16:28:06 2015
@author: ddboline
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import csv
import gzip
import numpy as np
import pandas as pd
from dateutil.parser import parse
# Weather columns whose raw values may contain 'M' (missing) or 'T' (trace);
# these sentinels are rewritten to 'nan' / '0.0' before float() parsing.
WEATHER_VARS_WITH_M_T = (u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint',
                         u'WetBulb', u'Heat', u'Cool', u'Snowfall',
                         u'PrecipTotal', u'StnPressure', u'SeaLevel',
                         u'ResultSpeed', u'ResultDir', u'AvgSpeed', u'Water1')
# Weather-phenomenon codes that may appear in the 'CodeSum' column (fog,
# rain, snow, thunderstorm, ...); each becomes a 0/1 indicator 'wp<CODE>'.
WEATHER_PHENOMENA = ('BCFG', 'BLDU', 'BLSN', 'BR', 'DU', 'DZ', 'FG', 'FG+',
                     'FU', 'FZDZ', 'FZFG', 'FZRA', 'GR', 'GS', 'HZ', 'MIFG',
                     'PL', 'PRFG', 'RA', 'SG', 'SN', 'SQ', 'TS', 'TSRA',
                     'TSSN', 'UP', 'VCFG', 'VCTS')
def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two points in degrees.

    Uses the haversine formula.  Accepts scalars or numpy arrays
    (broadcasting applies).

    Args:
        lat1, lon1: latitude/longitude of the first point, in degrees.
        lat2, lon2: latitude/longitude of the second point, in degrees.

    Returns:
        Distance along the Earth's surface in kilometres.
    """
    r_earth = 6371.  # mean Earth radius, km
    # BUG FIX: the original converted lat1/lat2 with in-place ``*=``, which
    # silently mutated caller-supplied numpy arrays.  Convert into fresh
    # objects instead.
    dlat = np.abs(np.asarray(lat1) - lat2) * np.pi / 180.
    dlon = np.abs(np.asarray(lon1) - lon2) * np.pi / 180.
    lat1_r = np.asarray(lat1) * np.pi / 180.
    lat2_r = np.asarray(lat2) * np.pi / 180.
    return 2. * r_earth * np.arcsin(
        np.sqrt(
            np.sin(dlat / 2.) ** 2 +
            np.cos(lat1_r) * np.cos(lat2_r) *
            np.sin(dlon / 2.) ** 2))
def lat_lon_box(lat, lon, dist):
    """Half-extents of a lat/lon bounding box around ``(lat, lon)``.

    Args:
        lat: centre latitude in degrees.
        lon: centre longitude in degrees (unused; kept for interface
            compatibility).
        dist: great-circle radius of interest in kilometres.

    Returns:
        ``(dlat, dlon)`` in degrees; a point is inside the box when
        ``abs(plat - lat) <= dlat and abs(plon - lon) <= dlon``.
    """
    r_earth = 6371.  # mean Earth radius, km
    d_2r = dist / (2. * r_earth)  # half the subtended central angle, radians
    dlat = 2. * d_2r
    # BUG FIX: the latitude must be converted to radians before taking its
    # cosine; the original passed degrees, so the longitude extent was wrong
    # everywhere except near the equator.
    lat_rad = lat * np.pi / 180.
    dlon = 2. * np.arcsin((np.sin(d_2r)) / (np.cos(lat_rad)))
    dlat *= 180. / np.pi
    dlon *= 180. / np.pi
    return abs(dlat), abs(dlon)
def feature_extraction():
    """Join Kaggle West Nile train/test rows with spray and weather data.

    Reads ``spray.csv.gz``, ``weather.csv.gz``, ``train.csv.gz`` and
    ``test.csv.gz`` from the working directory and writes
    ``train_full.csv.gz`` / ``test_full.csv.gz``, adding per row:

    * ``n_spray_1`` .. ``n_spray_10``: number of spray events within
      0.1 .. 1.0 km of the trap location,
    * all columns of the closest-in-time same-year weather observation,
      including the ``wp<CODE>`` phenomenon indicators,
    * ``Tmax_cumu``/``Tmin_cumu``/``PrecipTotal_cumu``: year-to-date
      averages of those weather variables.

    NOTE(review): written for Python 2 -- the gzip modes 'r'/'rb'/'wb'
    combined with the csv module break under Python 3; confirm interpreter.
    """
    # Collect all spray locations as (lat, lon) pairs.
    spray_df = pd.read_csv('spray.csv.gz', compression='gzip')
    spray_lat_lon_list = []
    for idx, row in spray_df.iterrows():
        spray_lat_lon_list.append((row['Latitude'], row['Longitude']))
    # Parse the weather file once into a list of per-observation dicts.
    weather_features = []
    cumu_labels = ('Tmax', 'Tmin', 'PrecipTotal')
    cumu_features = {}  # running per-year sums for cumu_labels
    cumu_total = 0      # number of non-NaN values accumulated this year
    current_year = -1
    with gzip.open('weather.csv.gz', 'r') as wfile:
        wcsv = csv.reader(wfile)
        weather_labels = next(wcsv)  # header row
        for row in wcsv:
            rowdict = dict(zip(weather_labels, row))
            rowdict['Date'] = parse(rowdict['Date'])
            current_date = rowdict['Date']
            # Reset the cumulative statistics at each year boundary.
            if current_date.year != current_year:
                current_year = current_date.year
                cumu_features = {k: 0 for k in cumu_labels}
                cumu_total = 0
            # 'M' means missing, 'T' means trace: rewrite so float() works.
            for k in WEATHER_VARS_WITH_M_T:
                if k in rowdict:
                    rowdict[k] = rowdict[k].replace('M', 'nan')
                    rowdict[k] = rowdict[k].replace('T', '0.0')
            for k in rowdict:
                if rowdict[k] == '-':
                    rowdict[k] = 'nan'
                if type(rowdict[k]) == str:
                    rowdict[k] = rowdict[k].strip()
            # One 0/1 indicator column per known phenomenon code.
            for ph in WEATHER_PHENOMENA:
                rowdict['wp%s' % ph] = '0'
            for ph in rowdict['CodeSum'].split():
                if ph in WEATHER_PHENOMENA:
                    rowdict['wp%s' % ph] = '1'
            # Update the year-to-date sums (NaN values are skipped).
            for lab in cumu_labels:
                _tmp = float(rowdict[lab])
                if not np.isnan(_tmp):
                    cumu_features[lab] += _tmp
                    cumu_total += 1
            # NOTE(review): cumu_total counts values across all three labels
            # jointly, and this divides by zero if the first rows of a year
            # are all NaN -- confirm intent.
            for lab in ('Tmax', 'Tmin', 'PrecipTotal'):
                rowdict['%s_cumu' % lab] = cumu_features[lab] / cumu_total
            weather_features.append(rowdict)
    # Extend the header with the derived weather columns.
    for ph in WEATHER_PHENOMENA:
        weather_labels.append('wp%s' % ph)
    for lab in cumu_labels:
        weather_labels.append('%s_cumu' % lab)
    # Augment train and test rows with spray counts and weather features.
    for prefix in 'train', 'test':
        with gzip.open('%s.csv.gz' % prefix, 'rb') as csvfile:
            outfile = gzip.open('%s_full.csv.gz' % prefix, 'wb')
            csv_reader = csv.reader(csvfile)
            labels = next(csv_reader)
            out_labels = labels +\
                ['n_spray_%d' % x for x in range(1,11)]
            for lab in weather_labels:
                if lab == 'Date':
                    continue
                out_labels.append(lab)
            csv_writer = csv.writer(outfile)
            csv_writer.writerow(out_labels)
            for idx, row in enumerate(csv_reader):
                if idx % 1000 == 0:
                    print('processed %d' % idx)
                row_dict = dict(zip(labels, row))
                current_date = parse(row_dict['Date'])
                cur_lat = float(row_dict['Latitude'])
                cur_lon = float(row_dict['Longitude'])
                for idx in range(1, 11):
                    row_dict['n_spray_%d' % idx] = 0
                # Cheap bounding-box rejection before the haversine call.
                dlat, dlon = lat_lon_box(cur_lat, cur_lon, 1.5)
                for slat, slon in spray_lat_lon_list:
                    if abs(slat-cur_lat) > dlat or abs(slon-cur_lon) > dlon:
                        continue
                    sdist = haversine_distance(cur_lat, cur_lon, slat, slon)
                    # n_spray_k counts spray events within k/10 km.
                    for idx in range(1,11):
                        if sdist < idx/10.0:
                            row_dict['n_spray_%d' % idx] += 1
                for lab in ['Tmax_cumu', 'Tmin_cumu', 'PrecipTotal_cumu']:
                    row_dict[lab] = 0
                # Linear scan for the closest-in-time same-year observation.
                most_recent = 1000000
                most_recent_w = weather_features[0]
                for wfeat in weather_features:
                    wdate = wfeat['Date']
                    if current_date.year != wdate.year:
                        continue
                    wdur = abs((current_date - wdate).days)
                    if wdur < most_recent:
                        most_recent = wdur
                        most_recent_w = wfeat
                for lab in weather_labels:
                    if lab == 'Date':
                        continue
                    row_dict[lab] = most_recent_w[lab]
                row_val = [row_dict[col] for col in out_labels]
                csv_writer.writerow(row_val)
    return
if __name__ == '__main__':
    # Command-line entry point: writes train_full/test_full in the cwd.
    feature_extraction()
| 37.705882 | 82 | 0.493916 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import csv
import gzip
import numpy as np
import pandas as pd
from dateutil.parser import parse
# Weather columns whose raw values may contain 'M' (missing) or 'T' (trace);
# these sentinels are rewritten to 'nan' / '0.0' before float() parsing.
WEATHER_VARS_WITH_M_T = (u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint',
                         u'WetBulb', u'Heat', u'Cool', u'Snowfall',
                         u'PrecipTotal', u'StnPressure', u'SeaLevel',
                         u'ResultSpeed', u'ResultDir', u'AvgSpeed', u'Water1')
# Weather-phenomenon codes that may appear in the 'CodeSum' column (fog,
# rain, snow, thunderstorm, ...); each becomes a 0/1 indicator 'wp<CODE>'.
WEATHER_PHENOMENA = ('BCFG', 'BLDU', 'BLSN', 'BR', 'DU', 'DZ', 'FG', 'FG+',
                     'FU', 'FZDZ', 'FZFG', 'FZRA', 'GR', 'GS', 'HZ', 'MIFG',
                     'PL', 'PRFG', 'RA', 'SG', 'SN', 'SQ', 'TS', 'TSRA',
                     'TSSN', 'UP', 'VCFG', 'VCTS')
def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two points in degrees.

    Uses the haversine formula.  Accepts scalars or numpy arrays
    (broadcasting applies).

    Args:
        lat1, lon1: latitude/longitude of the first point, in degrees.
        lat2, lon2: latitude/longitude of the second point, in degrees.

    Returns:
        Distance along the Earth's surface in kilometres.
    """
    r_earth = 6371.  # mean Earth radius, km
    # BUG FIX: the original converted lat1/lat2 with in-place ``*=``, which
    # silently mutated caller-supplied numpy arrays.  Convert into fresh
    # objects instead.
    dlat = np.abs(np.asarray(lat1) - lat2) * np.pi / 180.
    dlon = np.abs(np.asarray(lon1) - lon2) * np.pi / 180.
    lat1_r = np.asarray(lat1) * np.pi / 180.
    lat2_r = np.asarray(lat2) * np.pi / 180.
    return 2. * r_earth * np.arcsin(
        np.sqrt(
            np.sin(dlat / 2.) ** 2 +
            np.cos(lat1_r) * np.cos(lat2_r) *
            np.sin(dlon / 2.) ** 2))
def lat_lon_box(lat, lon, dist):
    """Half-extents of a lat/lon bounding box around ``(lat, lon)``.

    Args:
        lat: centre latitude in degrees.
        lon: centre longitude in degrees (unused; kept for interface
            compatibility).
        dist: great-circle radius of interest in kilometres.

    Returns:
        ``(dlat, dlon)`` in degrees; a point is inside the box when
        ``abs(plat - lat) <= dlat and abs(plon - lon) <= dlon``.
    """
    r_earth = 6371.  # mean Earth radius, km
    d_2r = dist / (2. * r_earth)  # half the subtended central angle, radians
    dlat = 2. * d_2r
    # BUG FIX: the latitude must be converted to radians before taking its
    # cosine; the original passed degrees, so the longitude extent was wrong
    # everywhere except near the equator.
    lat_rad = lat * np.pi / 180.
    dlon = 2. * np.arcsin((np.sin(d_2r)) / (np.cos(lat_rad)))
    dlat *= 180. / np.pi
    dlon *= 180. / np.pi
    return abs(dlat), abs(dlon)
def feature_extraction():
    """Join Kaggle West Nile train/test rows with spray and weather data.

    Reads ``spray.csv.gz``, ``weather.csv.gz``, ``train.csv.gz`` and
    ``test.csv.gz`` from the working directory and writes
    ``train_full.csv.gz`` / ``test_full.csv.gz``, adding per row:

    * ``n_spray_1`` .. ``n_spray_10``: number of spray events within
      0.1 .. 1.0 km of the trap location,
    * all columns of the closest-in-time same-year weather observation,
      including the ``wp<CODE>`` phenomenon indicators,
    * ``Tmax_cumu``/``Tmin_cumu``/``PrecipTotal_cumu``: year-to-date
      averages of those weather variables.

    NOTE(review): written for Python 2 -- the gzip modes 'r'/'rb'/'wb'
    combined with the csv module break under Python 3; confirm interpreter.
    """
    # Collect all spray locations as (lat, lon) pairs.
    spray_df = pd.read_csv('spray.csv.gz', compression='gzip')
    spray_lat_lon_list = []
    for idx, row in spray_df.iterrows():
        spray_lat_lon_list.append((row['Latitude'], row['Longitude']))
    # Parse the weather file once into a list of per-observation dicts.
    weather_features = []
    cumu_labels = ('Tmax', 'Tmin', 'PrecipTotal')
    cumu_features = {}  # running per-year sums for cumu_labels
    cumu_total = 0      # number of non-NaN values accumulated this year
    current_year = -1
    with gzip.open('weather.csv.gz', 'r') as wfile:
        wcsv = csv.reader(wfile)
        weather_labels = next(wcsv)  # header row
        for row in wcsv:
            rowdict = dict(zip(weather_labels, row))
            rowdict['Date'] = parse(rowdict['Date'])
            current_date = rowdict['Date']
            # Reset the cumulative statistics at each year boundary.
            if current_date.year != current_year:
                current_year = current_date.year
                cumu_features = {k: 0 for k in cumu_labels}
                cumu_total = 0
            # 'M' means missing, 'T' means trace: rewrite so float() works.
            for k in WEATHER_VARS_WITH_M_T:
                if k in rowdict:
                    rowdict[k] = rowdict[k].replace('M', 'nan')
                    rowdict[k] = rowdict[k].replace('T', '0.0')
            for k in rowdict:
                if rowdict[k] == '-':
                    rowdict[k] = 'nan'
                if type(rowdict[k]) == str:
                    rowdict[k] = rowdict[k].strip()
            # One 0/1 indicator column per known phenomenon code.
            for ph in WEATHER_PHENOMENA:
                rowdict['wp%s' % ph] = '0'
            for ph in rowdict['CodeSum'].split():
                if ph in WEATHER_PHENOMENA:
                    rowdict['wp%s' % ph] = '1'
            # Update the year-to-date sums (NaN values are skipped).
            for lab in cumu_labels:
                _tmp = float(rowdict[lab])
                if not np.isnan(_tmp):
                    cumu_features[lab] += _tmp
                    cumu_total += 1
            # NOTE(review): cumu_total counts values across all three labels
            # jointly, and this divides by zero if the first rows of a year
            # are all NaN -- confirm intent.
            for lab in ('Tmax', 'Tmin', 'PrecipTotal'):
                rowdict['%s_cumu' % lab] = cumu_features[lab] / cumu_total
            weather_features.append(rowdict)
    # Extend the header with the derived weather columns.
    for ph in WEATHER_PHENOMENA:
        weather_labels.append('wp%s' % ph)
    for lab in cumu_labels:
        weather_labels.append('%s_cumu' % lab)
    # Augment train and test rows with spray counts and weather features.
    for prefix in 'train', 'test':
        with gzip.open('%s.csv.gz' % prefix, 'rb') as csvfile:
            outfile = gzip.open('%s_full.csv.gz' % prefix, 'wb')
            csv_reader = csv.reader(csvfile)
            labels = next(csv_reader)
            out_labels = labels +\
                ['n_spray_%d' % x for x in range(1,11)]
            for lab in weather_labels:
                if lab == 'Date':
                    continue
                out_labels.append(lab)
            csv_writer = csv.writer(outfile)
            csv_writer.writerow(out_labels)
            for idx, row in enumerate(csv_reader):
                if idx % 1000 == 0:
                    print('processed %d' % idx)
                row_dict = dict(zip(labels, row))
                current_date = parse(row_dict['Date'])
                cur_lat = float(row_dict['Latitude'])
                cur_lon = float(row_dict['Longitude'])
                for idx in range(1, 11):
                    row_dict['n_spray_%d' % idx] = 0
                # Cheap bounding-box rejection before the haversine call.
                dlat, dlon = lat_lon_box(cur_lat, cur_lon, 1.5)
                for slat, slon in spray_lat_lon_list:
                    if abs(slat-cur_lat) > dlat or abs(slon-cur_lon) > dlon:
                        continue
                    sdist = haversine_distance(cur_lat, cur_lon, slat, slon)
                    # n_spray_k counts spray events within k/10 km.
                    for idx in range(1,11):
                        if sdist < idx/10.0:
                            row_dict['n_spray_%d' % idx] += 1
                for lab in ['Tmax_cumu', 'Tmin_cumu', 'PrecipTotal_cumu']:
                    row_dict[lab] = 0
                # Linear scan for the closest-in-time same-year observation.
                most_recent = 1000000
                most_recent_w = weather_features[0]
                for wfeat in weather_features:
                    wdate = wfeat['Date']
                    if current_date.year != wdate.year:
                        continue
                    wdur = abs((current_date - wdate).days)
                    if wdur < most_recent:
                        most_recent = wdur
                        most_recent_w = wfeat
                for lab in weather_labels:
                    if lab == 'Date':
                        continue
                    row_dict[lab] = most_recent_w[lab]
                row_val = [row_dict[col] for col in out_labels]
                csv_writer.writerow(row_val)
    return
if __name__ == '__main__':
    # Command-line entry point: writes train_full/test_full in the cwd.
    feature_extraction()
| true | true |
f73ed4313e2920afdf2322264eecf863e4fd48bf | 4,583 | py | Python | lang_id.py | neulab/idiomata-bot | f397e49fb9d1d59b9b74e0e528a72307637a18e9 | [
"BSD-3-Clause"
] | null | null | null | lang_id.py | neulab/idiomata-bot | f397e49fb9d1d59b9b74e0e528a72307637a18e9 | [
"BSD-3-Clause"
] | 14 | 2019-08-13T14:01:46.000Z | 2019-08-16T20:24:46.000Z | lang_id.py | neulab/idiomata-bot | f397e49fb9d1d59b9b74e0e528a72307637a18e9 | [
"BSD-3-Clause"
] | 1 | 2019-08-13T14:46:18.000Z | 2019-08-13T14:46:18.000Z | import numpy as np
import iso639
from collections import defaultdict
# Three-letter codes for every language the identifier knows about; codes not
# in `codelang` below are resolved through the iso639 package.
all_langs = ('cay', 'dan', 'deu', 'eng', 'fra', 'kwk', 'see', 'swe')
# Project-specific code/name pairs that take precedence over iso639 lookup.
codelang = [('cay', 'Cayuga'), ('see', 'Seneca'), ('other', 'Other')]
code2lang_dict = {c:l for (c,l) in codelang}  # code -> language name
lang2code_dict = {l:c for (c,l) in codelang}  # language name -> code
def code2lang(code):
    """Map a language code to its English name.

    Project-specific codes ('cay', 'see', 'other') take precedence; anything
    else falls back to the ISO 639 terminology table.

    Args:
        code: a three-letter (or 'other') language code.

    Returns:
        The language name, or None when the code is unknown.
    """
    # Single dict lookup instead of the original `in` + `[]` double lookup;
    # values are non-empty names, so a None result means "not present".
    name = code2lang_dict.get(code)
    if name is not None:
        return name
    try:
        return iso639.languages.terminology[code].inverted
    except KeyError:
        return None
def lang2code(lang):
    """Map an English language name to its language code.

    Project-specific names ('Cayuga', 'Seneca', 'Other') take precedence;
    anything else falls back to the ISO 639 inverted-name table.

    Args:
        lang: an English language name.

    Returns:
        The three-letter (or 'other') code, or None when the name is unknown.
    """
    # Single dict lookup instead of the original `in` + `[]` double lookup;
    # values are non-empty codes, so a None result means "not present".
    code = lang2code_dict.get(lang)
    if code is not None:
        return code
    try:
        return iso639.languages.inverted[lang].terminology
    except KeyError:
        return None
class LanguageID(object):
    """Abstract base class for word-level language identification.

    Concrete subclasses must override ``__init__`` (at minimum setting
    ``self.langs``) and ``predict_word``.
    """
    def __init__(self, langs=all_langs):
        """
        Create a language identifier for the specified languages.

        Args:
            langs: The three-letter language codes for each language.
                Defaults to all_langs.

        Raises:
            NotImplementedError: always; this base class is abstract.
        """
        self.langs = langs
        raise NotImplementedError('Need to implement in a subclass')
    def predict_word(self, word):
        """
        Calculate the log probability of a word belonging to each language
        in ``self.langs``.

        Args:
            word: A single word string

        Returns:
            A numpy array with the log probability of each language, plus a
            final entry for "other"
        """
        # BUG FIX: the original signature omitted ``self``, so calling it on
        # an instance raised TypeError instead of NotImplementedError.
        raise NotImplementedError('Need to implement in a subclass')
    def predict_words(self, words):
        """
        Calculate the log probability of each word in a sentence belonging
        to each language in ``self.langs``.

        Args:
            words: A tokenized list of word strings

        Returns:
            A numpy array with the log probability of each word (rows) for
            each language or other (columns)
        """
        ret = np.zeros( (len(words), len(self.langs)+1) )
        for i, word in enumerate(words):
            ret[i] = self.predict_word(word)
        return ret
    def id_words(self, words, id_type='pos'):
        """
        Identify the most likely language of each word.

        Args:
            words: A tokenized list of word strings
            id_type: 'pos' for column indices, 'code' for language codes,
                anything else for language names (via ``code2lang``).

        Returns:
            A list with one entry per word in the requested representation.
        """
        ret = list(np.argmax(self.predict_words(words), axis=1))
        if id_type == 'pos': return ret
        ret = ['other' if pos == len(self.langs) else self.langs[pos] for pos in ret]
        if id_type == 'code': return ret
        ret = [code2lang(code) for code in ret]
        return ret
class WordCountBasedLanguageID(LanguageID):
    """Identify languages from per-language unigram relative frequencies.

    Each language's word counts are loaded from ``data/word_counts/<lang>.txt``
    and normalized to probabilities; unseen words receive a small smoothing
    mass and the "other" bucket receives a fixed prior.
    """

    def __init__(self, langs=all_langs, other_alpha=1.0e-9, lang_alpha=1.0e-10):
        self.langs = langs
        self.other_alpha = other_alpha  # prior mass for the "other" bucket
        self.lang_alpha = lang_alpha    # smoothing mass for unseen words
        self.counts = [self.load_counts(lang) for lang in langs]

    def load_counts(self, lang):
        """Load ``<word> <count>`` lines for *lang* and normalize to frequencies."""
        raw = {}
        with open(f'data/word_counts/{lang}.txt', 'r') as f:
            for line in f:
                word, count = line.strip().split()
                raw[word.lower()] = int(count)
        total = float(sum(raw.values()))
        return {word: count / total for (word, count) in raw.items()}

    def predict_word(self, word):
        """Return normalized log probabilities over self.langs plus "other"."""
        key = word.lower()
        scores = np.zeros(len(self.langs) + 1)
        for i, counts in enumerate(self.counts):
            scores[i] = counts.get(key, self.lang_alpha)
        scores[len(self.langs)] = self.other_alpha
        return np.log(scores / np.sum(scores))
class WordClassifierLanguageID(LanguageID):
    """Identify languages with a naive-Bayes-style classifier over character n-grams.

    N-gram class statistics are built from each language's vocabulary file in
    ``data/word_counts/<lang>.txt``.
    """

    def __init__(self, langs=all_langs, alpha=0.5, ns=(3, 4, 5), other_bias=1):
        self.langs = langs
        self.alpha = alpha            # additive smoothing over classes per n-gram
        self.other_bias = other_bias  # log-space bias toward the "other" class
        self.ns = ns                  # character n-gram orders to extract
        # Count how often each n-gram appears in each language's vocabulary,
        # then normalize each n-gram's counts to log class probabilities.
        self.ngram_probs = defaultdict(lambda: np.zeros(len(langs) + 1) + alpha)
        for i, lang in enumerate(langs):
            with open(f'data/word_counts/{lang}.txt', 'r') as f:
                for line in f:
                    word, count = line.strip().split()
                    for ngram in self.get_ngrams(word):
                        self.ngram_probs[ngram][i] += 1
        for k, v in self.ngram_probs.items():
            self.ngram_probs[k] = np.log(v / np.sum(v))

    def predict_word(self, word):
        """Return normalized log probabilities over self.langs plus "other".

        Fix: removed a stray debug ``print(my_counts)`` that spammed stdout on
        every word scored.
        """
        my_counts = np.zeros(len(self.langs) + 1)
        my_counts[len(self.langs)] = self.other_bias
        for ngram in self.get_ngrams(word):
            # ``in`` on a defaultdict does not insert missing keys.
            if ngram in self.ngram_probs:
                my_counts += self.ngram_probs[ngram]
        # Log-sum-exp normalization; subtract the max first for stability.
        my_counts -= np.max(my_counts)
        my_counts -= np.log(np.sum(np.exp(my_counts)))
        return my_counts

    def get_ngrams(self, word):
        """Yield all character n-grams of *word* (lowercased) for orders self.ns."""
        word = word.lower()
        for n in self.ns:
            for i in range(len(word) - n + 1):
                yield word[i:i + n]
# Quick smoke test: classify a few words with the n-gram classifier.
# Requires the count files under data/word_counts/ to be present.
if __name__ == "__main__":
    my_lid = WordClassifierLanguageID()
    words = 'Danke , Bonjour'.split()
    print(' '.join([str(x) for x in my_lid.id_words(words, id_type='name')]))
| 32.735714 | 167 | 0.66703 | import numpy as np
import iso639
from collections import defaultdict
all_langs = ('cay', 'dan', 'deu', 'eng', 'fra', 'kwk', 'see', 'swe')
codelang = [('cay', 'Cayuga'), ('see', 'Seneca'), ('other', 'Other')]
code2lang_dict = {c:l for (c,l) in codelang}
lang2code_dict = {l:c for (c,l) in codelang}
def code2lang(code):
if code in code2lang_dict:
return code2lang_dict[code]
elif code in iso639.languages.terminology:
return iso639.languages.terminology[code].inverted
else:
return None
def lang2code(lang):
if lang in lang2code_dict:
return lang2code_dict[lang]
elif lang in iso639.languages.inverted:
return iso639.languages.inverted[lang].terminology
else:
return None
class LanguageID(object):
def __init__(self, langs=all_langs):
self.langs = langs
raise NotImplementedError('Need to implement in a subclass')
def predict_word(word):
raise NotImplementedError('Need to implement in a subclass')
def predict_words(self, words):
ret = np.zeros( (len(words), len(self.langs)+1) )
for i, word in enumerate(words):
ret[i] = self.predict_word(word)
return ret
def id_words(self, words, id_type='pos'):
ret = list(np.argmax(self.predict_words(words), axis=1))
if id_type == 'pos': return ret
ret = ['other' if pos == len(self.langs) else self.langs[pos] for pos in ret]
if id_type == 'code': return ret
ret = [code2lang(code) for code in ret]
return ret
class WordCountBasedLanguageID(LanguageID):
def __init__(self, langs=all_langs, other_alpha=1.0e-9, lang_alpha=1.0e-10):
self.langs = langs
self.other_alpha = other_alpha
self.lang_alpha = lang_alpha
self.counts = [self.load_counts(lang) for lang in langs]
def load_counts(self, lang):
counts = {}
with open(f'data/word_counts/{lang}.txt', 'r') as f:
for line in f:
word, count = line.strip().split()
counts[word.lower()] = int(count)
my_sum = float(sum(counts.values()))
counts = {word: count/my_sum for (word, count) in counts.items()}
return counts
def predict_word(self, word):
my_counts = np.zeros(len(self.langs)+1)
my_counts[len(self.langs)] = self.other_alpha
for i, counts in enumerate(self.counts):
my_counts[i] = counts.get(word.lower(), self.lang_alpha)
return np.log(my_counts/np.sum(my_counts))
class WordClassifierLanguageID(LanguageID):
def __init__(self, langs=all_langs, alpha=0.5, ns=(3,4,5), other_bias=1):
self.langs = langs
self.alpha = alpha
self.other_bias = other_bias
self.ns = ns
self.ngram_probs = defaultdict(lambda: np.zeros(len(langs)+1) + alpha)
for i, lang in enumerate(langs):
with open(f'data/word_counts/{lang}.txt', 'r') as f:
for line in f:
word, count = line.strip().split()
for ngram in self.get_ngrams(word):
self.ngram_probs[ngram][i] += 1
for k, v in self.ngram_probs.items():
self.ngram_probs[k] = np.log(v/np.sum(v))
def predict_word(self, word):
my_counts = np.zeros(len(self.langs)+1)
my_counts[len(self.langs)] = self.other_bias
for ngram in self.get_ngrams(word):
if ngram in self.ngram_probs:
my_counts += self.ngram_probs[ngram]
my_counts -= np.max(my_counts)
my_counts -= np.log(np.sum(np.exp(my_counts)))
print(my_counts)
return my_counts
def get_ngrams(self, word):
word = word.lower()
for n in self.ns:
for i in range(len(word)-n+1):
yield word[i:i+n]
if __name__ == "__main__":
my_lid = WordClassifierLanguageID()
words = 'Danke , Bonjour'.split()
print(' '.join([str(x) for x in my_lid.id_words(words, id_type='name')]))
| true | true |
f73ed44b958e1d6ea789574bb397f2a57261e54d | 1,926 | py | Python | rlpyt/ul/experiments/rl_with_ul/configs/atari_dqn_ul.py | erfanMhi/rlpyt | 56574ea209f48075c26179c5b2f1a4676c38efdd | [
"MIT"
] | null | null | null | rlpyt/ul/experiments/rl_with_ul/configs/atari_dqn_ul.py | erfanMhi/rlpyt | 56574ea209f48075c26179c5b2f1a4676c38efdd | [
"MIT"
] | null | null | null | rlpyt/ul/experiments/rl_with_ul/configs/atari_dqn_ul.py | erfanMhi/rlpyt | 56574ea209f48075c26179c5b2f1a4676c38efdd | [
"MIT"
] | null | null | null |
import copy
# Registry of named experiment configurations exported by this module.
configs = {}

# Scaled double-DQN with an auxiliary unsupervised-learning (UL) contrastive
# objective trained alongside RL.
config = {
    'agent': {},
    'algo': {
        'discount': 0.99,
        'batch_size': 256,
        'learning_rate': 1.5e-4,  # Adam Optimizer
        'target_update_interval': 1000,
        'clip_grad_norm': 40.,
        'min_steps_rl': int(1e5),
        'double_dqn': True,
        'prioritized_replay': False,
        'n_step_return': 1,
        'replay_size': int(1e6),
        'min_steps_ul': int(5e4),
        'max_steps_ul': None,
        'ul_learning_rate': 1e-3,
        'ul_optim_kwargs': None,
        'ul_update_schedule': 'constant_1',
        'ul_lr_schedule': 'cosine',
        'ul_lr_warmup': 0,
        'ul_delta_T': 3,
        'ul_batch_B': 32,
        'ul_batch_T': 16,
        'ul_random_shift_prob': 1,
        'ul_random_shift_pad': 4,
        'ul_target_update_interval': 1,
        'ul_target_update_tau': 0.01,
        'ul_latent_size': 256,
        'ul_anchor_hidden_sizes': 512,
        'ul_clip_grad_norm': 10.,
    },
    'env': {
        'game': 'pong',
        'episodic_lives': False,  # new standard
        'repeat_action_probability': 0.25,  # sticky actions
        'horizon': int(27e3),
        'fire_on_reset': True,
    },
    # The same env args are reused for the evaluation environment.
    'model': {
        'channels': [32, 64, 64],
        'kernel_sizes': [8, 4, 3],
        'strides': [4, 2, 1],
        'paddings': None,  # No padding for standard 84x84 images.
        'stop_conv_grad': False,
        'hidden_sizes': 512,
        'kiaming_init': True,
    },
    'optim': {'eps': 0.01 / 256},
    'runner': {
        'n_steps': 25e6,
        'log_interval_steps': 5e5,
    },
    'sampler': {
        'batch_T': 2,
        'batch_B': 16,
        'max_decorrelation_steps': 1000,
        'eval_n_envs': 4,
        'eval_max_steps': int(150e3),
        'eval_max_trajectories': 75,
    },
}
configs["scaled_ddqn_ul"] = config
| 25.012987 | 63 | 0.586708 |
import copy
configs = dict()
config = dict(
agent=dict(),
algo=dict(
discount=0.99,
batch_size=256,
learning_rate=1.5e-4,
target_update_interval=1000,
clip_grad_norm=40.,
min_steps_rl=int(1e5),
double_dqn=True,
prioritized_replay=False,
n_step_return=1,
replay_size=int(1e6),
min_steps_ul=int(5e4),
max_steps_ul=None,
ul_learning_rate=1e-3,
ul_optim_kwargs=None,
ul_update_schedule="constant_1",
ul_lr_schedule="cosine",
ul_lr_warmup=0,
ul_delta_T=3,
ul_batch_B=32,
ul_batch_T=16,
ul_random_shift_prob=1,
ul_random_shift_pad=4,
ul_target_update_interval=1,
ul_target_update_tau=0.01,
ul_latent_size=256,
ul_anchor_hidden_sizes=512,
ul_clip_grad_norm=10.,
),
env=dict(
game="pong",
episodic_lives=False,
repeat_action_probability=0.25,
horizon=int(27e3),
fire_on_reset=True,
),
model=dict(
channels=[32, 64, 64],
kernel_sizes=[8, 4, 3],
strides=[4, 2, 1],
paddings=None,
stop_conv_grad=False,
hidden_sizes=512,
kiaming_init=True,
),
optim=dict(eps=0.01 / 256),
runner=dict(
n_steps=25e6,
log_interval_steps=5e5,
),
sampler=dict(
batch_T=2,
batch_B=16,
max_decorrelation_steps=1000,
eval_n_envs=4,
eval_max_steps=int(150e3),
eval_max_trajectories=75,
),
)
configs["scaled_ddqn_ul"] = config
| true | true |
f73ed4b730a149cec73763219e19df0c835b21fc | 5,626 | py | Python | src/olympia/shelves/views.py | renancleyson-dev/addons-server | 1db556b40260aa4c0c63e84c4d9e3c2d7609eb10 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/shelves/views.py | renancleyson-dev/addons-server | 1db556b40260aa4c0c63e84c4d9e3c2d7609eb10 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/shelves/views.py | renancleyson-dev/addons-server | 1db556b40260aa4c0c63e84c4d9e3c2d7609eb10 | [
"BSD-3-Clause"
] | null | null | null | from django.db.transaction import non_atomic_requests
from django.utils.decorators import classonlymethod
from django_statsd.clients import statsd
from elasticsearch_dsl import Q, query
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException
from rest_framework.response import Response
import olympia.core.logger
from olympia.addons.views import AddonSearchView
from olympia.api.pagination import ESPageNumberPagination
from olympia.constants.promoted import PROMOTED_GROUPS
from olympia.hero.views import PrimaryHeroShelfViewSet, SecondaryHeroShelfViewSet
from olympia.search.filters import ReviewedContentFilter
from .models import Shelf
from .serializers import ESSponsoredAddonSerializer, ShelfSerializer
from .utils import (
get_addons_from_adzerk,
get_signed_impression_blob_from_results,
filter_adzerk_results_to_es_results_qs,
send_event_ping,
send_impression_pings,
)
# Module-level logger for the shelves app.
log = olympia.core.logger.getLogger('z.shelves')
# Event types accepted by the sponsored-shelf "event" endpoint.
VALID_EVENT_TYPES = ('click', 'conversion')
class ShelfViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """List enabled shelves, plus one random primary/secondary hero shelf."""

    queryset = Shelf.objects.filter(shelfmanagement__enabled=True).order_by(
        'shelfmanagement__position'
    )
    permission_classes = []
    serializer_class = ShelfSerializer

    def list(self, request, *args, **kwargs):
        # Standard listing, augmented with one random hero shelf of each kind.
        response = super().list(request, *args, **kwargs)
        primary_vs = PrimaryHeroShelfViewSet(request=request)
        secondary_vs = SecondaryHeroShelfViewSet(request=request)
        response.data['primary'] = primary_vs.get_one_random_data()
        response.data['secondary'] = secondary_vs.get_one_random_data()
        return response

    @classonlymethod
    def as_view(cls, *args, **initkwargs):
        # Read-only endpoint: opt out of the per-request transaction.
        return non_atomic_requests(super().as_view(*args, **initkwargs))
class SponsoredShelfPagination(ESPageNumberPagination):
    # Sponsored shelves show at most 6 add-ons per page.
    page_size = 6
class SponsoredShelfViewSet(viewsets.ViewSetMixin, AddonSearchView):
    """Search view for the sponsored add-ons shelf.

    Results come from the Adzerk ad service, are validated against
    Elasticsearch (only add-ons in promoted groups selectable by Adzerk are
    kept), and the endpoint exposes extra actions to report impressions,
    clicks and conversion events back to Adzerk.
    """

    filter_backends = [ReviewedContentFilter]
    pagination_class = SponsoredShelfPagination
    serializer_class = ESSponsoredAddonSerializer

    @classmethod
    def as_view(cls, actions, **initkwargs):
        # Read-only/ping endpoints: opt out of the per-request transaction.
        return non_atomic_requests(super().as_view(actions, **initkwargs))

    def get_paginated_response(self, data):
        """Attach impression reporting info and re-order results to Adzerk order."""
        response = super().get_paginated_response(data)
        response.data['impression_url'] = self.reverse_action('impression')
        # Signed blob the client posts back to the impression endpoint.
        response.data['impression_data'] = get_signed_impression_blob_from_results(
            self.adzerk_results
        )
        # reorder results to match adzerk order
        order = list(self.adzerk_results.keys())
        response.data['results'] = sorted(
            response.data.get('results', ()),
            key=lambda result: order.index(str(result.get('id'))),
        )
        return response

    def filter_queryset(self, qs):
        """Restrict the ES queryset to the add-ons Adzerk returned.

        Side effect: stores the raw Adzerk response on ``self.adzerk_results``
        (used later by get_paginated_response), and logs/metrics any Adzerk
        result that is not in an allowed promoted group.
        """
        qs = super().filter_queryset(qs)
        count = self.paginator.get_page_size(self.request)
        self.adzerk_results = get_addons_from_adzerk(count)
        ids = list(self.adzerk_results.keys())
        group_ids_to_allow = [
            group.id for group in PROMOTED_GROUPS if group.can_be_selected_by_adzerk
        ]
        results_qs = qs.query(
            query.Bool(
                must=[
                    Q('terms', id=ids),
                    Q('terms', **{'promoted.group_id': group_ids_to_allow}),
                ]
            )
        )
        results_qs.execute()  # To cache the results.
        # Adzerk ids that ES did not confirm as validly promoted.
        extras = filter_adzerk_results_to_es_results_qs(self.adzerk_results, results_qs)
        if extras:
            group_names = '; '.join(
                str(group.name)
                for group in PROMOTED_GROUPS
                if group.can_be_selected_by_adzerk
            )
            for id_ in extras:
                log.error(
                    'Addon id [%s] returned from Adzerk, but not in a valid '
                    'Promoted group [%s]',
                    id_,
                    group_names,
                )
            statsd.incr('services.adzerk.elasticsearch_miss', len(extras))
        return results_qs

    @action(detail=False, methods=['post'])
    def impression(self, request):
        """Forward a signed impression blob to Adzerk; 400 on bad signature."""
        signed_impressions = request.data.get('impression_data', '')
        try:
            send_impression_pings(signed_impressions)
        except APIException as e:
            return Response(
                f'Bad impression_data: {e}', status=status.HTTP_400_BAD_REQUEST
            )
        return Response(status=status.HTTP_202_ACCEPTED)

    @action(detail=False, methods=['post'])
    def click(self, request):
        """Forward a signed click ping to Adzerk; 400 on bad signature."""
        signed_click = request.data.get('click_data', '')
        try:
            send_event_ping(signed_click, 'click')
        except APIException as e:
            return Response(f'Bad click_data: {e}', status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_202_ACCEPTED)

    @action(detail=False, methods=['post'])
    def event(self, request):
        """Forward a signed event ping of a whitelisted type to Adzerk."""
        signed_data = request.data.get('data', '')
        data_type = request.data.get('type')
        # Only the event types in VALID_EVENT_TYPES are forwarded.
        if data_type not in VALID_EVENT_TYPES:
            return Response(
                f'Bad type: {data_type}', status=status.HTTP_400_BAD_REQUEST
            )
        try:
            send_event_ping(signed_data, data_type)
        except APIException as e:
            return Response(
                f'Bad data for {data_type}: {e}', status=status.HTTP_400_BAD_REQUEST
            )
        return Response(status=status.HTTP_202_ACCEPTED)
| 36.771242 | 88 | 0.662105 | from django.db.transaction import non_atomic_requests
from django.utils.decorators import classonlymethod
from django_statsd.clients import statsd
from elasticsearch_dsl import Q, query
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException
from rest_framework.response import Response
import olympia.core.logger
from olympia.addons.views import AddonSearchView
from olympia.api.pagination import ESPageNumberPagination
from olympia.constants.promoted import PROMOTED_GROUPS
from olympia.hero.views import PrimaryHeroShelfViewSet, SecondaryHeroShelfViewSet
from olympia.search.filters import ReviewedContentFilter
from .models import Shelf
from .serializers import ESSponsoredAddonSerializer, ShelfSerializer
from .utils import (
get_addons_from_adzerk,
get_signed_impression_blob_from_results,
filter_adzerk_results_to_es_results_qs,
send_event_ping,
send_impression_pings,
)
log = olympia.core.logger.getLogger('z.shelves')
VALID_EVENT_TYPES = ('click', 'conversion')
class ShelfViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
queryset = Shelf.objects.filter(shelfmanagement__enabled=True).order_by(
'shelfmanagement__position'
)
permission_classes = []
serializer_class = ShelfSerializer
def list(self, request, *args, **kwargs):
response = super().list(request, *args, **kwargs)
response.data['primary'] = PrimaryHeroShelfViewSet(
request=request
).get_one_random_data()
response.data['secondary'] = SecondaryHeroShelfViewSet(
request=request
).get_one_random_data()
return response
@classonlymethod
def as_view(cls, *args, **initkwargs):
view = super().as_view(*args, **initkwargs)
return non_atomic_requests(view)
class SponsoredShelfPagination(ESPageNumberPagination):
page_size = 6
class SponsoredShelfViewSet(viewsets.ViewSetMixin, AddonSearchView):
filter_backends = [ReviewedContentFilter]
pagination_class = SponsoredShelfPagination
serializer_class = ESSponsoredAddonSerializer
@classmethod
def as_view(cls, actions, **initkwargs):
return non_atomic_requests(super().as_view(actions, **initkwargs))
def get_paginated_response(self, data):
response = super().get_paginated_response(data)
response.data['impression_url'] = self.reverse_action('impression')
response.data['impression_data'] = get_signed_impression_blob_from_results(
self.adzerk_results
)
order = list(self.adzerk_results.keys())
response.data['results'] = sorted(
response.data.get('results', ()),
key=lambda result: order.index(str(result.get('id'))),
)
return response
def filter_queryset(self, qs):
qs = super().filter_queryset(qs)
count = self.paginator.get_page_size(self.request)
self.adzerk_results = get_addons_from_adzerk(count)
ids = list(self.adzerk_results.keys())
group_ids_to_allow = [
group.id for group in PROMOTED_GROUPS if group.can_be_selected_by_adzerk
]
results_qs = qs.query(
query.Bool(
must=[
Q('terms', id=ids),
Q('terms', **{'promoted.group_id': group_ids_to_allow}),
]
)
)
results_qs.execute()
extras = filter_adzerk_results_to_es_results_qs(self.adzerk_results, results_qs)
if extras:
group_names = '; '.join(
str(group.name)
for group in PROMOTED_GROUPS
if group.can_be_selected_by_adzerk
)
for id_ in extras:
log.error(
'Addon id [%s] returned from Adzerk, but not in a valid '
'Promoted group [%s]',
id_,
group_names,
)
statsd.incr('services.adzerk.elasticsearch_miss', len(extras))
return results_qs
@action(detail=False, methods=['post'])
def impression(self, request):
signed_impressions = request.data.get('impression_data', '')
try:
send_impression_pings(signed_impressions)
except APIException as e:
return Response(
f'Bad impression_data: {e}', status=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_202_ACCEPTED)
@action(detail=False, methods=['post'])
def click(self, request):
signed_click = request.data.get('click_data', '')
try:
send_event_ping(signed_click, 'click')
except APIException as e:
return Response(f'Bad click_data: {e}', status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_202_ACCEPTED)
@action(detail=False, methods=['post'])
def event(self, request):
signed_data = request.data.get('data', '')
data_type = request.data.get('type')
if data_type not in VALID_EVENT_TYPES:
return Response(
f'Bad type: {data_type}', status=status.HTTP_400_BAD_REQUEST
)
try:
send_event_ping(signed_data, data_type)
except APIException as e:
return Response(
f'Bad data for {data_type}: {e}', status=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_202_ACCEPTED)
| true | true |
f73ed58ffca385adc268bc8ac6189cdf5a71a949 | 345 | py | Python | utils.py | ofirkris/Hifill-tensorflow | 85a3332975a0c2da60116f72dc5a17f76c2d1b96 | [
"MIT"
] | 39 | 2020-10-15T00:57:57.000Z | 2022-02-24T13:53:32.000Z | utils.py | ofirkris/Hifill-tensorflow | 85a3332975a0c2da60116f72dc5a17f76c2d1b96 | [
"MIT"
] | 7 | 2020-10-28T03:59:03.000Z | 2020-12-17T09:11:16.000Z | utils.py | ofirkris/Hifill-tensorflow | 85a3332975a0c2da60116f72dc5a17f76c2d1b96 | [
"MIT"
] | 13 | 2020-10-26T01:34:01.000Z | 2022-03-23T01:52:23.000Z | import yaml
from easydict import EasyDict as edict
from tensorflow.python.ops import data_flow_ops
import tensorflow as tf
def load_yml(path):
with open(path, 'r') as f:
try:
config = yaml.load(f)
print(config)
return edict(config)
except yaml.YAMLError as exc:
print(exc)
| 23 | 47 | 0.62029 | import yaml
from easydict import EasyDict as edict
from tensorflow.python.ops import data_flow_ops
import tensorflow as tf
def load_yml(path):
with open(path, 'r') as f:
try:
config = yaml.load(f)
print(config)
return edict(config)
except yaml.YAMLError as exc:
print(exc)
| true | true |
f73ed6bd22d1a75a235635d5025edd24dfd253e5 | 657 | py | Python | Modules/Attention.py | drat/Neural-Voice-Cloning-With-Few-Samples | 4febde43ccc143fc88d74d5fa0c5a117636778b4 | [
"MIT"
] | 361 | 2018-08-17T14:37:29.000Z | 2022-03-15T13:04:16.000Z | Modules/Attention.py | drat/Neural-Voice-Cloning-With-Few-Samples | 4febde43ccc143fc88d74d5fa0c5a117636778b4 | [
"MIT"
] | 22 | 2018-11-25T13:42:26.000Z | 2020-04-29T05:16:25.000Z | Modules/Attention.py | drat/Neural-Voice-Cloning-With-Few-Samples | 4febde43ccc143fc88d74d5fa0c5a117636778b4 | [
"MIT"
] | 121 | 2018-08-30T03:53:09.000Z | 2022-03-25T09:03:17.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from Modules.MultiHeadAttention import MultiHeadAttention
class Attention(nn.Module):
def __init__(self, dim):
super(Attention, self).__init__()
self.encoders = self._build_model(dim)
def _build_model(self, dim):
layers = []
dim = dim
layers.append(MultiHeadAttention(dim, dim, dim))
return nn.ModuleList(layers)
def forward(self, inputs):
net_inputs = inputs
net_inputs.contiguous()
for enc in self.encoders:
net_inputs = enc(net_inputs, net_inputs)
return net_inputs
| 24.333333 | 57 | 0.6621 | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from Modules.MultiHeadAttention import MultiHeadAttention
class Attention(nn.Module):
def __init__(self, dim):
super(Attention, self).__init__()
self.encoders = self._build_model(dim)
def _build_model(self, dim):
layers = []
dim = dim
layers.append(MultiHeadAttention(dim, dim, dim))
return nn.ModuleList(layers)
def forward(self, inputs):
net_inputs = inputs
net_inputs.contiguous()
for enc in self.encoders:
net_inputs = enc(net_inputs, net_inputs)
return net_inputs
| true | true |
f73ed7111bd86e854b241abd7a8e95261d3d782b | 3,076 | py | Python | monoloco/monoloco/visuals/plot_3d_box.py | galvinw/fairmotdocker | 032d50a4025788b97ca36b0d97b7df15ddb5986c | [
"MIT"
] | null | null | null | monoloco/monoloco/visuals/plot_3d_box.py | galvinw/fairmotdocker | 032d50a4025788b97ca36b0d97b7df15ddb5986c | [
"MIT"
] | null | null | null | monoloco/monoloco/visuals/plot_3d_box.py | galvinw/fairmotdocker | 032d50a4025788b97ca36b0d97b7df15ddb5986c | [
"MIT"
] | 2 | 2021-11-15T03:01:28.000Z | 2021-11-15T03:20:20.000Z |
import numpy as np
def correct_boxes(boxes, hwls, xyzs, yaws, path_calib):
    """Recompute 2-D boxes by projecting each 3-D box with the camera matrix.

    Args:
        boxes: original 2-D detections; one entry per object (only its length
               is used to drive the loop).
        hwls: (height, width, length) per object.
        xyzs: 3-D box centers per object, camera coordinates.
        yaws: rotation about the y-axis per object (radians).
        path_calib: KITTI-style calibration file; line index 2 holds P2.

    Returns:
        List of [x0, y0, x1, y1] boxes, one per object.
    """
    with open(path_calib, "r") as ff:
        file = ff.readlines()
    # P2 is the left color camera projection matrix (3x4).
    p2_str = file[2].split()[1:]
    p2_list = [float(xx) for xx in p2_str]
    P = np.array(p2_list).reshape(3, 4)
    boxes_new = []
    # Fix: the original iterated `range(boxes)` over a list, which raises
    # TypeError; we want one iteration per detection.
    for idx in range(len(boxes)):
        corners_2d, _ = compute_box_3d(hwls[idx], xyzs[idx], yaws[idx], P)
        box_new = project_8p_to_4p(corners_2d).reshape(-1).tolist()
        boxes_new.append(box_new)
    return boxes_new
def compute_box_3d(hwl, xyz, ry, P):
    """Project a 3-D bounding box into the image plane.

    Args:
        hwl: (height, width, length) of the box.
        xyz: box center in rectified camera coordinates.
        ry: rotation around the y-axis (radians).
        P: 3x4 camera projection matrix.

    Returns:
        (corners_2d, corners_3d): the (8, 2) image-plane corners — or None
        when any corner lies behind the camera — and the (8, 3) camera-frame
        corners.
    """
    h, w, l = hwl[0], hwl[1], hwl[2]
    # Axis-aligned corner offsets around the box origin: bottom face (y=0)
    # first, then the top face (y=-h).
    xs = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
    ys = [0, 0, 0, 0, -h, -h, -h, -h]
    zs = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    # Rotate around y, then translate to the box center.
    corners_3d = np.dot(roty(ry), np.vstack([xs, ys, zs]))
    for axis in range(3):
        corners_3d[axis, :] = corners_3d[axis, :] + xyz[axis]
    # Only draw the box when every corner is in front of the camera.
    if np.any(corners_3d[2, :] < 0.1):
        return None, np.transpose(corners_3d)
    corners_2d = project_to_image(np.transpose(corners_3d), P)
    return corners_2d, np.transpose(corners_3d)
def roty(t):
    """Rotation matrix about the y-axis by angle *t* (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
def project_to_image(pts_3d, P):
    """Project 3-D points to the image plane.

    Args:
        pts_3d: (n, 3) matrix of points.
        P: 3x4 projection matrix.

    Returns:
        (n, 2) matrix of pixel coordinates, after perspective division by the
        projected depth.
    """
    n = pts_3d.shape[0]
    # Homogeneous coordinates: append a column of ones, then apply P.
    homog = np.hstack((pts_3d, np.ones((n, 1))))
    proj = homog.dot(P.T)  # (n, 3)
    # Perspective divide by the third (depth) component.
    proj[:, 0] /= proj[:, 2]
    proj[:, 1] /= proj[:, 2]
    return proj[:, 0:2]
def project_8p_to_4p(pts_2d):
    """Collapse 8 projected corners into an axis-aligned [x0, y0, x1, y1] box.

    The minimum corner is clamped to the image origin (non-negative).
    """
    xs, ys = pts_2d[:, 0], pts_2d[:, 1]
    x0 = max(0, np.min(xs))
    y0 = max(0, np.min(ys))
    return np.array([x0, y0, np.max(xs), np.max(ys)])
| 32.041667 | 76 | 0.586476 |
import numpy as np
def correct_boxes(boxes, hwls, xyzs, yaws, path_calib):
with open(path_calib, "r") as ff:
file = ff.readlines()
p2_str = file[2].split()[1:]
p2_list = [float(xx) for xx in p2_str]
P = np.array(p2_list).reshape(3, 4)
boxes_new = []
for idx in range(boxes):
hwl = hwls[idx]
xyz = xyzs[idx]
yaw = yaws[idx]
corners_2d, _ = compute_box_3d(hwl, xyz, yaw, P)
box_new = project_8p_to_4p(corners_2d).reshape(-1).tolist()
boxes_new.append(box_new)
return boxes_new
def compute_box_3d(hwl, xyz, ry, P):
R = roty(ry)
l = hwl[2]
w = hwl[1]
h = hwl[0]
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + xyz[0]
corners_3d[1, :] = corners_3d[1, :] + xyz[1]
corners_3d[2, :] = corners_3d[2, :] + xyz[2]
if np.any(corners_3d[2, :] < 0.1):
corners_2d = None
return corners_2d, np.transpose(corners_3d)
corners_2d = project_to_image(np.transpose(corners_3d), P)
return corners_2d, np.transpose(corners_3d)
def roty(t):
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def project_to_image(pts_3d, P):
n = pts_3d.shape[0]
pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))
pts_2d = np.dot(pts_3d_extend, np.transpose(P))
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
return pts_2d[:, 0:2]
def project_8p_to_4p(pts_2d):
x0 = np.min(pts_2d[:, 0])
x1 = np.max(pts_2d[:, 0])
y0 = np.min(pts_2d[:, 1])
y1 = np.max(pts_2d[:, 1])
x0 = max(0, x0)
y0 = max(0, y0)
return np.array([x0, y0, x1, y1])
| true | true |
f73ed73da48ea47de03e0fde95f500a416689a67 | 409 | py | Python | pyfk/__init__.py | ziyixi/pyfk | 2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4 | [
"MIT"
] | 29 | 2019-09-08T03:43:55.000Z | 2022-03-16T06:13:08.000Z | pyfk/__init__.py | ziyixi/pyfk | 2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4 | [
"MIT"
] | 9 | 2020-12-16T01:52:44.000Z | 2022-03-22T14:04:27.000Z | pyfk/__init__.py | ziyixi/pyfk | 2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4 | [
"MIT"
] | 5 | 2021-02-17T14:46:32.000Z | 2022-01-24T02:43:03.000Z | from pyfk.config.config import Config, SeisModel, SourceModel
from pyfk.gf.gf import calculate_gf
from pyfk.gf.waveform_integration import mpi_info
from pyfk.sync.sync import calculate_sync, generate_source_time_function
__all__ = [
"SourceModel",
"SeisModel",
"Config",
"calculate_gf",
"calculate_sync",
"generate_source_time_function",
"mpi_info"]
__version__ = "0.2.0-beta.1"
| 25.5625 | 72 | 0.748166 | from pyfk.config.config import Config, SeisModel, SourceModel
from pyfk.gf.gf import calculate_gf
from pyfk.gf.waveform_integration import mpi_info
from pyfk.sync.sync import calculate_sync, generate_source_time_function
__all__ = [
"SourceModel",
"SeisModel",
"Config",
"calculate_gf",
"calculate_sync",
"generate_source_time_function",
"mpi_info"]
__version__ = "0.2.0-beta.1"
| true | true |
f73ed9d559c84ba26e305e2fdcda6d6bd470264c | 7,690 | py | Python | processingfcmsvd.py | Ditskih/Project | 87170245e55e615b0a14966d60afe41caece0434 | [
"Unlicense"
] | null | null | null | processingfcmsvd.py | Ditskih/Project | 87170245e55e615b0a14966d60afe41caece0434 | [
"Unlicense"
] | null | null | null | processingfcmsvd.py | Ditskih/Project | 87170245e55e615b0a14966d60afe41caece0434 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 15:38:52 2019
@author: Ditskih
"""
import os
import json
import re
import csv
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.random_projection import GaussianRandomProjection as GRP
import numpy as np
import sys
sys.path.insert(0, 'FCMeans')
from fcmeans import fcmeans
from sklearn.decomposition import TruncatedSVD
from scipy.sparse import csr_matrix
import pandas as pd
def my_preprocessor(tweet):
    """Normalize a raw tweet for vectorization.

    Lowercases the text, masks URLs as 'URL' and mentions as 'AT_USER',
    collapses whitespace, strips the '#' from hashtags and trims surrounding
    quotes.
    """
    tweet = tweet.lower()
    # Mask links and mentions so they become single shared tokens.
    tweet = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet)
    tweet = re.sub(r'@[^\s]+', 'AT_USER', tweet)
    # Collapse runs of whitespace into a single space.
    tweet = re.sub(r'[\s]+', ' ', tweet)
    # Keep hashtag words, drop the leading '#'.
    tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
    return tweet.strip('\'"')
def my_tokenizer(tweet):
    """Tokenize a preprocessed tweet into cleaned, lowercased word tokens.

    Repeated characters are squeezed to at most two, surrounding punctuation
    is stripped, the URL/AT_USER placeholders are dropped, and only purely
    alphanumeric tokens are kept.
    """
    # Squeeze any run of a repeated character down to two occurrences.
    repeat_pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
    tokens = []
    for w in word_tokenize(tweet):
        w = repeat_pattern.sub(r"\1\1", w)
        # Strip leading/trailing quotes and sentence punctuation.
        w = w.strip('\'"?,.')
        # Skip placeholders and anything that is not purely alphanumeric.
        if w in ('AT_USER', 'URL'):
            continue
        if re.search(r"^[a-zA-Z0-9][a-zA-Z0-9]*$", w) is None:
            continue
        tokens.append(w.lower())
    return tokens
# Topic-extraction pipeline: load the dataset, TF-IDF vectorize it, project it
# to a low-dimensional space with Gaussian random projection, cluster with
# fuzzy c-means, and write the top words of each topic to a file.
# The outer loop runs exactly once (kept for easy repetition experiments).
for i in range (1):
    # -------
    # Loading
    # -------
    print ("Loading dataset .... ")
    df = csv.reader(open("D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\nyoba\\dicobaduluajafix.csv", encoding="utf8"))
    data = []
    # Each document is the concatenation of the first two CSV columns.
    for column in df:
        data.append(column[0].strip() + ' ' + column[1].strip())
    # -----------
    # Vectorizing : Preprocessing, Tokenizing, Filtering, Weighting
    # -----------
    print ("Vectorizing .....")
    # Indonesian stop-word list, one word per row.
    data_file = csv.reader(open('D:\Private Property\Data Kuliah\Akademis\Skripsweet\program\Program1\Program\\nyoba\\stopwords_id.csv'))
    stopwords = []
    for column in data_file:
        stopwords.append(column[0])
    # Extra stop words: domain terms plus numeric/noise tokens.
    # NOTE(review): 'none''0' below is adjacent-string concatenation and
    # yields the single token 'none0' — likely a missing comma; confirm intent.
    my_stop_words = stopwords + ['untuk','toko','nya','false','none''0', '01', '02', '0223', '03', '04', '05', '06', '07', '08', '09',
        '0pertandingan', '1', '10', '100', '1001', '101', '102', '1020', '103', '104', '105', '106', '108', '109',
        '10th', '11', '110', '112', '113', '115', '12', '120', '121', '122', '123', '125', '129', '13', '130', '131',
        '132', '135', '136', '137', '138', '139', '14', '140', '141', '142', '145', '148', '15', '150', '1500',
        '152', '153', '154', '155', '157', '16', '160', '161', '162', '165', '166', '168', '17', '170', '1700',
        '172', '1731', '175', '1763', '18', '180', '1800', '181', '184', '1848', '185', '187', '19', '190',
        '1906', '191', '1930', '1936', '1945', '1947', '1948', '1949', '1950', '1954', '1955', '1958', '196',
        '1961', '1962', '1964', '1965', '1967', '1968', '1972', '1973', '1974', '1984', '1985', '1987', '199',
        '1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1a', '1musim', '1st', '2', '20',
        '200', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '200cc', '201', '2010',
        '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021', '2022', '2025',
        '2041', '2045', '205', '2050', '207', '21', '210', '211', '215', '22', '221', '223', '225', '227', '229',
        '23', '230', '234', '235', '238', '239', '24', '240', '241', '25', '250', '250cc', '2560x1440', '258', '259',
        '26', '260', '263', '265', '267', '268', '27', '278', '28', '280', '282', '283', '284', '286', '29',
        '2pm', '3', '30', '300', '306', '308', '31', '310', '315', '32', '33', '330', '34', '345', '35', '350',
        '359', '36', '360', '369', '37', '370', '378', '38', '386', '387', '39', '399', '3c', '3d', '3s', '4',
        '40', '400', '407', '41', '410', '42', '43', '44', '45', '450', '46', '4640', '47', '4720', '48', '480',
        '49', '4g', '4minute', '4x2', '4x4', '5', '50', '500', '500c', '508', '50mp', '51', '52', '53', '54', '55',
        '550', '56', '560', '57', '58', '59', '595', '5c', '5g', '5s', '5th', '6', '60', '600', '61', '62', '623',
        '625', '63', '634', '64', '640', '65', '650', '656', '66', '67', '68', '69', '69053', '6a', '6x6', '7', '70',
        '700', '71', '72', '720', '73', '737', '74', '7442', '75', '750', '7569', '76', '77', '78', '79', '8', '80',
        '800', '80an', '81', '814', '816', '82', '83', '84', '85', '8500', '86', '865', '86th', '87', '88', '889',
        '89', '8gb', '9', '90', '900', '91', '911', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'a3', 'a320', 'a66s', 'aa']
    # TF-IDF with the custom preprocessor/tokenizer above; terms must appear in
    # at least 2 documents and in at most 95% of them.
    vectorizer = TfidfVectorizer(preprocessor=my_preprocessor,tokenizer=my_tokenizer,
                stop_words=my_stop_words,min_df=2,max_df=0.95)
    data = vectorizer.fit_transform(data)
    feature_names = vectorizer.get_feature_names()
    #print (feature_names)
    #break
    #print (data)
    # ------------------------------------------
    # Model to Transform Data into a Lower Space
    # ------------------------------------------
    # Gaussian random projection down to 5 dimensions for faster clustering.
    grps = GRP(n_components = 5)
    new_data = grps.fit_transform(data)
    # Learning
    # --------
    for n_topics in range(100,110,10):
        print ("Learning ...." + str(n_topics))
        #membership (u) calculation in the lower space
        m=1.5
        cntr, u= fcmeans(new_data.T, n_topics, m, error=0.005, maxiter=1000)
        #centroid (cntr) calculation in the original space
        # Memberships found in the projected space are used to weight the
        # original TF-IDF rows, giving per-topic term centroids.
        temp = csr_matrix(np.ones((data.shape[1],1)).dot(np.atleast_2d(u.sum(axis=1))).T)
        u = csr_matrix(u)
        cntr = np.asarray(u.dot(data) / temp)
        '''
        # Find centroids for initialization
        svd = TruncatedSVD(n_components = n_topics)
        svd.fit(new_data)
        cntr = svd.components_
        #cntr[cntr<0.001]=0.0
        # Find centroids by FCM
        cntr, u = fcmeans(new_data.T, n_topics, m=1.5, error=0.005, maxiter=1000, init=cntr.T)
        cntr = np.asarray(cntr)
        '''
        # Prints topics
        # Write the 10 highest-weighted terms of each topic centroid.
        n_top_words = 10
        hasil = open('D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\nyoba\\topikgrp' + str(n_topics) + ".txt", 'w')
        for topic_idx, topic in enumerate(cntr):
            print("Topic " + str(topic_idx) + " : " + " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
            hasil.write(""+" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) + "\n")
        hasil.close()
| 50.927152 | 159 | 0.477373 |
import os
import json
import re
import csv
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.random_projection import GaussianRandomProjection as GRP
import numpy as np
import sys
sys.path.insert(0, 'FCMeans')
from fcmeans import fcmeans
from sklearn.decomposition import TruncatedSVD
from scipy.sparse import csr_matrix
import pandas as pd
def my_preprocessor(tweet):
tweet = tweet.lower()
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','URL',tweet)
tweet = re.sub('@[^\s]+','AT_USER',tweet)
tweet = re.sub('[\s]+', ' ', tweet)
sub(r'#([^\s]+)', r'\1', tweet)
tweet = tweet.strip('\'"')
return tweet
def my_tokenizer(tweet):
words = word_tokenize(tweet)
tokens=[]
for w in words:
#replace two or more with two occurrences
pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
w = pattern.sub(r"\1\1", w)
#strip punctuation
w = w.strip('\'"?,.')
val = re.search(r"^[a-zA-Z0-9][a-zA-Z0-9]*$", w)
if(w in ['AT_USER','URL'] or val is None):
continue
else:
tokens.append(w.lower())
return tokens
for i in range (1):
print ("Loading dataset .... ")
df = csv.reader(open("D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\nyoba\\dicobaduluajafix.csv", encoding="utf8"))
data = []
for column in df:
data.append(column[0].strip() + ' ' + column[1].strip())
print ("Vectorizing .....")
data_file = csv.reader(open('D:\Private Property\Data Kuliah\Akademis\Skripsweet\program\Program1\Program\\nyoba\\stopwords_id.csv'))
stopwords = []
for column in data_file:
stopwords.append(column[0])
my_stop_words = stopwords + ['untuk','toko','nya','false','none''0', '01', '02', '0223', '03', '04', '05', '06', '07', '08', '09',
'0pertandingan', '1', '10', '100', '1001', '101', '102', '1020', '103', '104', '105', '106', '108', '109',
'10th', '11', '110', '112', '113', '115', '12', '120', '121', '122', '123', '125', '129', '13', '130', '131',
'132', '135', '136', '137', '138', '139', '14', '140', '141', '142', '145', '148', '15', '150', '1500',
'152', '153', '154', '155', '157', '16', '160', '161', '162', '165', '166', '168', '17', '170', '1700',
'172', '1731', '175', '1763', '18', '180', '1800', '181', '184', '1848', '185', '187', '19', '190',
'1906', '191', '1930', '1936', '1945', '1947', '1948', '1949', '1950', '1954', '1955', '1958', '196',
'1961', '1962', '1964', '1965', '1967', '1968', '1972', '1973', '1974', '1984', '1985', '1987', '199',
'1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1a', '1musim', '1st', '2', '20',
'200', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '200cc', '201', '2010',
'2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021', '2022', '2025',
'2041', '2045', '205', '2050', '207', '21', '210', '211', '215', '22', '221', '223', '225', '227', '229',
'23', '230', '234', '235', '238', '239', '24', '240', '241', '25', '250', '250cc', '2560x1440', '258', '259',
'26', '260', '263', '265', '267', '268', '27', '278', '28', '280', '282', '283', '284', '286', '29',
'2pm', '3', '30', '300', '306', '308', '31', '310', '315', '32', '33', '330', '34', '345', '35', '350',
'359', '36', '360', '369', '37', '370', '378', '38', '386', '387', '39', '399', '3c', '3d', '3s', '4',
'40', '400', '407', '41', '410', '42', '43', '44', '45', '450', '46', '4640', '47', '4720', '48', '480',
'49', '4g', '4minute', '4x2', '4x4', '5', '50', '500', '500c', '508', '50mp', '51', '52', '53', '54', '55',
'550', '56', '560', '57', '58', '59', '595', '5c', '5g', '5s', '5th', '6', '60', '600', '61', '62', '623',
'625', '63', '634', '64', '640', '65', '650', '656', '66', '67', '68', '69', '69053', '6a', '6x6', '7', '70',
'700', '71', '72', '720', '73', '737', '74', '7442', '75', '750', '7569', '76', '77', '78', '79', '8', '80',
'800', '80an', '81', '814', '816', '82', '83', '84', '85', '8500', '86', '865', '86th', '87', '88', '889',
'89', '8gb', '9', '90', '900', '91', '911', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'a3', 'a320', 'a66s', 'aa']
vectorizer = TfidfVectorizer(preprocessor=my_preprocessor,tokenizer=my_tokenizer,
stop_words=my_stop_words,min_df=2,max_df=0.95)
data = vectorizer.fit_transform(data)
feature_names = vectorizer.get_feature_names()
grps = GRP(n_components = 5)
new_data = grps.fit_transform(data)
for n_topics in range(100,110,10):
print ("Learning ...." + str(n_topics))
m=1.5
cntr, u= fcmeans(new_data.T, n_topics, m, error=0.005, maxiter=1000)
temp = csr_matrix(np.ones((data.shape[1],1)).dot(np.atleast_2d(u.sum(axis=1))).T)
u = csr_matrix(u)
cntr = np.asarray(u.dot(data) / temp)
n_top_words = 10
hasil = open('D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\nyoba\\topikgrp' + str(n_topics) + ".txt", 'w')
for topic_idx, topic in enumerate(cntr):
print("Topic " + str(topic_idx) + " : " + " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
hasil.write(""+" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) + "\n")
hasil.close()
| true | true |
f73eda20134489981bf33c4a600b93ed20064d0e | 3,045 | py | Python | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/DescribeCenRegionDomainRouteEntriesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/DescribeCenRegionDomainRouteEntriesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/DescribeCenRegionDomainRouteEntriesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class DescribeCenRegionDomainRouteEntriesRequest(RpcRequest):
    """RPC request for the CBN ``DescribeCenRegionDomainRouteEntries`` API.

    Paginated query of cross-region routing entries of a CEN instance
    (product 'Cbn', API version 2017-09-12). Each accessor below reads or
    writes a single query parameter; the expected wire type is noted inline.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'DescribeCenRegionDomainRouteEntries')
        self.set_method('POST')
        # Endpoint resolution tables are attached only when the installed
        # SDK core exposes the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):  # Long
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):  # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_CenId(self):  # String
        params = self.get_query_params()
        return params.get('CenId')

    def set_CenId(self, CenId):  # String
        self.add_query_param('CenId', CenId)

    def get_PageNumber(self):  # Integer
        params = self.get_query_params()
        return params.get('PageNumber')

    def set_PageNumber(self, PageNumber):  # Integer
        self.add_query_param('PageNumber', PageNumber)

    def get_PageSize(self):  # Integer
        params = self.get_query_params()
        return params.get('PageSize')

    def set_PageSize(self, PageSize):  # Integer
        self.add_query_param('PageSize', PageSize)

    def get_ResourceOwnerAccount(self):  # String
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):  # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):  # String
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):  # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):  # Long
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):  # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_CenRegionId(self):  # String
        params = self.get_query_params()
        return params.get('CenRegionId')

    def set_CenRegionId(self, CenRegionId):  # String
        self.add_query_param('CenRegionId', CenRegionId)

    def get_Status(self):  # String
        params = self.get_query_params()
        return params.get('Status')

    def set_Status(self, Status):  # String
        self.add_query_param('Status', Status)
| 38.544304 | 88 | 0.758949 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class DescribeCenRegionDomainRouteEntriesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'DescribeCenRegionDomainRouteEntries')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId):
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CenId(self):
return self.get_query_params().get('CenId')
def set_CenId(self, CenId):
self.add_query_param('CenId', CenId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber):
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize):
self.add_query_param('PageSize', PageSize)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount):
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId):
self.add_query_param('OwnerId', OwnerId)
def get_CenRegionId(self):
return self.get_query_params().get('CenRegionId')
def set_CenRegionId(self, CenRegionId):
self.add_query_param('CenRegionId', CenRegionId)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self, Status):
self.add_query_param('Status', Status)
| true | true |
f73eda34b97d868c08b0f2dbd69dbddb700a6dbb | 842 | py | Python | migrations/versions/3ee15b8edebb_add_sessions_table.py | romantomjak/dns-cockpit | 6fd5fc269061dec647b0d1fba79cb6c982777795 | [
"MIT"
] | 1 | 2018-08-27T10:07:25.000Z | 2018-08-27T10:07:25.000Z | migrations/versions/3ee15b8edebb_add_sessions_table.py | romantomjak/dns-cockpit | 6fd5fc269061dec647b0d1fba79cb6c982777795 | [
"MIT"
] | null | null | null | migrations/versions/3ee15b8edebb_add_sessions_table.py | romantomjak/dns-cockpit | 6fd5fc269061dec647b0d1fba79cb6c982777795 | [
"MIT"
] | 2 | 2018-10-06T22:56:33.000Z | 2021-01-03T16:14:53.000Z | """add sessions table
Revision ID: 3ee15b8edebb
Revises: feac35539764
Create Date: 2018-08-03 23:32:03.940252
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3ee15b8edebb'
down_revision = 'feac35539764'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``sessions`` table (auto-generated by Alembic, adjusted)."""
    columns = [
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('data', sa.String(length=262144), nullable=False),
        sa.Column('expires_at', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('sessions', *columns)
def downgrade():
    """Drop the ``sessions`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('sessions')
    # ### end Alembic commands ###
| 24.764706 | 68 | 0.673397 | from alembic import op
import sqlalchemy as sa
revision = '3ee15b8edebb'
down_revision = 'feac35539764'
branch_labels = None
depends_on = None
def upgrade():
mn('expires_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
| true | true |
f73edb68cd488318c962441da4a8b1eb42b61ff1 | 4,191 | py | Python | molecules/utils/callback.py | hengma1001/molecules | c6694cc77ef1eb246f3fdab1f201481d1bcaa07c | [
"MIT"
] | null | null | null | molecules/utils/callback.py | hengma1001/molecules | c6694cc77ef1eb246f3fdab1f201481d1bcaa07c | [
"MIT"
] | 1 | 2020-06-08T15:12:31.000Z | 2020-06-08T15:12:31.000Z | molecules/utils/callback.py | hengma1001/molecules | c6694cc77ef1eb246f3fdab1f201481d1bcaa07c | [
"MIT"
] | null | null | null | import os
import time
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Callback:
    """No-op base class defining the training lifecycle hooks.

    Subclasses override only the hooks they care about; every hook here is
    a no-op, so overrides never need to call ``super()``.
    """

    def __init__(self):
        pass

    def on_train_begin(self, logs):
        pass

    def on_train_end(self, logs):
        pass

    def on_epoch_begin(self, epoch, logs):
        pass

    def on_epoch_end(self, epoch, logs):
        pass

    def on_batch_begin(self, batch, epoch, logs):
        pass

    def on_batch_end(self, batch, epoch, logs):
        pass
# TODO: need way to share SummaryWriter among multiple callbacks for a model
# could make writer global variable
class LossCallback(Callback):
    """Records per-epoch train/validation loss for later inspection."""

    def on_train_begin(self, logs):
        # Fresh history at the start of every training run.
        self.train_losses = []
        self.valid_losses = []

    def on_epoch_end(self, epoch, logs):
        # `logs` is expected to carry 'train_loss' and 'valid_loss' entries.
        for key, history in (("train_loss", self.train_losses),
                             ("valid_loss", self.valid_losses)):
            history.append(logs[key])

    def save(self, path):
        """
        Save train and validation loss from the end of each epoch.

        Parameters
        ----------
        path: str
            Path to save train and validation loss history
        """
        torch.save({'loss': self.train_losses, 'valid': self.valid_losses}, path)
class CheckpointCallback(Callback):
    """Periodically saves model and optimizer state dicts during training.

    Parameters
    ----------
    interval : int
        Checkpoint every `interval` batches; 0 (default) means once per epoch.
    directory : str
        Where checkpoint files named 'epoch-{e}-%Y%m%d-%H%M%S.pt' are
        written. Created on construction if missing.
    """

    def __init__(self, interval=0,
                 directory=os.path.join('.', 'checkpoints')):
        if interval < 0:
            raise ValueError('Checkpoint interval must be non-negative')
        os.makedirs(directory, exist_ok=True)
        self.interval = interval
        self.directory = directory

    def on_batch_end(self, batch, epoch, logs):
        # Batch-level checkpointing only happens when an interval was requested.
        if self.interval and batch % self.interval == 0:
            self._save(epoch, logs)

    def on_epoch_end(self, epoch, logs):
        # Epoch-level checkpointing is the default (interval == 0).
        if not self.interval:
            self._save(epoch, logs)

    def _save(self, epoch, logs):
        """Write encoder/decoder weights plus optimizer state to disk."""
        model, optimizer = logs['model'], logs['optimizer']
        state = {
            'encoder_state_dict': model.encoder.state_dict(),
            'decoder_state_dict': model.decoder.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'epoch': epoch,
        }
        file_name = time.strftime(f'epoch-{epoch}-%Y%m%d-%H%M%S.pt')
        torch.save(state, os.path.join(self.directory, file_name))
class EmbeddingCallback(Callback):
    """Stores the embedding of one random sample at the end of each epoch.

    Parameters
    ----------
    data : torch.Tensor
        Dataset from which samples are drawn for embedding.
    """

    def __init__(self, data):
        self.data = data

    def on_train_begin(self, logs):
        self.embeddings = []   # one embedding per epoch
        self.data_index = []   # index of the sampled data point per epoch

    def on_epoch_end(self, epoch, logs):
        # Draw one random index and embed that sample on `device`
        # (module-level global). NOTE(review): assumes logs['model'].encode
        # accepts a batch of one -- confirm against the model API.
        sample_idx = torch.randint(len(self.data), (1,))
        embedded = logs['model'].encode(self.data[sample_idx].to(device))
        self.data_index.append(sample_idx)
        self.embeddings.append(embedded)

    def save(self, path):
        """
        Save embeddings and index of associated data point.

        Parameters
        ----------
        path: str
            Path to save embeddings and indices
        """
        torch.save({'embeddings': self.embeddings, 'indices': self.data_index}, path)
| 30.591241 | 85 | 0.599141 | import os
import time
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Callback:
def __init__(self): pass
def on_train_begin(self, logs): pass
def on_train_end(self, logs): pass
def on_epoch_begin(self, epoch, logs): pass
def on_epoch_end(self, epoch, logs): pass
def on_batch_begin(self, batch, epoch, logs): pass
def on_batch_end(self, batch, epoch, logs): pass
class LossCallback(Callback):
def on_train_begin(self, logs):
self.train_losses = []
self.valid_losses = []
def on_epoch_end(self, epoch, logs):
self.train_losses.append(logs['train_loss'])
self.valid_losses.append(logs['valid_loss'])
def save(self, path):
torch.save({'loss': self.train_losses, 'valid': self.valid_losses}, path)
class CheckpointCallback(Callback):
def __init__(self, interval=0,
directory=os.path.join('.', 'checkpoints')):
if interval < 0:
raise ValueError('Checkpoint interval must be non-negative')
os.makedirs(directory, exist_ok=True)
self.interval = interval
self.directory = directory
def on_batch_end(self, batch, epoch, logs):
if self.interval and batch % self.interval == 0:
self._save(epoch, logs)
def on_epoch_end(self, epoch, logs):
if not self.interval:
self._save(epoch, logs)
def _save(self, epoch, logs):
checkpoint = {
'encoder_state_dict': logs['model'].encoder.state_dict(),
'decoder_state_dict': logs['model'].decoder.state_dict(),
'optimizer_state_dict': logs['optimizer'].state_dict(),
'epoch': epoch
}
time_stamp = time.strftime(f'epoch-{epoch}-%Y%m%d-%H%M%S.pt')
path = os.path.join(self.directory, time_stamp)
torch.save(checkpoint, path)
class EmbeddingCallback(Callback):
def __init__(self, data):
self.data = data
def on_train_begin(self, logs):
self.embeddings = []
self.data_index = []
def on_epoch_end(self, epoch, logs):
idx = torch.randint(len(self.data), (1,))
embedding = logs['model'].encode(self.data[idx].to(device))
self.data_index.append(idx)
self.embeddings.append(embedding)
def save(self, path):
torch.save({'embeddings': self.embeddings, 'indices': self.data_index}, path)
| true | true |
f73edcaf21e30793f30976838e24e314b008e00d | 371 | py | Python | programmers/lv1/12940.py | KLumy/Basic-Algorithm | e52e4200c1955a9062569814ff3418dd06666845 | [
"MIT"
] | 1 | 2021-01-22T15:58:32.000Z | 2021-01-22T15:58:32.000Z | programmers/lv1/12940.py | KLumy/Basic-Algorithm | e52e4200c1955a9062569814ff3418dd06666845 | [
"MIT"
] | null | null | null | programmers/lv1/12940.py | KLumy/Basic-Algorithm | e52e4200c1955a9062569814ff3418dd06666845 | [
"MIT"
] | null | null | null | from math import gcd
# def getGcd(n, m):
# while m != 0:
# temp = n % m
# n = m
# m = temp
# return n
def solution(n: int, m: int):
    """Return the (gcd, lcm) pair of two positive integers."""
    divisor = gcd(n, m)
    multiple = n * m // divisor
    return divisor, multiple
if __name__ == "__main__":
    # Quick manual check: gcd/lcm of 3 and 12 -> (3, 12).
    n, m = 3, 12
    print(solution(n, m))
def solution(n: int, m: int):
g = gcd(n, m)
l = n * m // g
return g, l
if __name__ == "__main__":
n = 3
m = 12
print(solution(n, m))
| true | true |
f73edcc14f750dd6ab36d1acd8f72ad451ac843e | 4,212 | py | Python | start.py | blairgao/iMessageChatbot | 73fca49536673da40dcfd24b0a70dce0a8202942 | [
"BSD-3-Clause"
] | null | null | null | start.py | blairgao/iMessageChatbot | 73fca49536673da40dcfd24b0a70dce0a8202942 | [
"BSD-3-Clause"
] | null | null | null | start.py | blairgao/iMessageChatbot | 73fca49536673da40dcfd24b0a70dce0a8202942 | [
"BSD-3-Clause"
] | null | null | null | import sys
import random
import string
import datetime
import logging
import subprocess
import json
import time
import requests
import urllib.request
import ssl
# Fixed Scraping: SSL: CERTIFICATE_VERIFY_FAILED error
# WARNING: this disables TLS certificate verification for *all* HTTPS
# requests this process makes via urllib, exposing it to man-in-the-middle
# attacks. Prefer installing/locating proper CA certificates instead.
ssl._create_default_https_context = ssl._create_unverified_context
HELP = """OPTIONS:
--cute (default) sends cute/caring/lovie-dovie messages (okie ❤️🥰😘)
--mean sends moodie messages that tend to pick up fights (k.)
--hungry sends food related messages (Kk 😋🤤🍕🍩)
--random sends messages like a bipolar (k. ❤️🥰😘)
-f (frequent) sends him message more frequently
-r (reply) adds auto-reply feature, else ghost him without the tag """
subreddit_list = ["BetterEveryLoop", "AnimalsBeingJerks", "meme"]
#todo: https://www.twilio.com/blog/2016/09/how-to-receive-and-respond-to-a-text-message-with-python-flask-and-twilio.html
def printInfo(mood, settings, appleID):
    """Announce the bot start-up configuration on stdout."""
    print("Starting AI girlfriend chatbot...")
    # mood arrives as '--<name>'; strip the leading dashes for display.
    suffix = " mood with auto-reply feature. " if 'r' in settings else " mood. "
    print("Sending messages to " + appleID + " in " + mood[2:] + suffix)
#credit: https://github.com/noahbroyles/iMessageFUN
def runAppleScript(applescript):
    """Execute *applescript* via the `osascript` CLI (macOS only)."""
    # Each non-empty line of the script becomes its own "-e <line>" pair.
    arguments = []
    for line in applescript.split('\n'):
        stripped = line.strip()
        if stripped != '':
            arguments.extend(("-e", stripped))
    proc = subprocess.Popen(["osascript"] + arguments, stdout=subprocess.PIPE)
    proc.stdout.flush()
def sendMessage(message, appleID):
    """Send *message* to *appleID* over iMessage via an AppleScript bridge.

    NOTE(review): `message`/`appleID` are spliced into the AppleScript
    unescaped, so a message containing double quotes would break (or
    inject into) the script -- acceptable only for trusted local inputs.
    """
    script = '''
    on run
        tell application "Messages"
            set iMessageService to 1st service whose service type = iMessage
            set boyfriend to buddy "''' + appleID + '''" of iMessageService
            send "''' + message + '''" to boyfriend
        end tell
    end run'''
    runAppleScript(script)
    # BUG FIX: the log line fused "Sent" and the message ("Sentokie...");
    # use lazy logging formatting with a proper separator.
    logging.info("Sent %s at %s", message, datetime.datetime.now())
def getMeme(subreddit):
    """Fetch a random meme image URL from the given subreddit."""
    # NOTE(review): network call with no timeout or error handling; the API
    # response is assumed to contain a "url" field.
    api = "https://meme-api.herokuapp.com/gimme/" + subreddit
    payload = json.loads(requests.get(api).text)
    # apple script cannot send image file: urllib.request.urlretrieve(url, "meme.jpg")
    return payload["url"]
def getMessage(path, category):
    """Return one random entry from messages/<path> under *category*."""
    with open('messages/' + path, 'r') as file:
        bank = json.load(file)
    choices = bank[category]
    return choices[random.randint(0, len(choices) - 1)]
def generateMessage(mood, appleID):
    """Send one mood-appropriate message (or meme) to *appleID*.

    Parameters
    ----------
    mood : str
        One of '--cute' (default), '--mean', '--hungry', '--random'.
    appleID : str
        iMessage recipient (phone number or Apple ID email).
    """
    if "mean" in mood:
        # BUG FIX: `message` was referenced before assignment here (NameError
        # on every non-cute mood). Pull a message from the mood's bank.
        # Assumes messages/mean.json mirrors cute.json's
        # {"greetings": [...]} layout -- TODO confirm file/category names.
        message = getMessage("mean.json", "greetings")
        sendMessage(message, appleID)
    elif "hungry" in mood:
        message = getMessage("hungry.json", "greetings")  # same assumption
        sendMessage(message, appleID)
    elif "random" in mood:
        message = getMessage("random.json", "greetings")  # same assumption
        sendMessage(message, appleID)
    else:
        # Default (--cute): x in 0..11, x % 11 == 0 for x in {0, 11},
        # i.e. a 2-in-12 chance of a text message, otherwise a meme.
        x = random.randint(0, 11)
        if x % 11 == 0:
            message = getMessage("cute.json", "greetings")
            sendMessage(message, appleID)
        else:
            i = random.randint(0, len(subreddit_list) - 1)
            meme = getMeme(subreddit_list[i])
            sendMessage(meme, appleID)
if __name__ == "__main__":
    args = sys.argv
    if len(args) < 2:
        # BUG FIX: previously the script printed the help text and then fell
        # through, using its own path as the "appleID". Show usage and stop.
        print("\n" + HELP + "\n")
        sys.exit(1)
    # Mood flag ('--cute' etc.); default to cute when none is given.
    try:
        mood = [arg for arg in args if arg.startswith("--")][0]
    except IndexError:
        mood = '--cute'
    # Single-dash settings flags ('-f', '-r'); default to none.
    try:
        settings = [arg for arg in args if arg.startswith("-") and not arg.startswith("--")][0]
    except IndexError:
        settings = ''
    appleID = args[-1]
    # Recipient must be a bare phone number or an email-style Apple ID.
    if '@' not in appleID and not appleID.isdigit():
        sys.exit("ERROR: Invalid AppleID or Phone number: {}".format(appleID))
    printInfo(mood, settings, appleID)
    logging.basicConfig(filename="message.log", level=logging.INFO)
    logging.info("Sending message to " + appleID + " with " + mood + " " + settings)
    while True:
        try:
            generateMessage(mood, appleID)
            # '-f' (frequent) halves the delay between messages.
            time.sleep(5 if 'f' in settings else 10)
        except KeyboardInterrupt:
            print("RAP got interrupted")
            break
        except Exception as e:
            # BUG FIX: logging.ERROR is an int level constant, so calling it
            # raised TypeError and masked the real error; log it properly.
            print(e)
            logging.exception(e)
            break
| 32.4 | 121 | 0.597578 | import sys
import random
import string
import datetime
import logging
import subprocess
import json
import time
import requests
import urllib.request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
HELP = """OPTIONS:
--cute (default) sends cute/caring/lovie-dovie messages (okie ❤️🥰😘)
--mean sends moodie messages that tend to pick up fights (k.)
--hungry sends food related messages (Kk 😋🤤🍕🍩)
--random sends messages like a bipolar (k. ❤️🥰😘)
-f (frequent) sends him message more frequently
-r (reply) adds auto-reply feature, else ghost him without the tag """
subreddit_list = ["BetterEveryLoop", "AnimalsBeingJerks", "meme"]
def printInfo(mood, settings, appleID):
print("Starting AI girlfriend chatbot...")
if 'r' in settings:
print("Sending messages to " + appleID + " in " + mood[2:] + " mood with auto-reply feature. ")
else:
print("Sending messages to " + appleID + " in " + mood[2:] + " mood. ")
def runAppleScript(applescript):
arguments = [item for x in [("-e", l.strip()) for l in applescript.split('\n') if l.strip() != ''] for item in x]
proc = subprocess.Popen(["osascript"] + arguments, stdout=subprocess.PIPE)
proc.stdout.flush()
def sendMessage(message, appleID):
script = '''
on run
tell application "Messages"
set iMessageService to 1st service whose service type = iMessage
set boyfriend to buddy "''' + appleID + '''" of iMessageService
send "''' + message + '''" to boyfriend
end tell
end run'''
runAppleScript(script)
logging.info("Sent" + message + " at " + str(datetime.datetime.now()))
def getMeme(subreddit):
response = requests.get("https://meme-api.herokuapp.com/gimme/" + subreddit)
url = json.loads(response.text)["url"]
return url
def getMessage(path, category):
with open('messages/' + path, 'r') as file:
data = json.load(file)
x = random.randint(0, len(data[category]) - 1)
return data[category][x]
def generateMessage(mood, appleID):
if "mean" in mood:
sendMessage(message, appleID)
elif "hungry" in mood:
sendMessage(message, appleID)
elif "random" in mood:
sendMessage(message, appleID)
else:
x = random.randint(0, 11)
if x%11 == 0:
message = getMessage("cute.json", "greetings")
sendMessage(message, appleID)
else:
i = random.randint(0, len(subreddit_list) - 1)
meme = getMeme(subreddit_list[i])
sendMessage(meme, appleID)
if __name__ == "__main__":
args = sys.argv
if len(args) < 2:
print("\n" + HELP + "\n")
try:
mood = [arg for arg in args if arg.startswith("--")][0]
except IndexError:
mood = '--cute'
try:
settings = [arg for arg in args if arg.startswith("-") and not arg.startswith("--")][0]
except IndexError:
settings = ''
appleID = args[-1]
for x in appleID:
if x not in string.digits and '@' not in appleID:
sys.exit("ERROR: Invalid AppleID or Phone number: {}".format(appleID))
printInfo(mood, settings, appleID)
logging.basicConfig(filename="message.log", level=logging.INFO)
logging.info("Sending message to " + appleID + " with " + mood + " " + settings)
while True:
try:
generateMessage(mood, appleID)
if 'f' in settings:
time.sleep(5)
else:
time.sleep(10)
except KeyboardInterrupt:
print("RAP got interrupted")
break
except Exception as e:
print(e)
logging.ERROR(e)
break
| true | true |
f73eddf2f7412f9fb8281475ab7723626cb37399 | 2,486 | py | Python | testCases/noiseTest/sixMachineNoise4.ltd.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | testCases/noiseTest/sixMachineNoise4.ltd.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | testCases/noiseTest/sixMachineNoise4.ltd.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | # LTD simulation models / perturbances
# Attribute name case sensitive.
# Commented and empty lines are ignored
# Double quoted variable names in sysPert parameters ignored
# Uses Steps and no ACE filtering
# Perturbances
mirror.sysPerturbances = [
    # No scripted perturbances in this case -- load noise only (see NoiseAgent).
    #'load 9 : step P 5 75 rel',
    #'gen 5 : step Pm 5 -75 rel',
    #'gen 5 : step Pref 5 -75 rel',
    ]
# Power plants: each entry is "gen <bus> [<id>] : <participation factor> : <ctrl type>".
mirror.sysPowerPlants ={'pp1': ["gen 2 1: 0.75 : step", "gen 2 2 : 0.25: step"],
                        'pp2': ["gen 3 : 0.75: step", "gen 4 : 0.25: step"],
                        }
# Load noise agent arguments: (mirror, magnitude, flag) -- presumably the
# noise amplitude and a walk/enable flag; confirm against ltd.perturbance.LoadNoiseAgent.
mirror.NoiseAgent = ltd.perturbance.LoadNoiseAgent(mirror, 0.3, True)
# Testing of Balancing Authority input
mirror.sysBA = {
    'BA1':{
        'Area':1,
        'B': "2.0 : perload", # MW/0.1 Hz
        'AGCActionTime': 15.00, # seconds
        'ACEgain' : 2.0,
        'AGCType':'TLB : 0', # Tie-Line Bias
        'UseAreaDroop' : False,
        'AreaDroop' : 0.05,
        'IncludeIACE' : True,
        'IACEconditional': False,
        'IACEwindow' : 15, # seconds - size of window
        'IACEscale' : 1/15,
        'IACEweight' : .3, # out of one - percent to mix with calculated ace
        'IACEdeadband' : 0.0, # Hz # changed 10/6/19
        'ACEFiltering': 'PI : 0.04 0.0001', # changed 10/6/19
        'AGCDeadband' : None, # MW? -> not implemented
        'GovDeadbandType' : 'nldroop', # changed 10/6/19
        'GovDeadband' : .036, # Hz
        'GovAlpha' : 0.016, # changed 10/6/19
        'GovBeta' : 0.036, # changed 10/6/19
        # Controlled generators: "plant <name> : <share>" or "gen <bus> : <share> : <type>";
        # shares within a BA should sum to 1.0.
        'CtrlGens': ['plant pp1 : .60 ',
                     'gen 1 : .40 : step']
        },
    'BA2':{
        'Area':2,
        'B': "2.0 : perload", # MW/0.1 Hz
        'AGCActionTime': 15.00, # seconds
        'ACEgain' : 2.0,
        'AGCType':'TLB : 0', # Tie-Line Bias
        'UseAreaDroop' : False,
        'AreaDroop' : 0.05,
        'IncludeIACE' : True,
        'IACEconditional': False,
        'IACEwindow' : 15, # seconds - size of window
        'IACEscale' : 1/15,
        'IACEweight' : .3, # out of one - percent to mix with calculated ace
        'IACEdeadband' : 0.0, # Hz # changed 10/6/19
        'ACEFiltering': 'PI : 0.04 0.0001', # changed 10/6/19
        'AGCDeadband' : None, # MW? -> not implemented
        'GovDeadbandType' : 'nldroop', # changed 10/6/19
        'GovDeadband' : .036, # Hz
        'GovAlpha' : 0.016, # changed 10/6/19
        'GovBeta' : 0.036, # changed 10/6/19
        'CtrlGens': ['plant pp2 : 1.0 ']
        },
    }
| 35.514286 | 80 | 0.532985 |
mirror.sysPerturbances = [
]
mirror.sysPowerPlants ={'pp1': ["gen 2 1: 0.75 : step", "gen 2 2 : 0.25: step"],
'pp2': ["gen 3 : 0.75: step", "gen 4 : 0.25: step"],
}
mirror.NoiseAgent = ltd.perturbance.LoadNoiseAgent(mirror, 0.3, True)
mirror.sysBA = {
'BA1':{
'Area':1,
'B': "2.0 : perload",
'AGCActionTime': 15.00,
'ACEgain' : 2.0,
'AGCType':'TLB : 0',
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 15,
'IACEscale' : 1/15,
'IACEweight' : .3,
'IACEdeadband' : 0.0, ering': 'PI : 0.04 0.0001',
'AGCDeadband' : None,
'GovDeadbandType' : 'nldroop',
'GovDeadband' : .036,
'GovAlpha' : 0.016,
'GovBeta' : 0.036,
'CtrlGens': ['plant pp1 : .60 ',
'gen 1 : .40 : step']
},
'BA2':{
'Area':2,
'B': "2.0 : perload",
'AGCActionTime': 15.00,
'ACEgain' : 2.0,
'AGCType':'TLB : 0',
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 15,
'IACEscale' : 1/15,
'IACEweight' : .3,
'IACEdeadband' : 0.0, ering': 'PI : 0.04 0.0001',
'AGCDeadband' : None,
'GovDeadbandType' : 'nldroop',
'GovDeadband' : .036,
'GovAlpha' : 0.016,
'GovBeta' : 0.036,
'CtrlGens': ['plant pp2 : 1.0 ']
},
}
| true | true |
f73ede6eb958c71d8efb3e061a8e2d340d744406 | 7,064 | py | Python | web/mainApp/views.py | njw1204/golf-online-judge | adb22653c457e97d5a239aa562725144235a2ab8 | [
"MIT"
] | 3 | 2019-07-22T20:00:07.000Z | 2021-12-17T17:54:09.000Z | web/mainApp/views.py | njw1204/golf-online-judge | adb22653c457e97d5a239aa562725144235a2ab8 | [
"MIT"
] | null | null | null | web/mainApp/views.py | njw1204/golf-online-judge | adb22653c457e97d5a239aa562725144235a2ab8 | [
"MIT"
] | 2 | 2019-06-18T18:41:10.000Z | 2021-04-15T09:57:31.000Z | from django.shortcuts import redirect, render
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from django.contrib.auth import login
from django.contrib import messages
from django.db import transaction
from .forms import CustomUserCreateForm
from . import models as mainModels
from . import forms as mainForms
from . import utils
from judge import tasks, problem
# Create your views here.
class SignupView(CreateView):
    """Sign-up page.

    Creates the account from ``CustomUserCreateForm``, logs the new user
    in immediately, and redirects to the index page with a welcome message.
    """

    template_name = "registration/signup.html"
    form_class = CustomUserCreateForm
    success_url = reverse_lazy("mainApp:index")

    def get_form_kwargs(self):
        """Forward the reCAPTCHA response token to the form for validation."""
        form_kwargs = super().get_form_kwargs()
        form_kwargs["cap-token"] = self.request.POST.get("g-recaptcha-response", "")
        return form_kwargs

    def form_valid(self, form):
        """Persist the user, authenticate the session, then redirect."""
        new_user = form.save()
        self.object = new_user
        login(self.request, new_user)
        messages.info(self.request, new_user.username + "님, 회원가입을 환영합니다.")
        return redirect(self.get_success_url())
class IndexView(TemplateView):
    # Static landing page: simply renders the index template, no extra context.
    template_name = "mainApp/index.html"
class ProblemListView(TemplateView):
    """Paginated list of visible problems (10 per page)."""

    template_name = "mainApp/problem-list.html"

    def dispatch(self, request, *args, **kwargs):
        per_page = 10  # problems shown on a single page
        visible = mainModels.ProblemPost.objects.filter(show=True)
        total = visible.count()
        kwargs["problem_total_count"] = total
        # Ceiling division: number of the last page (0 when no problems exist).
        kwargs["last_page"] = (total + per_page - 1) // per_page
        page = kwargs["current_page"]
        # The requested page must be in range; page 1 of an empty list is allowed.
        if not (1 <= page <= kwargs["last_page"]) \
                and not (page == 1 and kwargs["last_page"] == 0):
            messages.info(request, "문제가 존재하지 않습니다.")
            return redirect("mainApp:index")
        kwargs["pages"] = range(1, kwargs["last_page"] + 1)
        start = (page - 1) * per_page
        # Slice of problems to render on the current page.
        kwargs["problems"] = visible.order_by("pk")[start:start + per_page]
        return super().dispatch(request, *args, **kwargs)
class ProblemView(TemplateView):
    """Detail page for a single visible problem."""

    template_name = "mainApp/problem.html"

    def dispatch(self, request, *args, **kwargs):
        # The problem must exist and be flagged visible.
        matches = mainModels.ProblemPost.objects.filter(pk=kwargs["pk"], show=True)
        if not matches.exists():
            messages.info(request, "문제가 존재하지 않습니다.")
            return redirect("mainApp:index")
        post = matches[0]
        kwargs["problem"] = post
        # Absolute URL is exposed for sharing/meta tags in the template.
        kwargs["full_absolute_url"] = request.build_absolute_uri(post.get_absolute_url())
        return super().dispatch(request, *args, **kwargs)
class ProblemSubmitView(CreateView):
    """Submission page for a problem.

    Requires an authenticated user and an existing, visible problem.
    On a valid submission the solve post is stored and the judge worker
    is triggered asynchronously.
    """

    template_name = "mainApp/problem-submit.html"
    form_class = mainForms.SolvePostForm

    def dispatch(self, request, *args, **kwargs):
        # Resolve the target problem up front; fixes two defects of the old
        # code: a bare `except:` that swallowed every error, and a crash when
        # the problem was missing but the user WAS authenticated (the old
        # try-block only raised on the unauthenticated branch).
        problem_post = mainModels.ProblemPost.objects.filter(
            pk=self.kwargs["problem_pk"], show=True).first()
        if problem_post is None:
            # Unknown or hidden problem: fall back to the first listing page.
            return redirect("mainApp:problems", current_page=1)
        self.kwargs["problem"] = problem_post
        if not request.user.is_authenticated:
            messages.info(request, "로그인을 해주세요.")
            return redirect(problem_post.get_absolute_url())
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Expose the problem being solved to the template."""
        context = super().get_context_data(**kwargs)
        context["problem"] = self.kwargs["problem"]
        return context

    def get_success_url(self):
        """After submitting, land on the problem's judge-status page."""
        return self.kwargs["problem"].get_absolute_status_url()

    def get_form_kwargs(self):
        """Pass submitter identity and target problem to the form."""
        kwargs = super().get_form_kwargs()
        kwargs["user"] = self.request.user
        kwargs["ip"] = utils.get_real_ip(self.request)
        kwargs["problem"] = self.kwargs["problem"]
        return kwargs

    def form_valid(self, form):
        """Store the submission and wake the judge worker."""
        self.object = form.save(commit=False)
        self.object.user_pk = self.request.user
        self.object.problem_pk = self.kwargs["problem"]
        self.object.ip = utils.get_real_ip(self.request)
        self.object.save()
        tasks.activate_judge()  # trigger asynchronous judging
        # get_success_url() already returns a resolved URL; the old code also
        # passed a dead `pk=` kwarg that redirect() silently discarded.
        return redirect(self.get_success_url())
class ProblemStatusView(TemplateView):
    """Judge status board: one problem's submissions, or all of them."""

    template_name = "mainApp/problem-status.html"

    def get(self, request, *args, **kwargs):
        per_page = 10  # submissions shown on a single page
        scoped = False
        if "problem_pk" in kwargs:
            # If the problem exists and is visible, show only its submissions.
            found = mainModels.ProblemPost.objects.filter(
                pk=kwargs["problem_pk"], show=True)
            if found.exists():
                submits = mainModels.SolvePost.objects.filter(
                    problem_pk=found.first(), show=True).order_by("-pk")
                kwargs["heading"] = str(kwargs["problem_pk"]) + "번 문제 채점 현황"
                scoped = True
        if not scoped:
            # Otherwise fall back to the site-wide status board.
            submits = mainModels.SolvePost.objects.select_related(
                "problem_pk").filter(show=True, problem_pk__show=True).order_by("-pk")
            kwargs["heading"] = "전체 채점 현황"
        kwargs["single_mode"] = scoped
        total = submits.count()
        kwargs["total_count"] = total
        # Ceiling division: number of the last page (0 when nothing was submitted).
        kwargs["last_page"] = (total + per_page - 1) // per_page
        page = kwargs["current_page"]
        # Requested page must be in range; page 1 of an empty board is allowed.
        if not (1 <= page <= kwargs["last_page"]) \
                and not (page == 1 and kwargs["last_page"] == 0):
            return redirect("mainApp:index")
        kwargs["pages"] = range(1, kwargs["last_page"] + 1)
        start = (page - 1) * per_page
        kwargs["submits"] = submits[start:start + per_page]
        return super().get(request, *args, **kwargs)
class ProblemMakeView(CreateView):
    """Problem creation page.

    Saves the problem post and its test-case files inside one database
    transaction so a failed file write rolls the post back as well.
    """

    template_name = "mainApp/problem-make.html"
    form_class = mainForms.CreateProblemForm

    def get_form_kwargs(self):
        """Forward the reCAPTCHA response token to the form for validation."""
        kwargs = super().get_form_kwargs()
        kwargs["cap-token"] = self.request.POST.get("g-recaptcha-response", "")
        return kwargs

    def form_valid(self, form):
        """Create the problem atomically; on any failure re-render the form."""
        try:
            with transaction.atomic():
                self.object = form.save()
                self.object.creator = self.request.user
                self.object.save()
                problem.save_testcase(self.object.pk,
                                      form.cleaned_data["input_file"],
                                      form.cleaned_data["output_file"])
        # Narrowed from a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; Exception still covers DB and file errors here.
        except Exception:
            messages.warning(self.request, "문제 생성에 실패했습니다.")
            return render(self.request, self.template_name, {"form": form})
        messages.info(self.request, "문제가 생성되었습니다.")
        return redirect("mainApp:problem", pk=self.object.pk)
| 39.909605 | 136 | 0.648783 | from django.shortcuts import redirect, render
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from django.contrib.auth import login
from django.contrib import messages
from django.db import transaction
from .forms import CustomUserCreateForm
from . import models as mainModels
from . import forms as mainForms
from . import utils
from judge import tasks, problem
class SignupView(CreateView):
template_name = "registration/signup.html"
form_class = CustomUserCreateForm
success_url = reverse_lazy("mainApp:index")
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["cap-token"] = self.request.POST.get("g-recaptcha-response", "")
return kwargs
def form_valid(self, form):
self.object = form.save()
login(self.request, self.object)
messages.info(self.request, self.object.username + "님, 회원가입을 환영합니다.")
return redirect(self.get_success_url())
class IndexView(TemplateView):
template_name = "mainApp/index.html"
class ProblemListView(TemplateView):
template_name = "mainApp/problem-list.html"
def dispatch(self, request, *args, **kwargs):
problem_per_page = 10
cache = mainModels.ProblemPost.objects.filter(show=True)
kwargs["problem_total_count"] = cache.count()
kwargs["last_page"] = kwargs["problem_total_count"] // problem_per_page + 1
if kwargs["problem_total_count"] % problem_per_page == 0:
kwargs["last_page"] -= 1
if not (1 <= kwargs["current_page"] <= kwargs["last_page"]) \
and not (kwargs["current_page"] == 1 and kwargs["last_page"] == 0):
messages.info(request, "문제가 존재하지 않습니다.")
return redirect("mainApp:index")
kwargs["pages"] = range(1, kwargs["last_page"] + 1)
show_start_range = (kwargs["current_page"] - 1) * problem_per_page
show_end_range = show_start_range + problem_per_page
kwargs["problems"] = cache.order_by("pk")[show_start_range:show_end_range]
return super().dispatch(request, *args, **kwargs)
class ProblemView(TemplateView):
template_name = "mainApp/problem.html"
def dispatch(self, request, *args, **kwargs):
result = mainModels.ProblemPost.objects.filter(pk=kwargs["pk"], show=True)
if not result.exists():
messages.info(request, "문제가 존재하지 않습니다.")
return redirect("mainApp:index")
kwargs["problem"] = result[0]
kwargs["full_absolute_url"] = request.build_absolute_uri(result[0].get_absolute_url())
return super().dispatch(request, *args, **kwargs)
class ProblemSubmitView(CreateView):
template_name = "mainApp/problem-submit.html"
form_class = mainForms.SolvePostForm
def dispatch(self, request, *args, **kwargs):
try:
self.kwargs["problem"] = mainModels.ProblemPost.objects.filter(pk=self.kwargs["problem_pk"], show=True).first()
if not request.user.is_authenticated:
messages.info(request, "로그인을 해주세요.")
return redirect(self.kwargs["problem"].get_absolute_url())
except:
return redirect("mainApp:problems", current_page=1)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["problem"] = self.kwargs["problem"]
return context
def get_success_url(self):
return self.kwargs["problem"].get_absolute_status_url()
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
kwargs["ip"] = utils.get_real_ip(self.request)
kwargs["problem"] = self.kwargs["problem"]
return kwargs
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user_pk = self.request.user
self.object.problem_pk = self.kwargs["problem"]
self.object.ip = utils.get_real_ip(self.request)
self.object.save()
tasks.activate_judge()
return redirect(self.get_success_url(), pk=self.kwargs["problem_pk"])
class ProblemStatusView(TemplateView):
template_name = "mainApp/problem-status.html"
def get(self, request, *args, **kwargs):
submit_per_page = 10
single_mode = False
if "problem_pk" in kwargs:
result = mainModels.ProblemPost.objects.filter(pk=kwargs["problem_pk"], show=True)
if result.exists():
submits = mainModels.SolvePost.objects.filter(problem_pk=result.first(), show=True).order_by("-pk")
kwargs["heading"] = str(kwargs["problem_pk"]) + "번 문제 채점 현황"
single_mode = True
if not single_mode:
submits = mainModels.SolvePost.objects.select_related("problem_pk").filter(show=True, problem_pk__show=True).order_by("-pk")
kwargs["heading"] = "전체 채점 현황"
kwargs["single_mode"] = single_mode
kwargs["total_count"] = submits.count()
kwargs["last_page"] = kwargs["total_count"] // submit_per_page + 1
if kwargs["total_count"] % submit_per_page == 0:
kwargs["last_page"] -= 1
if not (1 <= kwargs["current_page"] <= kwargs["last_page"]) \
and not (kwargs["current_page"] == 1 and kwargs["last_page"] == 0):
return redirect("mainApp:index")
kwargs["pages"] = range(1, kwargs["last_page"] + 1)
show_start_range = (kwargs["current_page"] - 1) * submit_per_page
show_end_range = show_start_range + submit_per_page
kwargs["submits"] = submits[show_start_range:show_end_range]
return super().get(request, *args, **kwargs)
class ProblemMakeView(CreateView):
template_name = "mainApp/problem-make.html"
form_class = mainForms.CreateProblemForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["cap-token"] = self.request.POST.get("g-recaptcha-response", "")
return kwargs
def form_valid(self, form):
try:
with transaction.atomic():
self.object = form.save()
self.object.creator = self.request.user
self.object.save()
problem.save_testcase(self.object.pk, form.cleaned_data["input_file"], form.cleaned_data["output_file"])
except:
messages.warning(self.request, "문제 생성에 실패했습니다.")
return render(self.request, self.template_name, {"form": form})
messages.info(self.request, "문제가 생성되었습니다.")
return redirect("mainApp:problem", pk=self.object.pk)
| true | true |
f73edfd15082c761095976f1e0a74df5e1ef490b | 1,734 | py | Python | whist/core/scoring/elo.py | Whist-Team/Whist-Core | b0130c6988592ba2094c7e1384362daabaefb3f4 | [
"MIT"
] | 1 | 2021-07-01T09:11:35.000Z | 2021-07-01T09:11:35.000Z | whist/core/scoring/elo.py | Whist-Team/Whist-Core | b0130c6988592ba2094c7e1384362daabaefb3f4 | [
"MIT"
] | 156 | 2021-04-20T10:01:21.000Z | 2022-03-29T21:20:25.000Z | whist/core/scoring/elo.py | Whist-Team/Whist-Core | b0130c6988592ba2094c7e1384362daabaefb3f4 | [
"MIT"
] | 2 | 2021-04-18T21:26:22.000Z | 2022-03-20T17:41:13.000Z | """
Elo Rating Calculator
"""
from whist.core.scoring.score_card import ScoreCard
from whist.core.scoring.team import Team
from whist.core.user.player import Player
# pylint: disable=too-few-public-methods
class EloRater:
    """Static helper that applies Elo rating updates after a set of hands."""

    @staticmethod
    def rate(teams: list[Team], scores: ScoreCard) -> None:
        """Update every player's rating from the recorded scores.

        :param teams: the two teams that played (index 0 vs index 1)
        :param scores: score card covering the hands played
        """
        shift = EloRater._score_delta(teams[0], teams[1], scores)
        for side in teams:
            for member in side.players:
                member.rating += round(
                    EloRater._k_factor(member) * shift * scores.won(side))

    @staticmethod
    def _k_factor(player: Player) -> int:
        """K-factor per standard Elo tiers: established strong players move
        slowly (10), provisional weak players move fast (40), else 20."""
        if player.games > 30 and player.rating > 2400:
            return 10
        if player.games < 30 and player.rating < 2300:
            return 40
        return 20

    @staticmethod
    def _score_delta(team: Team, opponent: Team, scores: ScoreCard) -> float:
        """Actual wins minus the wins expected over all recorded games."""
        expected = EloRater._expected_score(team, opponent)
        return scores.score(team) - len(scores) * expected

    @staticmethod
    def _expected_score(team: Team, opponent: Team) -> float:
        """Expected win probability of *team* against *opponent*."""
        quot_own = EloRater._team_quotient(team)
        quot_opp = EloRater._team_quotient(opponent)
        return quot_own / (quot_own + quot_opp)

    @staticmethod
    def _team_quotient(team: Team):
        """Elo quotient 10^(rating/400) used by the expected-score formula."""
        return 10 ** (team.rating / 400)
| 29.896552 | 87 | 0.620531 | from whist.core.scoring.score_card import ScoreCard
from whist.core.scoring.team import Team
from whist.core.user.player import Player
class EloRater:
@staticmethod
def rate(teams: list[Team], scores: ScoreCard) -> None:
delta = EloRater._score_delta(teams[0], teams[1], scores)
for team in teams:
for player in team.players:
k_factor = EloRater._k_factor(player)
won = scores.won(team)
player.rating += round(k_factor * delta * won)
@staticmethod
def _k_factor(player: Player) -> int:
if player.rating > 2400 and player.games > 30:
return 10
if player.rating < 2300 and player.games < 30:
return 40
return 20
@staticmethod
def _score_delta(team: Team, opponent: Team, scores: ScoreCard) -> float:
num_games = len(scores)
num_wins = scores.score(team)
expected_score = EloRater._expected_score(team, opponent)
return num_wins - num_games * expected_score
@staticmethod
def _expected_score(team: Team, opponent: Team) -> float:
q_a = EloRater._team_quotient(team)
q_b = EloRater._team_quotient(opponent)
return q_a / (q_a + q_b)
@staticmethod
def _team_quotient(team: Team):
return 10 ** (team.rating / 400)
| true | true |
f73edffebe683195910b7afb5e25cdffa1d8f234 | 19,234 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/protection_containers_operations.py | AlexanderYukhanov/azure-sdk-for-python | 41e37c8a10876db40697a63e828ed7cafc19c7d6 | [
"MIT"
] | null | null | null | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/protection_containers_operations.py | AlexanderYukhanov/azure-sdk-for-python | 41e37c8a10876db40697a63e828ed7cafc19c7d6 | [
"MIT"
] | null | null | null | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/protection_containers_operations.py | AlexanderYukhanov/azure-sdk-for-python | 41e37c8a10876db40697a63e828ed7cafc19c7d6 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ProtectionContainersOperations(object):
    """ProtectionContainersOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-12-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):

        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-12-01"

        self.config = config

    # ---- shared request plumbing (was copy-pasted into all 5 operations) ----

    def _format_url(self, url_template, vault_name, resource_group_name,
                    fabric_name, container_name=None):
        """Serialize the common path parameters and format *url_template*.

        ``containerName`` is only substituted when *container_name* is given
        (the refresh operation has no container segment in its path).
        """
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str')
        }
        if container_name is not None:
            path_format_arguments['containerName'] = self._serialize.url(
                "container_name", container_name, 'str')
        return self._client.format_url(url_template, **path_format_arguments)

    def _query_parameters(self, filter=None):
        """Build the query-string dict: api-version plus an optional $filter."""
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query(
            "self.api_version", self.api_version, 'str')
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
        return query_parameters

    def _header_parameters(self, custom_headers):
        """Build the standard JSON request headers, merging *custom_headers*."""
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, 'str')
        return header_parameters

    @staticmethod
    def _cloud_error(response):
        """Wrap an unexpected *response* in a CloudError carrying its request id."""
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        return exp

    # ------------------------------ operations ------------------------------

    def get(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Gets details of the specific container registered to your Recovery
        Services Vault.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs.
        :type fabric_name: str
        :param container_name: Name of the container whose details need to be
         fetched.
        :type container_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ProtectionContainerResource or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        url = self._format_url(self.get.metadata['url'], vault_name,
                               resource_group_name, fabric_name, container_name)
        query_parameters = self._query_parameters()
        header_parameters = self._header_parameters(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise self._cloud_error(response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectionContainerResource', response)

        if raw:
            return ClientRawResponse(deserialized, response)

        return deserialized
    get.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def register(
            self, vault_name, resource_group_name, fabric_name, container_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Registers the container with Recovery Services vault.
        This is an asynchronous operation. To track the operation status, use
        location header to call get latest status of the operation.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the container.
        :type fabric_name: str
        :param container_name: Name of the container to be registered.
        :type container_name: str
        :param parameters: Request body for operation
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ProtectionContainerResource or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        url = self._format_url(self.register.metadata['url'], vault_name,
                               resource_group_name, fabric_name, container_name)
        query_parameters = self._query_parameters()
        header_parameters = self._header_parameters(custom_headers)

        # Construct body
        body_content = self._serialize.body(parameters, 'ProtectionContainerResource')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            raise self._cloud_error(response)

        deserialized = None
        # 202 means the registration is still in progress; only 200 has a body.
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectionContainerResource', response)

        if raw:
            return ClientRawResponse(deserialized, response)

        return deserialized
    register.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def unregister(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Unregisters the given container from your Recovery Services Vault.
        This is an asynchronous operation. To determine whether the backend
        service has finished processing the request, call Get Container
        Operation Result API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs.
        :type fabric_name: str
        :param container_name: Name of the container which needs to be
         unregistered from the Recovery Services Vault.
        :type container_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        url = self._format_url(self.unregister.metadata['url'], vault_name,
                               resource_group_name, fabric_name, container_name)
        query_parameters = self._query_parameters()
        header_parameters = self._header_parameters(custom_headers)

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202, 204]:
            raise self._cloud_error(response)

        if raw:
            return ClientRawResponse(None, response)
    unregister.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def inquire(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Inquires all the protectable item in the given container that can be
        protected.

        Inquires all the protectable items that are protectable under the given
        container.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param fabric_name: Fabric Name associated with the container.
        :type fabric_name: str
        :param container_name: Name of the container in which inquiry needs to
         be triggered.
        :type container_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        url = self._format_url(self.inquire.metadata['url'], vault_name,
                               resource_group_name, fabric_name, container_name)
        query_parameters = self._query_parameters()
        header_parameters = self._header_parameters(custom_headers)

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202]:
            raise self._cloud_error(response)

        if raw:
            return ClientRawResponse(None, response)
    inquire.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire'}

    def refresh(
            self, vault_name, resource_group_name, fabric_name, filter=None, custom_headers=None, raw=False, **operation_config):
        """Discovers all the containers in the subscription that can be backed up
        to Recovery Services Vault. This is an asynchronous operation. To know
        the status of the operation, call GetRefreshOperationResult API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated the container.
        :type fabric_name: str
        :param filter: OData filter options.
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # No container segment in this path; $filter is forwarded when given.
        url = self._format_url(self.refresh.metadata['url'], vault_name,
                               resource_group_name, fabric_name)
        query_parameters = self._query_parameters(filter)
        header_parameters = self._header_parameters(custom_headers)

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202]:
            raise self._cloud_error(response)

        if raw:
            return ClientRawResponse(None, response)
    refresh.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers'}
| 49.958442 | 228 | 0.68145 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ProtectionContainersOperations(object):
    """ProtectionContainersOperations operations.

    Operations for fetching, registering, unregistering, inquiring and
    refreshing the backup protection containers of a Recovery Services
    vault.  Every method follows the same shape: format the URL path
    parameters, add the ``api-version`` query parameter and the standard
    headers, send the request through ``self._client``, and either raise
    :class:`CloudError<msrestazure.azure_exceptions.CloudError>` on an
    unexpected status code or return the (optionally raw) response.

    :ivar api_version: Client Api Version. Constant value: "2016-12-01".
    """

    # Model classes used for request/response (de)serialization.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        # client: request pipeline used to build and send HTTP requests.
        # config: client configuration (subscription id, accept language,
        # request-id generation flag).
        # serializer / deserializer: msrest object-model (de)serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version sent with every request (fixed for this client).
        self.api_version = "2016-12-01"
        self.config = config

    def get(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Get details of a protection container registered to the vault.

        :param vault_name: Recovery Services vault name
         (``{vaultName}`` path parameter).
        :param resource_group_name: Resource group of the vault
         (``{resourceGroupName}`` path parameter).
        :param fabric_name: Fabric associated with the container
         (``{fabricName}`` path parameter).
        :param container_name: Container name
         (``{containerName}`` path parameter).
        :param custom_headers: headers that will be added to the request
        :param raw: returns the direct response alongside the deserialized
         response
        :return: ProtectionContainerResource or ClientRawResponse if
         raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'containerName': self._serialize.url("container_name", container_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectionContainerResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def register(
            self, vault_name, resource_group_name, fabric_name, container_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Register a container with the vault (HTTP PUT).

        ``parameters`` is serialized as a ProtectionContainerResource and
        sent as the request body; the remaining parameters are as in
        :meth:`get`.  Accepts 200 (completed, body returned) and 202
        (accepted, asynchronous) responses.

        :return: ProtectionContainerResource or ClientRawResponse if
         raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.register.metadata['url']
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'containerName': self._serialize.url("container_name", container_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ProtectionContainerResource')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        # Only a 200 response carries a resource body to deserialize.
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectionContainerResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    register.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def unregister(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Unregister a container from the vault (HTTP DELETE).

        Parameters are as in :meth:`get`.  Accepts 202 (accepted) and 204
        (no content); no body is returned.

        :return: None or ClientRawResponse if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.unregister.metadata['url']
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'containerName': self._serialize.url("container_name", container_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    unregister.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}

    def inquire(
            self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
        """Trigger an inquiry on a container (HTTP POST to ``.../inquire``).

        Parameters are as in :meth:`get`.  Accepts only 202 (accepted);
        no body is returned.

        :return: None or ClientRawResponse if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.inquire.metadata['url']
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'containerName': self._serialize.url("container_name", container_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    inquire.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire'}

    def refresh(
            self, vault_name, resource_group_name, fabric_name, filter=None, custom_headers=None, raw=False, **operation_config):
        """Refresh (re-discover) the containers of a fabric (HTTP POST).

        :param filter: optional OData ``$filter`` expression added to the
         query string.
        Other parameters are as in :meth:`get`.  Accepts only 202
        (accepted); no body is returned.

        :return: None or ClientRawResponse if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.refresh.metadata['url']
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    refresh.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers'}
| true | true |
f73ee06e0cd088c903736ac6be38d340e03b4d63 | 153 | py | Python | rl_env/gym-tictac4/setup.py | SeanAmmirati/fantasy_football_pred | d361af3afa6595112f80b47571eb5c42a7c69dbf | [
"MIT"
] | null | null | null | rl_env/gym-tictac4/setup.py | SeanAmmirati/fantasy_football_pred | d361af3afa6595112f80b47571eb5c42a7c69dbf | [
"MIT"
] | null | null | null | rl_env/gym-tictac4/setup.py | SeanAmmirati/fantasy_football_pred | d361af3afa6595112f80b47571eb5c42a7c69dbf | [
"MIT"
] | null | null | null | from setuptools import setup
# Minimal packaging definition for the gym_tictac4 environment package.
setup(name='gym_tictac4',
      version='0.0.1',
      install_requires=['gym']  # the only runtime dependency: OpenAI Gym
) | 25.5 | 70 | 0.686275 | from setuptools import setup
# Minimal packaging definition for the gym_tictac4 environment package.
setup(name='gym_tictac4',
      version='0.0.1',
      install_requires=['gym']  # the only runtime dependency: OpenAI Gym
) | true | true |
f73ee0974fc20e64b8d52ff2d9b8a6938da54a2b | 2,079 | py | Python | watevrCTF-2019/challenges/web/NewPwd/train.py | bemrdo/CTF-2019 | 424512f7c43278d72091aa737da78907c14f9fc1 | [
"MIT"
] | null | null | null | watevrCTF-2019/challenges/web/NewPwd/train.py | bemrdo/CTF-2019 | 424512f7c43278d72091aa737da78907c14f9fc1 | [
"MIT"
] | null | null | null | watevrCTF-2019/challenges/web/NewPwd/train.py | bemrdo/CTF-2019 | 424512f7c43278d72091aa737da78907c14f9fc1 | [
"MIT"
] | 1 | 2020-03-14T07:24:12.000Z | 2020-03-14T07:24:12.000Z | import requests
import urllib.parse
import base64
import json
import io
import numpy as np
from PIL import Image
import cv2.cv2 as cv
from solve import *
def combine_and_show_alphabet():
    """Refresh the "alphabet" preview window.

    Builds a sheet showing one template per character — the pixel-wise
    minimum over every sample collected for that character — with the
    first 16 characters on the top row and the rest on the bottom row,
    then displays it with OpenCV.
    """
    def _template(ch):
        # Minimum projection across all stored samples for this character.
        return np.min(trained_key[ch], axis=0)

    # The leading empty (50, 0) strip mirrors the original accumulator and
    # keeps the sheet dtype float64 regardless of the sample dtype.
    upper = np.hstack([np.empty((50, 0))] + [_template(ch) for ch in alphabet[:16]])
    lower = np.hstack([np.empty((50, 0))] + [_template(ch) for ch in alphabet[16:]])
    sheet = np.vstack((upper, lower))
    cv.imshow("alphabet", sheet)
# Interactive training loop: fetch a captcha, slice it into five
# 40-pixel-wide character cells, and file each cell image under the key
# the operator presses (or under the captcha's bundled solution text
# while "cheat" mode is active).
combine_and_show_alphabet()
lastchar = 0       # character the previous sample was filed under (for undo)
count = 0          # running total of character samples stored
cheat_amount = 0   # > 0: auto-label from the solution; decremented per captcha
while True:
    captcha = get_captcha()
    # captcha[2] appears to carry the solution text — used for auto-labelling
    # (see get_captcha in solve.py to confirm).
    solution = list(captcha[2])
    captcha_no_overlay = remove_overlay(captcha)
    # Cut the captcha into its five fixed-width character cells.
    chars = []
    for i in range(5):
        chars.append(captcha_no_overlay[:, i * 40 : (i + 1) * 40])
    while len(chars) != 0:
        cv.imshow("character", chars[0])
        if cheat_amount <= 0:
            key = cv.waitKey(0)
        else:
            key = ord(solution[0].lower())
        # Ignore keys that are neither a known character nor a command key.
        if key not in [ord(char) for char in alphabet.lower()] + [8, 13, 27, 225]:
            continue
        if key == 8:  # backspace: undo — drop the previously stored sample
            trained_key[lastchar].pop()
            combine_and_show_alphabet()
        elif key == 27:  # escape: write all templates to disk and quit
            for char in alphabet:
                cv.imwrite("training/%s.png" % char, np.min(trained_key[char], axis=0))
            cv.destroyAllWindows()
            exit()
        elif key == 13:  # enter: write all templates to disk and continue
            for char in alphabet:
                cv.imwrite("training/%s.png" % char, np.min(trained_key[char], axis=0))
        elif key == 225:  # left shift: auto-label using the solution for ~10 captchas
            key = ord(solution[0].lower())
            cheat_amount = 10
        if key not in [8, 13, 27, 225]:
            # Character key: store this sample and advance to the next cell.
            trained_key[chr(key).upper()].append(chars[0])
            chars.pop(0)
            solution.pop(0)
            lastchar = chr(key).upper()
            combine_and_show_alphabet()
            count += 1
    cheat_amount -= 1
    print(count)
| 31.029851 | 87 | 0.570948 | import requests
import urllib.parse
import base64
import json
import io
import numpy as np
from PIL import Image
import cv2.cv2 as cv
from solve import *
def combine_and_show_alphabet():
    """Display every character's current template in one "alphabet" window.

    Each template is the pixel-wise minimum over all samples collected for
    that character; the first 16 characters form the top strip and the
    remaining characters the bottom strip.
    """
    # Start from empty 50-row strips and append each template side by side.
    imgTop = np.empty((50, 0))
    imgBottom = np.empty((50, 0))
    for char in alphabet[:16]:
        imgTop = np.append(imgTop, np.min(trained_key[char], axis=0), axis=1)
    for char in alphabet[16:]:
        imgBottom = np.append(imgBottom, np.min(trained_key[char], axis=0), axis=1)
    # Stack the two strips vertically (rotate, concatenate, rotate back).
    img = np.rot90(np.append(np.rot90(imgTop), np.rot90(imgBottom), axis=1), 3)
    cv.imshow("alphabet", img)
# Interactive training loop: fetch a captcha, slice it into five
# 40-pixel-wide character cells, and file each cell image under the key
# the operator presses (or under the captcha's bundled solution text
# while "cheat" mode is active).
combine_and_show_alphabet()
lastchar = 0       # character the previous sample was filed under (for undo)
count = 0          # running total of character samples stored
cheat_amount = 0   # > 0: auto-label from the solution; decremented per captcha
while True:
    captcha = get_captcha()
    # captcha[2] appears to carry the solution text — used for auto-labelling
    # (see get_captcha in solve.py to confirm).
    solution = list(captcha[2])
    captcha_no_overlay = remove_overlay(captcha)
    # Cut the captcha into its five fixed-width character cells.
    chars = []
    for i in range(5):
        chars.append(captcha_no_overlay[:, i * 40 : (i + 1) * 40])
    while len(chars) != 0:
        cv.imshow("character", chars[0])
        if cheat_amount <= 0:
            key = cv.waitKey(0)
        else:
            key = ord(solution[0].lower())
        # Ignore keys that are neither a known character nor a command key.
        if key not in [ord(char) for char in alphabet.lower()] + [8, 13, 27, 225]:
            continue
        if key == 8:  # backspace: undo — drop the previously stored sample
            trained_key[lastchar].pop()
            combine_and_show_alphabet()
        elif key == 27:  # escape: write all templates to disk and quit
            for char in alphabet:
                cv.imwrite("training/%s.png" % char, np.min(trained_key[char], axis=0))
            cv.destroyAllWindows()
            exit()
        elif key == 13:  # enter: write all templates to disk and continue
            for char in alphabet:
                cv.imwrite("training/%s.png" % char, np.min(trained_key[char], axis=0))
        elif key == 225:  # left shift: auto-label using the solution for ~10 captchas
            key = ord(solution[0].lower())
            cheat_amount = 10
        if key not in [8, 13, 27, 225]:
            # Character key: store this sample and advance to the next cell.
            trained_key[chr(key).upper()].append(chars[0])
            chars.pop(0)
            solution.pop(0)
            lastchar = chr(key).upper()
            combine_and_show_alphabet()
            count += 1
    cheat_amount -= 1
    print(count)
| true | true |
f73ee10290ecacf9ebc8b85843f1c2704c4354fe | 1,036 | py | Python | src/tests/TestResultsWriter.py | mjyeaney/CustomVisionParallelScoring | 2803cdb13f9512763a4204db4e44e1c9d6e7f7c1 | [
"MIT"
] | 3 | 2019-09-06T19:49:05.000Z | 2019-10-28T19:07:21.000Z | src/tests/TestResultsWriter.py | mjyeaney/CustomVisionParallelScoring | 2803cdb13f9512763a4204db4e44e1c9d6e7f7c1 | [
"MIT"
] | 6 | 2019-09-11T22:49:57.000Z | 2022-03-11T23:57:28.000Z | src/tests/TestResultsWriter.py | mjyeaney/CustomVisionParallelScoring | 2803cdb13f9512763a4204db4e44e1c9d6e7f7c1 | [
"MIT"
] | 2 | 2019-09-17T11:23:00.000Z | 2019-10-28T19:07:30.000Z | import unittest
import sys
import os
import glob
# Make the project root (two directories above this file) importable so
# the ResultsWriter module resolves when the tests run from this folder.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
from ResultsWriter import ImageWithBoundingBoxes
class TestResultsWriter(unittest.TestCase):
    """Smoke tests for ImageWithBoundingBoxes.Write.

    Each test draws bounding boxes onto the sample image and writes the
    result under ./samples/results/; a test passes as long as Write()
    completes without raising.  tearDown removes the generated images.
    """

    def test_basic_box_drawing(self):
        # A single box on the sample image.
        single_box = [(500, 500, 700, 700)]
        writer = ImageWithBoundingBoxes()
        writer.Write("./samples/test-1.jpg", single_box, "./samples/results/test-1-results.jpg")

    def test_multiple_box_drawing(self):
        # Four boxes, partially side by side, on the same sample image.
        many_boxes = [
            (500, 500, 700, 700),
            (800, 500, 1000, 700),
            (900, 900, 1100, 1100),
            (1200, 900, 1400, 1100),
        ]
        writer = ImageWithBoundingBoxes()
        writer.Write("./samples/test-1.jpg", many_boxes, "./samples/results/test-2-results.jpg")

    def tearDown(self):
        # Clean up every image the tests produced.
        for stale in glob.glob("./samples/results/*.jpg"):
            os.remove(stale)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
unittest.main() | 30.470588 | 91 | 0.647683 | import unittest
import sys
import os
import glob
# Make the project root (two directories above this file) importable so
# the ResultsWriter module resolves when the tests run from this folder.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
from ResultsWriter import ImageWithBoundingBoxes
class TestResultsWriter(unittest.TestCase):
    """Smoke tests for ImageWithBoundingBoxes.Write.

    Each test draws bounding boxes onto the sample image and writes the
    result under ./samples/results/; a test passes as long as Write()
    completes without raising.  tearDown removes the generated images.
    """
    def test_basic_box_drawing(self):
        """A single box is drawn onto the sample image."""
        writer = ImageWithBoundingBoxes()
        boxes = []
        boxes.append((500, 500, 700, 700))
        writer.Write("./samples/test-1.jpg", boxes, "./samples/results/test-1-results.jpg")
    def test_multiple_box_drawing(self):
        """Four boxes are drawn onto the same sample image."""
        writer = ImageWithBoundingBoxes()
        boxes = []
        boxes.append((500, 500, 700, 700))
        boxes.append((800, 500, 1000, 700))
        boxes.append((900, 900, 1100, 1100))
        boxes.append((1200, 900, 1400, 1100))
        writer.Write("./samples/test-1.jpg", boxes, "./samples/results/test-2-results.jpg")
    def tearDown(self):
        """Remove every image the tests produced."""
        filesToRemove = glob.glob("./samples/results/*.jpg")
        for f in filesToRemove:
            os.remove(f)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
f73ee15fbc73a5653c396a273bf36cbeb81d3f6f | 18,934 | py | Python | lib/matplotlib/spines.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-04-15T09:40:53.000Z | 2019-04-15T09:40:53.000Z | lib/matplotlib/spines.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/spines.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
rcParams = matplotlib.rcParams
class Spine(mpatches.Patch):
    """an axis spine -- the line noting the data area boundaries

    Spines are the lines connecting the axis tick marks and noting the
    boundaries of the data area. They can be placed at arbitrary
    positions. See :meth:`~matplotlib.spines.Spine.set_position`
    for more information.

    The default position is ``('outward', 0)``.

    Spines are subclasses of :class:`~matplotlib.patches.Patch`, and
    inherit much of their behavior.

    Spines draw a line or a circle, depending if
    :meth:`~matplotlib.spines.Spine.set_patch_line` or
    :meth:`~matplotlib.spines.Spine.set_patch_circle` has been
    called. Line-like is the default.
    """
    def __str__(self):
        return "Spine"

    @docstring.dedent_interpd
    def __init__(self, axes, spine_type, path, **kwargs):
        """
        - *axes* : the Axes instance containing the spine
        - *spine_type* : a string specifying the spine type
        - *path* : the path instance used to draw the spine

        Valid kwargs are:
        %(Patch)s
        """
        super(Spine, self).__init__(**kwargs)
        self.axes = axes
        self.set_figure(self.axes.figure)
        self.spine_type = spine_type
        self.set_facecolor('none')
        self.set_edgecolor(rcParams['axes.edgecolor'])
        self.set_linewidth(rcParams['axes.linewidth'])
        self.set_capstyle('projecting')
        self.axis = None

        self.set_zorder(2.5)
        self.set_transform(self.axes.transData)  # default transform

        self._bounds = None  # default bounds
        self._smart_bounds = False

        # Defer initial position determination. (Not much support for
        # non-rectangular axes is currently implemented, and this lets
        # them pass through the spines machinery without errors.)
        self._position = None
        if not isinstance(path, matplotlib.path.Path):
            msg = "'path' must be an instance of 'matplotlib.path.Path'"
            raise ValueError(msg)
        self._path = path

        # To support drawing both linear and circular spines, this
        # class implements Patch behavior two ways. If
        # self._patch_type == 'line', behave like a mpatches.PathPatch
        # instance. If self._patch_type == 'circle', behave like a
        # mpatches.Ellipse instance.
        self._patch_type = 'line'

        # Behavior copied from mpatches.Ellipse:
        # Note: This cannot be calculated until this is added to an Axes
        self._patch_transform = mtransforms.IdentityTransform()

    def set_smart_bounds(self, value):
        """set the spine and associated axis to have smart bounds"""
        self._smart_bounds = value

        # also set the axis if possible
        if self.spine_type in ('left', 'right'):
            self.axes.yaxis.set_smart_bounds(value)
        elif self.spine_type in ('top', 'bottom'):
            self.axes.xaxis.set_smart_bounds(value)

    def get_smart_bounds(self):
        """get whether the spine has smart bounds"""
        return self._smart_bounds

    def set_patch_circle(self, center, radius):
        """set the spine to be circular"""
        self._patch_type = 'circle'
        self._center = center
        self._width = radius * 2
        self._height = radius * 2
        self._angle = 0
        # circle drawn on axes transform
        self.set_transform(self.axes.transAxes)

    def set_patch_line(self):
        """set the spine to be linear"""
        self._patch_type = 'line'

    # Behavior copied from mpatches.Ellipse:
    def _recompute_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
        not directly access the transformation member variable.
        """
        assert self._patch_type == 'circle'
        center = (self.convert_xunits(self._center[0]),
                  self.convert_yunits(self._center[1]))
        width = self.convert_xunits(self._width)
        height = self.convert_yunits(self._height)
        self._patch_transform = mtransforms.Affine2D() \
            .scale(width * 0.5, height * 0.5) \
            .rotate_deg(self._angle) \
            .translate(*center)

    def get_patch_transform(self):
        # Circular spines recompute their transform lazily (unit
        # conversion needs the Axes); linear spines defer to Patch.
        if self._patch_type == 'circle':
            self._recompute_transform()
            return self._patch_transform
        else:
            return super(Spine, self).get_patch_transform()

    def get_path(self):
        return self._path

    def _ensure_position_is_set(self):
        if self._position is None:
            # default position
            self._position = ('outward', 0.0)  # in points
            self.set_position(self._position)

    def register_axis(self, axis):
        """register an axis

        An axis should be registered with its corresponding spine from
        the Axes instance. This allows the spine to clear any axis
        properties when needed.
        """
        self.axis = axis
        if self.axis is not None:
            self.axis.cla()

    def cla(self):
        """Clear the current spine"""
        self._position = None  # clear position
        if self.axis is not None:
            self.axis.cla()

    def is_frame_like(self):
        """return True if directly on axes frame

        This is useful for determining if a spine is the edge of an
        old style MPL plot. If so, this function will return True.
        """
        self._ensure_position_is_set()
        position = self._position
        if cbook.is_string_like(position):
            if position == 'center':
                position = ('axes', 0.5)
            elif position == 'zero':
                position = ('data', 0)
        if len(position) != 2:
            raise ValueError("position should be 2-tuple")
        position_type, amount = position
        if position_type == 'outward' and amount == 0:
            return True
        else:
            return False

    def _adjust_location(self):
        """automatically set spine bounds to the view interval"""

        if self.spine_type == 'circle':
            return

        if self._bounds is None:
            if self.spine_type in ('left', 'right'):
                low, high = self.axes.viewLim.intervaly
            elif self.spine_type in ('top', 'bottom'):
                low, high = self.axes.viewLim.intervalx
            else:
                raise ValueError('unknown spine spine_type: %s' %
                                 self.spine_type)

            if self._smart_bounds:
                # attempt to set bounds in sophisticated way
                if low > high:
                    # handle inverted limits
                    low, high = high, low

                viewlim_low = low
                viewlim_high = high

                del low, high

                if self.spine_type in ('left', 'right'):
                    datalim_low, datalim_high = self.axes.dataLim.intervaly
                    ticks = self.axes.get_yticks()
                elif self.spine_type in ('top', 'bottom'):
                    datalim_low, datalim_high = self.axes.dataLim.intervalx
                    ticks = self.axes.get_xticks()
                # handle inverted limits
                ticks = list(ticks)
                ticks.sort()
                ticks = np.array(ticks)
                if datalim_low > datalim_high:
                    datalim_low, datalim_high = datalim_high, datalim_low

                if datalim_low < viewlim_low:
                    # Data extends past view. Clip line to view.
                    low = viewlim_low
                else:
                    # Data ends before view ends.
                    cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
                    tickvals = ticks[cond]
                    if len(tickvals):
                        # A tick is less than or equal to lowest data point.
                        low = tickvals[-1]
                    else:
                        # No tick is available
                        low = datalim_low
                    low = max(low, viewlim_low)

                if datalim_high > viewlim_high:
                    # Data extends past view. Clip line to view.
                    high = viewlim_high
                else:
                    # Data ends before view ends.
                    cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
                    tickvals = ticks[cond]
                    if len(tickvals):
                        # A tick is greater than or equal to highest data
                        # point.
                        high = tickvals[0]
                    else:
                        # No tick is available
                        high = datalim_high
                    high = min(high, viewlim_high)

        else:
            low, high = self._bounds

        # Write the new endpoints directly into the two-vertex path.
        v1 = self._path.vertices
        assert v1.shape == (2, 2), 'unexpected vertices shape'
        if self.spine_type in ['left', 'right']:
            v1[0, 1] = low
            v1[1, 1] = high
        elif self.spine_type in ['bottom', 'top']:
            v1[0, 0] = low
            v1[1, 0] = high
        else:
            raise ValueError('unable to set bounds for spine "%s"' %
                             self.spine_type)

    @allow_rasterization
    def draw(self, renderer):
        """Draw the spine, first updating its bounds to the current view."""
        self._adjust_location()
        return super(Spine, self).draw(renderer)

    def _calc_offset_transform(self):
        """calculate the offset transform performed by the spine"""
        self._ensure_position_is_set()
        position = self._position
        if cbook.is_string_like(position):
            if position == 'center':
                position = ('axes', 0.5)
            elif position == 'zero':
                position = ('data', 0)
        assert len(position) == 2, "position should be 2-tuple"
        position_type, amount = position
        assert position_type in ('axes', 'outward', 'data')
        if position_type == 'outward':
            if amount == 0:
                # short circuit commonest case
                self._spine_transform = ('identity',
                                         mtransforms.IdentityTransform())
            elif self.spine_type in ['left', 'right', 'top', 'bottom']:
                offset_vec = {'left': (-1, 0),
                              'right': (1, 0),
                              'bottom': (0, -1),
                              'top': (0, 1),
                              }[self.spine_type]
                # calculate x and y offset in dots
                offset_x = amount * offset_vec[0] / 72.0
                offset_y = amount * offset_vec[1] / 72.0
                self._spine_transform = ('post',
                                         mtransforms.ScaledTranslation(
                                             offset_x,
                                             offset_y,
                                             self.figure.dpi_scale_trans))
            else:
                warnings.warn('unknown spine type "%s": no spine '
                              'offset performed' % self.spine_type)
                self._spine_transform = ('identity',
                                         mtransforms.IdentityTransform())
        elif position_type == 'axes':
            if self.spine_type in ('left', 'right'):
                self._spine_transform = ('pre',
                                         mtransforms.Affine2D.from_values(
                                             # keep y unchanged, fix x at
                                             # amount
                                             0, 0, 0, 1, amount, 0))
            elif self.spine_type in ('bottom', 'top'):
                self._spine_transform = ('pre',
                                         mtransforms.Affine2D.from_values(
                                             # keep x unchanged, fix y at
                                             # amount
                                             1, 0, 0, 0, 0, amount))
            else:
                warnings.warn('unknown spine type "%s": no spine '
                              'offset performed' % self.spine_type)
                self._spine_transform = ('identity',
                                         mtransforms.IdentityTransform())
        elif position_type == 'data':
            if self.spine_type in ('right', 'top'):
                # The right and top spines have a default position of 1 in
                # axes coordinates. When specifying the position in data
                # coordinates, we need to calculate the position relative to 0.
                amount -= 1
            if self.spine_type in ('left', 'right'):
                self._spine_transform = ('data',
                                         mtransforms.Affine2D().translate(
                                             amount, 0))
            elif self.spine_type in ('bottom', 'top'):
                self._spine_transform = ('data',
                                         mtransforms.Affine2D().translate(
                                             0, amount))
            else:
                warnings.warn('unknown spine type "%s": no spine '
                              'offset performed' % self.spine_type)
                self._spine_transform = ('identity',
                                         mtransforms.IdentityTransform())

    def set_position(self, position):
        """set the position of the spine

        Spine position is specified by a 2 tuple of (position type,
        amount). The position types are:

        * 'outward' : place the spine out from the data area by the
          specified number of points. (Negative values specify placing the
          spine inward.)

        * 'axes' : place the spine at the specified Axes coordinate (from
          0.0-1.0).

        * 'data' : place the spine at the specified data coordinate.

        Additionally, shorthand notations define a special positions:

        * 'center' -> ('axes', 0.5)
        * 'zero' -> ('data', 0.0)
        """
        if position in ('center', 'zero'):
            # special positions
            pass
        else:
            if len(position) != 2:
                raise ValueError("position should be 'center' or 2-tuple")
            if position[0] not in ['outward', 'axes', 'data']:
                msg = ("position[0] should be in [ 'outward' | 'axes' |"
                       " 'data' ]")
                raise ValueError(msg)
        self._position = position
        self._calc_offset_transform()

        self.set_transform(self.get_spine_transform())

        # Ticks are positioned relative to the spine; reposition them.
        if self.axis is not None:
            self.axis.reset_ticks()

    def get_position(self):
        """get the spine position"""
        self._ensure_position_is_set()
        return self._position

    def get_spine_transform(self):
        """get the spine transform"""
        self._ensure_position_is_set()
        what, how = self._spine_transform

        if what == 'data':
            # special case data based spine locations
            data_xform = self.axes.transScale + \
                (how + self.axes.transLimits + self.axes.transAxes)
            if self.spine_type in ['left', 'right']:
                result = mtransforms.blended_transform_factory(
                    data_xform, self.axes.transData)
            elif self.spine_type in ['top', 'bottom']:
                result = mtransforms.blended_transform_factory(
                    self.axes.transData, data_xform)
            else:
                raise ValueError('unknown spine spine_type: %s' %
                                 self.spine_type)
            return result

        if self.spine_type in ['left', 'right']:
            base_transform = self.axes.get_yaxis_transform(which='grid')
        elif self.spine_type in ['top', 'bottom']:
            base_transform = self.axes.get_xaxis_transform(which='grid')
        else:
            raise ValueError('unknown spine spine_type: %s' %
                             self.spine_type)

        if what == 'identity':
            return base_transform
        elif what == 'post':
            return base_transform + how
        elif what == 'pre':
            return how + base_transform
        else:
            raise ValueError("unknown spine_transform type: %s" % what)

    def set_bounds(self, low, high):
        """Set the bounds of the spine."""
        if self.spine_type == 'circle':
            raise ValueError(
                'set_bounds() method incompatible with circular spines')
        self._bounds = (low, high)

    def get_bounds(self):
        """Get the bounds of the spine."""
        return self._bounds

    @classmethod
    def linear_spine(cls, axes, spine_type, **kwargs):
        """
        (classmethod) Returns a linear :class:`Spine`.
        """
        # all values of 13 get replaced upon call to set_bounds()
        if spine_type == 'left':
            path = mpath.Path([(0.0, 13), (0.0, 13)])
        elif spine_type == 'right':
            path = mpath.Path([(1.0, 13), (1.0, 13)])
        elif spine_type == 'bottom':
            path = mpath.Path([(13, 0.0), (13, 0.0)])
        elif spine_type == 'top':
            path = mpath.Path([(13, 1.0), (13, 1.0)])
        else:
            raise ValueError('unable to make path for spine "%s"' % spine_type)
        result = cls(axes, spine_type, path, **kwargs)
        return result

    @classmethod
    def circular_spine(cls, axes, center, radius, **kwargs):
        """
        (classmethod) Returns a circular :class:`Spine`.
        """
        path = mpath.Path.unit_circle()
        spine_type = 'circle'
        result = cls(axes, spine_type, path, **kwargs)
        result.set_patch_circle(center, radius)
        return result

    def set_color(self, c):
        """
        Set the edgecolor.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples

        .. seealso::

            :meth:`set_facecolor`, :meth:`set_edgecolor`
               For setting the edge or face color individually.
        """
        # The facecolor of a spine is always 'none' by default -- let
        # the user change it manually if desired.
        self.set_edgecolor(c)
| 38.719836 | 79 | 0.538344 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
rcParams = matplotlib.rcParams
class Spine(mpatches.Patch):
def __str__(self):
return "Spine"
@docstring.dedent_interpd
def __init__(self, axes, spine_type, path, **kwargs):
super(Spine, self).__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.figure)
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(rcParams['axes.edgecolor'])
self.set_linewidth(rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData)
self._bounds = None
self._smart_bounds = False
self._position = None
if not isinstance(path, matplotlib.path.Path):
msg = "'path' must be an instance of 'matplotlib.path.Path'"
raise ValueError(msg)
self._path = path
self._patch_type = 'line'
self._patch_transform = mtransforms.IdentityTransform()
def set_smart_bounds(self, value):
self._smart_bounds = value
if self.spine_type in ('left', 'right'):
self.axes.yaxis.set_smart_bounds(value)
elif self.spine_type in ('top', 'bottom'):
self.axes.xaxis.set_smart_bounds(value)
def get_smart_bounds(self):
return self._smart_bounds
def set_patch_circle(self, center, radius):
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._angle = 0
self.set_transform(self.axes.transAxes)
def set_patch_line(self):
self._patch_type = 'line'
def _recompute_transform(self):
assert self._patch_type == 'circle'
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self._angle) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type == 'circle':
self._recompute_transform()
return self._patch_transform
else:
return super(Spine, self).get_patch_transform()
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
self._position = ('outward', 0.0)
self.set_position(self._position)
def register_axis(self, axis):
self.axis = axis
if self.axis is not None:
self.axis.cla()
def cla(self):
self._position = None
if self.axis is not None:
self.axis.cla()
def is_frame_like(self):
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == 'outward' and amount == 0:
return True
else:
return False
def _adjust_location(self):
if self.spine_type == 'circle':
return
if self._bounds is None:
if self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if self._smart_bounds:
if low > high:
low, high = high, low
viewlim_low = low
viewlim_high = high
del low, high
if self.spine_type in ('left', 'right'):
datalim_low, datalim_high = self.axes.dataLim.intervaly
ticks = self.axes.get_yticks()
elif self.spine_type in ('top', 'bottom'):
datalim_low, datalim_high = self.axes.dataLim.intervalx
ticks = self.axes.get_xticks()
ticks = list(ticks)
ticks.sort()
ticks = np.array(ticks)
if datalim_low > datalim_high:
datalim_low, datalim_high = datalim_high, datalim_low
if datalim_low < viewlim_low:
low = viewlim_low
else:
cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
tickvals = ticks[cond]
if len(tickvals):
low = tickvals[-1]
else:
low = datalim_low
low = max(low, viewlim_low)
if datalim_high > viewlim_high:
high = viewlim_high
else:
cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
tickvals = ticks[cond]
if len(tickvals):
high = tickvals[0]
else:
high = datalim_high
high = min(high, viewlim_high)
else:
low, high = self._bounds
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
return super(Spine, self).draw(renderer)
def _calc_offset_transform(self):
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
assert position_type in ('axes', 'outward', 'data')
if position_type == 'outward':
if amount == 0:
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif self.spine_type in ['left', 'right', 'top', 'bottom']:
offset_vec = {'left': (-1, 0),
'right': (1, 0),
'bottom': (0, -1),
'top': (0, 1),
}[self.spine_type]
offset_x = amount * offset_vec[0] / 72.0
offset_y = amount * offset_vec[1] / 72.0
self._spine_transform = ('post',
mtransforms.ScaledTranslation(
offset_x,
offset_y,
self.figure.dpi_scale_trans))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'axes':
if self.spine_type in ('left', 'right'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
0, 0, 0, 1, amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
1, 0, 0, 0, 0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
amount -= 1
if self.spine_type in ('left', 'right'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
def set_position(self, position):
if position in ('center', 'zero'):
pass
else:
if len(position) != 2:
raise ValueError("position should be 'center' or 2-tuple")
if position[0] not in ['outward', 'axes', 'data']:
msg = ("position[0] should be in [ 'outward' | 'axes' |"
" 'data' ]")
raise ValueError(msg)
self._position = position
self._calc_offset_transform()
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
def get_position(self):
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
self._ensure_position_is_set()
what, how = self._spine_transform
if what == 'data':
data_xform = self.axes.transScale + \
(how + self.axes.transLimits + self.axes.transAxes)
if self.spine_type in ['left', 'right']:
result = mtransforms.blended_transform_factory(
data_xform, self.axes.transData)
elif self.spine_type in ['top', 'bottom']:
result = mtransforms.blended_transform_factory(
self.axes.transData, data_xform)
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
return result
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if what == 'identity':
return base_transform
elif what == 'post':
return base_transform + how
elif what == 'pre':
return how + base_transform
else:
raise ValueError("unknown spine_transform type: %s" % what)
def set_bounds(self, low, high):
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
self._bounds = (low, high)
def get_bounds(self):
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
if spine_type == 'left':
path = mpath.Path([(0.0, 13), (0.0, 13)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 13), (1.0, 13)])
elif spine_type == 'bottom':
path = mpath.Path([(13, 0.0), (13, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(13, 1.0), (13, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
self.set_edgecolor(c)
| true | true |
f73ee32bc10b29282276512bc591b64393af49f2 | 657 | py | Python | model/new_user.py | MrSedek/-dev | 12eed868cd706a7e598e92a7f0e06c8fdd9aca29 | [
"Apache-2.0"
] | null | null | null | model/new_user.py | MrSedek/-dev | 12eed868cd706a7e598e92a7f0e06c8fdd9aca29 | [
"Apache-2.0"
] | null | null | null | model/new_user.py | MrSedek/-dev | 12eed868cd706a7e598e92a7f0e06c8fdd9aca29 | [
"Apache-2.0"
] | null | null | null | """
class NewUser:
    """Plain data holder for a new contact/user record.

    All fields are stored verbatim on the instance; no validation is
    performed here.
    """

    def __init__(self, fName, mName, lName, nickName, photo, title, company,
                 address, hTel, mTel, wTel, eMail, homepage, sAddress, sHome,
                 sNotes, birthday=None):
        # Name fields.
        self.fName = fName
        self.mName = mName
        self.lName = lName
        self.nickName = nickName
        # Profile / work details.
        self.photo = photo
        self.title = title
        self.company = company
        self.address = address
        # Phone numbers: home, mobile, work.
        self.hTel = hTel
        self.mTel = mTel
        self.wTel = wTel
        # Online contact details.
        self.eMail = eMail
        self.homepage = homepage
        # Bug fix: ``birthday`` was assigned but never accepted as a
        # parameter, so every instantiation raised NameError.  It is now
        # an optional keyword argument (default None) appended to the
        # signature, keeping all existing positional callers working.
        self.birthday = birthday
        # Secondary address / secondary home / free-form notes.
        self.sAddress = sAddress
        self.sHome = sHome
        self.sNotes = sNotes
""" | 29.863636 | 146 | 0.584475 | true | true | |
f73ee4e9ce460650c7d139ea552aab8018d7f942 | 244 | py | Python | slaterify/utils.py | merindorium/slaterify | 3af3f4d811ae43442b13a532e932c34e301b90c6 | [
"MIT"
] | 1 | 2019-04-10T19:53:56.000Z | 2019-04-10T19:53:56.000Z | slaterify/utils.py | merindorium/slaterify | 3af3f4d811ae43442b13a532e932c34e301b90c6 | [
"MIT"
] | null | null | null | slaterify/utils.py | merindorium/slaterify | 3af3f4d811ae43442b13a532e932c34e301b90c6 | [
"MIT"
] | null | null | null | _components = {}
def add_component(path, data):
    """Register *data* in the module-level registry under *path*,
    overwriting any previous entry."""
    _components[path] = data
def get_component(path):
    """Return the component registered under *path*.

    Raises NameError when nothing has been registered for *path*.
    """
    if path not in _components:
        raise NameError('There is no component with path {}'.format(path))
    return _components[path]
| 18.769231 | 74 | 0.663934 | _components = {}
def add_component(path, data):
_components[path] = data
def get_component(path):
try:
return _components[path]
except KeyError:
raise NameError('There is no component with path {}'.format(path))
| true | true |
f73ee516b43ca3f50af6df73c9a1ebb43b5efd25 | 4,763 | py | Python | src/beanmachine/ppl/inference/tests/inference_test.py | ToddSmall/beanmachine | 85768bd1785bf6a8b3760a04f37a8fca69b4e4ca | [
"MIT"
] | null | null | null | src/beanmachine/ppl/inference/tests/inference_test.py | ToddSmall/beanmachine | 85768bd1785bf6a8b3760a04f37a8fca69b4e4ca | [
"MIT"
] | null | null | null | src/beanmachine/ppl/inference/tests/inference_test.py | ToddSmall/beanmachine | 85768bd1785bf6a8b3760a04f37a8fca69b4e4ca | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import (
BaseProposer,
)
from beanmachine.ppl.world import World, init_from_prior
class SampleModel:
    """Toy model: foo ~ Normal(0, 1), bar ~ Normal(foo, 1), baz = 2 * bar."""
    @bm.random_variable
    def foo(self):
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def bar(self):
        return dist.Normal(self.foo(), 1.0)
    @bm.functional
    def baz(self):
        # Deterministic functional of bar; queryable but not a latent.
        return self.bar() * 2.0
class SampleDoubleModel:
    """Same foo/bar chain as SampleModel, but in float64 (double) precision."""
    @bm.random_variable
    def foo(self):
        return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
    @bm.random_variable
    def bar(self):
        return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
    """End-to-end MH inference, serial and fork-based parallel.

    Checks queried nodes are present, sample tensors have shape
    (num_chains, num_samples), adaptive steps are counted, and chains
    use distinct RNG streams.
    """
    if multiprocess and sys.platform.startswith("win"):
        pytest.skip(
            "Windows does not support fork-based multiprocessing (which is necessary "
            "for running parallel inference within pytest."
        )
    model = SampleModel()
    mh = bm.SingleSiteAncestralMetropolisHastings()
    queries = [model.foo(), model.baz()]
    observations = {model.bar(): torch.tensor(0.5)}
    num_samples = 30
    num_chains = 2
    samples = mh.infer(
        queries,
        observations,
        num_samples,
        num_adaptive_samples=num_samples,
        num_chains=num_chains,
        run_in_parallel=multiprocess,
        mp_context="fork",
    )
    assert model.foo() in samples
    assert isinstance(samples[model.foo()], torch.Tensor)
    assert samples[model.foo()].shape == (num_chains, num_samples)
    # Adaptive samples equal num_samples, so the total is doubled.
    assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
    # make sure that the RNG state for each chain is different
    assert not torch.equal(
        samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
    )
def test_get_proposers():
    """get_proposers() should yield only BaseProposer instances."""
    world = World()
    model = SampleModel()
    # Calling bar() materializes it (and its ancestor foo) in the world.
    world.call(model.bar())
    nuts = bm.GlobalNoUTurnSampler()
    proposers = nuts.get_proposers(world, world.latent_nodes, 10)
    assert all(isinstance(proposer, BaseProposer) for proposer in proposers)
def test_initialize_world():
    """Initializing from bar() must also pull in its ancestor foo()."""
    model = SampleModel()
    nuts = bm.GlobalNoUTurnSampler()
    world = nuts._initialize_world([model.bar()], {})
    assert model.foo() in world
    assert model.bar() in world
def test_initialize_from_prior():
    """Repeated prior initialization should resemble draws from Normal(0, 1)."""
    sampler = bm.SingleSiteAncestralMetropolisHastings()
    model = SampleModel()
    query = model.foo()
    draws = [
        sampler._initialize_world([query], {}, init_from_prior).get(query).item()
        for _ in range(10000)
    ]
    # Distinct consecutive draws show initialization is random, not fixed.
    assert draws[0] != draws[1]
    # The empirical mean should sit near the prior mean of 0.
    assert math.isclose(sum(draws) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
    """Invalid initial values must be re-sampled; hopeless ones must raise."""
    mh = bm.SingleSiteAncestralMetropolisHastings()
    @bm.random_variable
    def foo():
        return dist.Uniform(3.0, 5.0)
    # Verify that initialization retries until a finite value is produced:
    # the first two attempts return NaN, the third a valid sample.
    retries = 0
    def init_after_three_tries(d: dist.Distribution):
        nonlocal retries
        retries += 1
        return torch.tensor(float("nan")) if retries < 3 else d.sample()
    sampler = mh.sampler(
        [foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
    )
    for world in sampler:
        assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
    # An extreme case where the init value is always outside the support
    # (Uniform(3, 5) never contains 0), so initialization can never succeed.
    def init_to_zero(d: dist.Distribution):
        return torch.zeros_like(d.sample())
    with pytest.raises(ValueError, match="Cannot find a valid initialization"):
        mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
    "algorithm",
    [
        bm.GlobalNoUTurnSampler(),
        bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
        bm.SingleSiteAncestralMetropolisHastings(),
        bm.SingleSiteNewtonianMonteCarlo(),
        bm.SingleSiteUniformMetropolisHastings(),
    ],
)
def test_inference_with_double_dtype(algorithm):
    """Each algorithm should run on, and preserve, float64 observations."""
    model = SampleDoubleModel()
    queries = [model.foo()]
    bar_val = torch.tensor(0.5).double()
    # make sure that the inference can run successfully
    samples = algorithm.infer(
        queries,
        {model.bar(): bar_val},
        num_samples=20,
        num_chains=1,
    )
    # Output samples must keep the observation's double dtype.
    assert samples[model.foo()].dtype == bar_val.dtype
| 29.76875 | 86 | 0.678564 |
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import (
BaseProposer,
)
from beanmachine.ppl.world import World, init_from_prior
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@bm.functional
def baz(self):
return self.bar() * 2.0
class SampleDoubleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
if multiprocess and sys.platform.startswith("win"):
pytest.skip(
"Windows does not support fork-based multiprocessing (which is necessary "
"for running parallel inference within pytest."
)
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.foo(), model.baz()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
samples = mh.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
run_in_parallel=multiprocess,
mp_context="fork",
)
assert model.foo() in samples
assert isinstance(samples[model.foo()], torch.Tensor)
assert samples[model.foo()].shape == (num_chains, num_samples)
assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
assert not torch.equal(
samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
)
def test_get_proposers():
world = World()
model = SampleModel()
world.call(model.bar())
nuts = bm.GlobalNoUTurnSampler()
proposers = nuts.get_proposers(world, world.latent_nodes, 10)
assert all(isinstance(proposer, BaseProposer) for proposer in proposers)
def test_initialize_world():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
world = nuts._initialize_world([model.bar()], {})
assert model.foo() in world
assert model.bar() in world
def test_initialize_from_prior():
mh = bm.SingleSiteAncestralMetropolisHastings()
model = SampleModel()
queries = [model.foo()]
samples_from_prior = []
for _ in range(10000):
world = mh._initialize_world(queries, {}, init_from_prior)
val = world.get(model.foo())
samples_from_prior.append(val.item())
assert samples_from_prior[0] != samples_from_prior[1]
assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
mh = bm.SingleSiteAncestralMetropolisHastings()
@bm.random_variable
def foo():
return dist.Uniform(3.0, 5.0)
retries = 0
def init_after_three_tries(d: dist.Distribution):
nonlocal retries
retries += 1
return torch.tensor(float("nan")) if retries < 3 else d.sample()
sampler = mh.sampler(
[foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
)
for world in sampler:
assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
def init_to_zero(d: dist.Distribution):
return torch.zeros_like(d.sample())
with pytest.raises(ValueError, match="Cannot find a valid initialization"):
mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteNewtonianMonteCarlo(),
bm.SingleSiteUniformMetropolisHastings(),
],
)
def test_inference_with_double_dtype(algorithm):
model = SampleDoubleModel()
queries = [model.foo()]
bar_val = torch.tensor(0.5).double()
samples = algorithm.infer(
queries,
{model.bar(): bar_val},
num_samples=20,
num_chains=1,
)
assert samples[model.foo()].dtype == bar_val.dtype
| true | true |
f73ee62acd60598726c4a8b903210294712cd4c2 | 3,813 | py | Python | config.py | trantinan2512/Francis | f5f7cd3c5af6efd36d6c25c0c516dbf286195f11 | [
"MIT"
] | null | null | null | config.py | trantinan2512/Francis | f5f7cd3c5af6efd36d6c25c0c516dbf286195f11 | [
"MIT"
] | 2 | 2020-02-11T23:06:52.000Z | 2020-06-05T18:46:58.000Z | config.py | trantinan2512/francis-discord-bot | f5f7cd3c5af6efd36d6c25c0c516dbf286195f11 | [
"MIT"
] | 1 | 2019-06-12T21:33:20.000Z | 2019-06-12T21:33:20.000Z | import os
from decouple import config, Csv
DEBUG = config('DEBUG', default=False, cast=bool)
FRANCIS_TOKEN = config('FRANCIS_TOKEN')
OZ_TOKEN = config('OZ_TOKEN')
MY_ID = config('MY_ID', cast=int)
SERVER_ID = config('SERVER_ID', cast=int)
BOT_PREFIX = config('BOT_PREFIX', default='!')
# Twitter stuff
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET')
TWITTER_OWNER = config('TWITTER_OWNER')
TWITTER_OWNER_ID = config('TWITTER_OWNER_ID')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_TEST_ACCESS_TOKEN')
# msvn_discordbot/config.py -> msvn_discordbot
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# GOOGLE API AUTH
GAPI_AUTH_DICT = {
"type": config('GAPI_TYPE'),
"project_id": config('GAPI_PROJECT_ID'),
"private_key_id": config('GAPI_PRIVATE_KEY_ID'),
"private_key": config('GAPI_PRIVATE_KEY').replace('\\n', '\n'),
"client_email": config('GAPI_CLIENT_EMAIL'),
"client_id": config('GAPI_CLIENT_ID'),
"auth_uri": config('GAPI_AUTH_URI'),
"token_uri": config('GAPI_TOKEN_URI'),
"auth_provider_x509_cert_url": config('GAPI_AUTH_PROVIDER_X509_CERT_URL'),
"client_x509_cert_url": config('GAPI_CLIENT_X509_CERT_URL'),
}
AUTOASIGN_COLOR_ROLES = [
'Anchor',
'Apricot',
'Black',
'Canary',
'Cardinal',
'Carrot',
'Chateau',
'Fire',
'Forest',
'Green',
'Neon Green',
'Lilac',
'Maroon',
'Mint',
'Ocean',
'Pink',
'Prussian',
'Ruby',
'Slate',
'Steel',
'Tawny',
'Teal',
'Violet',
]
AUTOASIGN_CHANNEL_ROLES = [
'GMS',
'GMSM',
'GMS2',
'THMS',
'KMS',
'KMS2',
'KMSM',
]
AUTOASIGN_NOTIFY_ROLES = [
'Notify GMS',
'Notify GMSM'
]
AUTOASIGN_DAWN_NOTIFY_ROLES = [
'Event Notify',
]
AUTOASIGN_DAWN_EVENT_ROLES = [
'Investigator',
]
AUTOASIGN_JOB_ROLES = [
'Hero', 'Dark Knight', 'Paladin',
'Bowmaster', 'Marksman',
'Arch Mage IL', 'Arch Mage FP', 'Bishop',
'Night Lord', 'Shadower', 'Dual Blade',
'Buccaneer', 'Corsair', 'Cannoneer', 'Jett',
'Dawn Warrior', 'Wind Archer', 'Blaze Wizard', 'Night Walker', 'Thunder Breaker', 'Mihile',
'Mercedes', 'Aran', 'Phantom', 'Luminous', 'Evan', 'Shade',
'Battle Mage', 'Wild Hunter', 'Mechanic', 'Blaster', 'Xenon',
'Demon Slayer', 'Demon Avenger',
'Kaiser', 'Angelic Buster', 'Cadena',
'Hayato', 'Kanna',
'Illium', 'Ark',
'Beast Tamer', 'Kinesis', 'Zero', 'Pink Bean',
'Beginner', 'Citizen',
]
PONPON_ROLE_REACT_ROLE_IDS = config('PONPON_ROLE_REACT_ROLE_IDS', cast=Csv(int))
PONPON_ROLE_REACT_EMOJI_IDS = config('PONPON_ROLE_REACT_EMOJI_IDS', cast=Csv(int))
AUTOASIGN_ROLES = AUTOASIGN_COLOR_ROLES + AUTOASIGN_CHANNEL_ROLES + AUTOASIGN_JOB_ROLES
AUTOASIGN_ROLES += AUTOASIGN_NOTIFY_ROLES + AUTOASIGN_DAWN_NOTIFY_ROLES + AUTOASIGN_DAWN_EVENT_ROLES
AUTOASIGN_ROLES += PONPON_ROLE_REACT_ROLE_IDS
DAWN_SERVER_ID = 364323564737789953
MSVN_SERVER_ID = 453555802670759947
PON_SERVER_ID = 753267469413777508
REACT_FOR_ROLE_MESSAGE_ID = 472966572340674560
REACT_FOR_NOTIFICATION_ROLE_MESSAGE_ID = 472967231781863427
DAWN_REACT_FOR_ROLE_MESSAGE_ID = config('DAWN_REACT_FOR_ROLE_MESSAGE_ID', cast=int, default=557788422379667457)
DAWN_REACT_FOR_ROLE_EMOJI_IDS = config('DAWN_REACT_FOR_ROLE_EMOJI_IDS', cast=Csv())
DAWN_REACT_FOR_ROLE_ROLE_IDS = config('DAWN_REACT_FOR_ROLE_ROLE_IDS', cast=Csv())
DAWN_COLOR_CHANGE_ROLE_IDS = config('DAWN_COLOR_CHANGE_ROLE_IDS', cast=Csv(int))
EMBED_DEFAULT_COLOR = 0xf900e5
DISCORD_ROLE_FOR_INVESTIGATION = config('DISCORD_ROLE_FOR_INVESTIGATION', default='Investigator')
SPIDER_DELAY = config('SPIDER_DELAY', default=30, cast=int)
| 29.10687 | 111 | 0.717545 | import os
from decouple import config, Csv
DEBUG = config('DEBUG', default=False, cast=bool)
FRANCIS_TOKEN = config('FRANCIS_TOKEN')
OZ_TOKEN = config('OZ_TOKEN')
MY_ID = config('MY_ID', cast=int)
SERVER_ID = config('SERVER_ID', cast=int)
BOT_PREFIX = config('BOT_PREFIX', default='!')
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET')
TWITTER_OWNER = config('TWITTER_OWNER')
TWITTER_OWNER_ID = config('TWITTER_OWNER_ID')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_TEST_ACCESS_TOKEN')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
GAPI_AUTH_DICT = {
"type": config('GAPI_TYPE'),
"project_id": config('GAPI_PROJECT_ID'),
"private_key_id": config('GAPI_PRIVATE_KEY_ID'),
"private_key": config('GAPI_PRIVATE_KEY').replace('\\n', '\n'),
"client_email": config('GAPI_CLIENT_EMAIL'),
"client_id": config('GAPI_CLIENT_ID'),
"auth_uri": config('GAPI_AUTH_URI'),
"token_uri": config('GAPI_TOKEN_URI'),
"auth_provider_x509_cert_url": config('GAPI_AUTH_PROVIDER_X509_CERT_URL'),
"client_x509_cert_url": config('GAPI_CLIENT_X509_CERT_URL'),
}
AUTOASIGN_COLOR_ROLES = [
'Anchor',
'Apricot',
'Black',
'Canary',
'Cardinal',
'Carrot',
'Chateau',
'Fire',
'Forest',
'Green',
'Neon Green',
'Lilac',
'Maroon',
'Mint',
'Ocean',
'Pink',
'Prussian',
'Ruby',
'Slate',
'Steel',
'Tawny',
'Teal',
'Violet',
]
AUTOASIGN_CHANNEL_ROLES = [
'GMS',
'GMSM',
'GMS2',
'THMS',
'KMS',
'KMS2',
'KMSM',
]
AUTOASIGN_NOTIFY_ROLES = [
'Notify GMS',
'Notify GMSM'
]
AUTOASIGN_DAWN_NOTIFY_ROLES = [
'Event Notify',
]
AUTOASIGN_DAWN_EVENT_ROLES = [
'Investigator',
]
AUTOASIGN_JOB_ROLES = [
'Hero', 'Dark Knight', 'Paladin',
'Bowmaster', 'Marksman',
'Arch Mage IL', 'Arch Mage FP', 'Bishop',
'Night Lord', 'Shadower', 'Dual Blade',
'Buccaneer', 'Corsair', 'Cannoneer', 'Jett',
'Dawn Warrior', 'Wind Archer', 'Blaze Wizard', 'Night Walker', 'Thunder Breaker', 'Mihile',
'Mercedes', 'Aran', 'Phantom', 'Luminous', 'Evan', 'Shade',
'Battle Mage', 'Wild Hunter', 'Mechanic', 'Blaster', 'Xenon',
'Demon Slayer', 'Demon Avenger',
'Kaiser', 'Angelic Buster', 'Cadena',
'Hayato', 'Kanna',
'Illium', 'Ark',
'Beast Tamer', 'Kinesis', 'Zero', 'Pink Bean',
'Beginner', 'Citizen',
]
PONPON_ROLE_REACT_ROLE_IDS = config('PONPON_ROLE_REACT_ROLE_IDS', cast=Csv(int))
PONPON_ROLE_REACT_EMOJI_IDS = config('PONPON_ROLE_REACT_EMOJI_IDS', cast=Csv(int))
AUTOASIGN_ROLES = AUTOASIGN_COLOR_ROLES + AUTOASIGN_CHANNEL_ROLES + AUTOASIGN_JOB_ROLES
AUTOASIGN_ROLES += AUTOASIGN_NOTIFY_ROLES + AUTOASIGN_DAWN_NOTIFY_ROLES + AUTOASIGN_DAWN_EVENT_ROLES
AUTOASIGN_ROLES += PONPON_ROLE_REACT_ROLE_IDS
DAWN_SERVER_ID = 364323564737789953
MSVN_SERVER_ID = 453555802670759947
PON_SERVER_ID = 753267469413777508
REACT_FOR_ROLE_MESSAGE_ID = 472966572340674560
REACT_FOR_NOTIFICATION_ROLE_MESSAGE_ID = 472967231781863427
DAWN_REACT_FOR_ROLE_MESSAGE_ID = config('DAWN_REACT_FOR_ROLE_MESSAGE_ID', cast=int, default=557788422379667457)
DAWN_REACT_FOR_ROLE_EMOJI_IDS = config('DAWN_REACT_FOR_ROLE_EMOJI_IDS', cast=Csv())
DAWN_REACT_FOR_ROLE_ROLE_IDS = config('DAWN_REACT_FOR_ROLE_ROLE_IDS', cast=Csv())
DAWN_COLOR_CHANGE_ROLE_IDS = config('DAWN_COLOR_CHANGE_ROLE_IDS', cast=Csv(int))
EMBED_DEFAULT_COLOR = 0xf900e5
DISCORD_ROLE_FOR_INVESTIGATION = config('DISCORD_ROLE_FOR_INVESTIGATION', default='Investigator')
SPIDER_DELAY = config('SPIDER_DELAY', default=30, cast=int)
| true | true |
f73ee6c7fa31b135d219baa1fac0e39fa5293694 | 3,642 | py | Python | fuxi/core/databases/orm/discovery/port_orm.py | jeffzh3ng/fuxi-scanner | fadb1136b8896fe2a0f7783627bda867d5e6fd98 | [
"MIT"
] | 731 | 2018-06-13T05:41:04.000Z | 2019-09-06T01:36:57.000Z | fuxi/core/databases/orm/discovery/port_orm.py | riusksk/fuxi | fadb1136b8896fe2a0f7783627bda867d5e6fd98 | [
"MIT"
] | 16 | 2019-10-14T08:17:13.000Z | 2021-12-13T20:13:23.000Z | fuxi/core/databases/orm/discovery/port_orm.py | riusksk/fuxi | fadb1136b8896fe2a0f7783627bda867d5e6fd98 | [
"MIT"
] | 238 | 2018-06-14T08:59:44.000Z | 2019-09-04T06:35:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 2019/9/6
# @File : port_orm.py
# @Desc : ""
import time
from bson import ObjectId
from flask import session
from fuxi.core.databases.db_error import DatabaseError
from fuxi.core.databases.orm.database_base import DatabaseBase
from fuxi.core.databases.db_mongo import mongo, T_PORT_TASKS, T_PORT_RESULT
from fuxi.common.utils.logger import logger
class _DBPortScanTasks(DatabaseBase):
    """MongoDB accessor for port-scan task documents (collection T_PORT_TASKS).

    Document fields: name, target, port, option, op (creating user),
    status, date (creation time, epoch seconds), end_date, and
    optionally celery_id once the task is dispatched.
    """
    def __init__(self):
        DatabaseBase.__init__(self)
        # Collection name used by every query in this accessor.
        self.table = T_PORT_TASKS
    def add(self, name, target, port, option):
        """Insert a new task owned by the current session user.

        Returns the inserted document id as a string; raises
        DatabaseError when name, target, or the session user is missing.
        """
        op = session.get('user')
        if name and target and op:
            inserted_id = mongo[self.table].insert_one({
                "name": name.strip(), "target": target, "port": port, "option": option,
                "op": op, "status": "waiting", "date": int(time.time()), "end_date": 0
            }).inserted_id
            return str(inserted_id)
        else:
            logger.warning("port scan task insert failed: invalid data")
            raise DatabaseError("invalid data")
    def search(self, keyword):
        """Lower-cased substring ($regex) search across several task fields.

        NOTE(review): "date" is stored as an int, so a $regex clause on it
        presumably never matches -- confirm whether it should be dropped
        or the field stored as a string.
        """
        keyword = keyword.lower()
        return mongo[self.table].find({
            "$or": [
                {"name": {'$regex': keyword}}, {"target": {'$regex': keyword}},
                {"date": {'$regex': keyword}}, {"option": {'$regex': keyword}},
                {"status": {'$regex': keyword}}, {"op": {'$regex': keyword}},
            ]
        })
    def update_celery_id(self, task_id, celery_id):
        """Attach the Celery task id to an existing task document."""
        return mongo[self.table].update_one(
            {"_id": ObjectId(task_id)}, {"$set": {"celery_id": str(celery_id)}}
        )
class _DBPortScanResult(DatabaseBase):
    """MongoDB accessor for port-scan result documents (collection T_PORT_RESULT).

    Document fields: task_id (parent task id, stored as str), host,
    hostname, port, detail, date (epoch seconds).
    """
    def __init__(self):
        DatabaseBase.__init__(self)
        # Collection name used by every query in this accessor.
        self.table = T_PORT_RESULT
    def get_list_by_tid(self, tid):
        """Return a cursor over all results belonging to task *tid*."""
        return mongo[self.table].find({"task_id": str(tid)})
    def add(self, task_id, host, hostname, port, detail):
        """Insert a single result document; returns the new id as a string."""
        inserted_id = mongo[self.table].insert_one({
            "task_id": task_id, "host": host, "hostname": hostname, "port": port,
            "detail": detail, "date": int(time.time())
        }).inserted_id
        return str(inserted_id)
    def add_multiple(self, result):
        """Bulk-insert result dicts, skipping entries missing task_id/host.

        Returns the inserted ids as strings; an empty list when no valid
        entry was supplied.
        """
        data = []
        for item in result:
            # Entries without the mandatory keys are silently skipped.
            if item.get("task_id") and item.get("host"):
                task_id = item.get("task_id")
                host = item.get("host")
                port = item.get("port")
                port_detail = item.get("detail")
            else:
                continue
            data.append({
                "task_id": task_id, "host": host, "hostname": item.get("hostname"),
                "port": port, "detail": port_detail, "date": int(time.time())
            })
        if data:
            x = mongo[self.table].insert_many(data)
            return [str(i) for i in x.inserted_ids]
        else:
            return []
    def delete_by_tid(self, tid):
        """Remove every result belonging to task *tid*."""
        return mongo[self.table].delete_many({"task_id": str(tid)})
    def search(self, tid, keyword):
        """Lower-cased substring ($regex) search within one task's results."""
        keyword = keyword.lower()
        return mongo[self.table].find({
            "task_id": tid,
            "$or": [
                {"host": {'$regex': keyword}}, {"hostname": {'$regex': keyword}},
                {"port": {'$regex': keyword}}, {"detail.detail.name": {'$regex': keyword}}
            ]
        })
DBPortScanTasks = _DBPortScanTasks()
DBPortScanResult = _DBPortScanResult()
| 30.605042 | 90 | 0.551345 |
import time
from bson import ObjectId
from flask import session
from fuxi.core.databases.db_error import DatabaseError
from fuxi.core.databases.orm.database_base import DatabaseBase
from fuxi.core.databases.db_mongo import mongo, T_PORT_TASKS, T_PORT_RESULT
from fuxi.common.utils.logger import logger
class _DBPortScanTasks(DatabaseBase):
def __init__(self):
DatabaseBase.__init__(self)
self.table = T_PORT_TASKS
def add(self, name, target, port, option):
op = session.get('user')
if name and target and op:
inserted_id = mongo[self.table].insert_one({
"name": name.strip(), "target": target, "port": port, "option": option,
"op": op, "status": "waiting", "date": int(time.time()), "end_date": 0
}).inserted_id
return str(inserted_id)
else:
logger.warning("port scan task insert failed: invalid data")
raise DatabaseError("invalid data")
def search(self, keyword):
keyword = keyword.lower()
return mongo[self.table].find({
"$or": [
{"name": {'$regex': keyword}}, {"target": {'$regex': keyword}},
{"date": {'$regex': keyword}}, {"option": {'$regex': keyword}},
{"status": {'$regex': keyword}}, {"op": {'$regex': keyword}},
]
})
def update_celery_id(self, task_id, celery_id):
return mongo[self.table].update_one(
{"_id": ObjectId(task_id)}, {"$set": {"celery_id": str(celery_id)}}
)
class _DBPortScanResult(DatabaseBase):
    """MongoDB accessor for port-scan *result* records (collection T_PORT_RESULT)."""
    def __init__(self):
        DatabaseBase.__init__(self)
        self.table = T_PORT_RESULT
    def get_list_by_tid(self, tid):
        """Return a cursor over all results belonging to task *tid*."""
        return mongo[self.table].find({"task_id": str(tid)})
    def add(self, task_id, host, hostname, port, detail):
        """Insert a single scan result; returns the new document id as a string."""
        inserted_id = mongo[self.table].insert_one({
            "task_id": task_id, "host": host, "hostname": hostname, "port": port,
            "detail": detail, "date": int(time.time())
        }).inserted_id
        return str(inserted_id)
    def add_multiple(self, result):
        """Bulk-insert scan results.

        Items lacking "task_id" or "host" are skipped. Returns the list of
        inserted ids as strings, or [] when nothing valid was supplied.
        """
        data = []
        for item in result:
            if item.get("task_id") and item.get("host"):
                task_id = item.get("task_id")
                host = item.get("host")
                port = item.get("port")
                port_detail = item.get("detail")
            else:
                continue
            data.append({
                "task_id": task_id, "host": host, "hostname": item.get("hostname"),
                "port": port, "detail": port_detail, "date": int(time.time())
            })
        if data:
            x = mongo[self.table].insert_many(data)
            return [str(i) for i in x.inserted_ids]
        else:
            return []
    def delete_by_tid(self, tid):
        """Remove every result of task *tid*."""
        return mongo[self.table].delete_many({"task_id": str(tid)})
    def search(self, tid, keyword):
        """Case-insensitive substring search within one task's results."""
        keyword = keyword.lower()
        return mongo[self.table].find({
            "task_id": tid,
            "$or": [
                {"host": {'$regex': keyword}}, {"hostname": {'$regex': keyword}},
                {"port": {'$regex': keyword}}, {"detail.detail.name": {'$regex': keyword}}
            ]
        })
# Module-level singleton accessors shared by the rest of the application.
DBPortScanTasks = _DBPortScanTasks()
DBPortScanResult = _DBPortScanResult()
| true | true |
f73ee6d250c43b153743c980a135d678be14cdab | 2,095 | py | Python | src/editor/wxUI/baseTreeControl.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | src/editor/wxUI/baseTreeControl.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | src/editor/wxUI/baseTreeControl.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | import wx
import wx.lib.agw.customtreectrl as customtree
class BaseTreeControl(customtree.CustomTreeCtrl):
    """Tree control that implements drag-and-drop by hand from raw mouse events."""
    def __init__(self, parent, *args, **kwargs):
        """Base tree control implementing a custom drag-and-drop operation."""
        customtree.CustomTreeCtrl.__init__(self, parent, *args, **kwargs)
        # for drag drop operations
        self.mouse_left_down = False  # left button currently pressed
        self.is_dragging = False      # mouse moved while the button was down
        self.current_selected = []    # items highlighted as potential drop targets
        self.Bind(wx.EVT_LEFT_DOWN, self.on_evt_left_down)
        self.Bind(wx.EVT_LEFT_UP, self.on_evt_left_up)
        self.Bind(wx.EVT_MOTION, self.on_evt_mouse_move)
    def on_evt_left_down(self, evt):
        """Remember that a drag may be starting."""
        self.mouse_left_down = True
        evt.Skip()
    def on_evt_mouse_move(self, evt):
        """While dragging, highlight the item currently under the mouse."""
        if self.mouse_left_down:
            self.is_dragging = True
            # Reset the colour of the previously highlighted items.
            for item in self.current_selected:
                self.SetItemTextColour(item, wx.Colour(0, 0, 0, 255))
            # highlight item under the mouse, when dragging
            self.current_selected.clear()
            x, y = evt.GetPosition()
            (_id, flag) = self.HitTest((x, y))
            if _id:
                self.SetItemTextColour(_id, wx.Colour(255, 255, 190, 255))
                self.current_selected.append(_id)
        if not self.is_dragging and len(self.current_selected) > 0:
            for item in self.current_selected:
                self.SetItemTextColour(item, wx.Colour(0, 0, 0, 255))
        evt.Skip()
    def on_evt_left_up(self, evt):
        """Finish the drag: drop every selected item onto the item under the mouse."""
        # do drag drop here
        x, y = evt.GetPosition()
        (_id, flag) = self.HitTest((x, y))
        if self.is_dragging and _id:
            # print("{0} dropped onto {1}".format(self.GetSelections(), self.GetItemText(_id)) )
            for item in self.GetSelections():
                if _id == item:
                    continue
                self.do_drag_drop(item, _id)
        self.mouse_left_down = False
        self.is_dragging = False
        evt.Skip()
    def do_drag_drop(self, src_file: str, target_dir: str):
        """Hook for subclasses: called once per (dragged item, drop target) pair."""
        print("do drag drop is empty")
| 32.230769 | 96 | 0.601909 | import wx
import wx.lib.agw.customtreectrl as customtree
class BaseTreeControl(customtree.CustomTreeCtrl):
    """Tree control that implements drag-and-drop by hand from raw mouse events."""
    def __init__(self, parent, *args, **kwargs):
        customtree.CustomTreeCtrl.__init__(self, parent, *args, **kwargs)
        # State used by the custom drag-and-drop handling.
        self.mouse_left_down = False  # left button currently pressed
        self.is_dragging = False      # mouse moved while the button was down
        self.current_selected = []    # items highlighted as potential drop targets
        self.Bind(wx.EVT_LEFT_DOWN, self.on_evt_left_down)
        self.Bind(wx.EVT_LEFT_UP, self.on_evt_left_up)
        self.Bind(wx.EVT_MOTION, self.on_evt_mouse_move)
    def on_evt_left_down(self, evt):
        """Remember that a drag may be starting."""
        self.mouse_left_down = True
        evt.Skip()
    def on_evt_mouse_move(self, evt):
        """While dragging, highlight the item currently under the mouse."""
        if self.mouse_left_down:
            self.is_dragging = True
            # Reset the colour of the previously highlighted items.
            for item in self.current_selected:
                self.SetItemTextColour(item, wx.Colour(0, 0, 0, 255))
            self.current_selected.clear()
            x, y = evt.GetPosition()
            (_id, flag) = self.HitTest((x, y))
            if _id:
                # Highlight the current hover target.
                self.SetItemTextColour(_id, wx.Colour(255, 255, 190, 255))
                self.current_selected.append(_id)
        if not self.is_dragging and len(self.current_selected) > 0:
            for item in self.current_selected:
                self.SetItemTextColour(item, wx.Colour(0, 0, 0, 255))
        evt.Skip()
    def on_evt_left_up(self, evt):
        """Finish the drag: drop every selected item onto the item under the mouse."""
        x, y = evt.GetPosition()
        (_id, flag) = self.HitTest((x, y))
        if self.is_dragging and _id:
            for item in self.GetSelections():
                if _id == item:
                    continue
                self.do_drag_drop(item, _id)
        self.mouse_left_down = False
        self.is_dragging = False
        evt.Skip()
    def do_drag_drop(self, src_file: str, target_dir: str):
        """Hook for subclasses: called once per (dragged item, drop target) pair."""
        print("do drag drop is empty")
| true | true |
f73ee7cbd890517075626fd833578fb3110e4e9f | 3,077 | py | Python | wgail_info_2/preprocess.py | hzyjerry/InfoGAIL | 89bf3bee42242f4a8a41401d17296773294e6b6a | [
"MIT"
] | null | null | null | wgail_info_2/preprocess.py | hzyjerry/InfoGAIL | 89bf3bee42242f4a8a41401d17296773294e6b6a | [
"MIT"
] | null | null | null | wgail_info_2/preprocess.py | hzyjerry/InfoGAIL | 89bf3bee42242f4a8a41401d17296773294e6b6a | [
"MIT"
] | null | null | null | from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
import numpy as np
import time
import cv2
def collect_demo(path, num_patch, aux_dim, action_dim):
    """Load expert demonstration patches from disk.

    For each patch directory ``<path><i>/`` this reads one action per line
    from demo.txt, auxiliary state features from states.txt, and one RGB
    frame ``<j>.jpg`` per step (resized to 256x256).

    Args:
        path: directory prefix; patch ``i`` lives in ``path + str(i) + "/"``.
        num_patch: number of patch directories to read (must be positive).
        aux_dim: unused here; kept for interface compatibility.
        action_dim: unused here; kept for interface compatibility.

    Returns:
        (imgs, auxs, actions): uint8 images of shape (N, 256, 256, 3),
        float32 aux features of shape (N, 2) and float32 actions (N, A).
    """
    if num_patch <= 0:
        raise ValueError("num_patch must be positive, got %d" % num_patch)
    img_chunks, aux_chunks, action_chunks = [], [], []
    for i in range(num_patch):
        path_patch = path + str(i) + "/"
        # Context managers close the file handles (previously leaked).
        with open(path_patch + "demo.txt", 'r') as f:
            demo_raw = f.readlines()
        with open(path_patch + "states.txt", 'r') as f:
            state_raw = f.readlines()
        print("Loading patch %d ..." % i)
        for j in range(len(demo_raw)):
            action_data = np.array(demo_raw[j].strip().split(" ")).astype(np.float32)
            state_data = np.array(state_raw[j].strip().split(" ")).astype(np.float32)
            # Only two of the state entries are used as auxiliary features.
            aux_chunks.append(np.array([state_data[-3], state_data[-1]], dtype=np.float32))
            action_chunks.append(action_data)
            img = image.img_to_array(image.load_img(path_patch + str(j) + ".jpg"))
            # Resize then truncate to uint8, matching the original pipeline.
            img_chunks.append(cv2.resize(img, (256, 256)).astype(np.uint8))
        print("Current total: %d frames" % len(img_chunks))
    # One stack at the end instead of repeated O(n^2) np.concatenate calls.
    imgs = np.stack(img_chunks, axis=0)
    auxs = np.stack(aux_chunks, axis=0)
    actions = np.stack(action_chunks, axis=0)
    print("Images:", imgs.shape, "Auxs:", auxs.shape, "Actions:", actions.shape)
    return imgs, auxs, actions
def normalize(x):
    """Scale the first four columns of *x* down by 200, in place.

    Returns the same array object for call-chaining convenience. NumPy
    silently clamps the column slice, so arrays narrower than four
    columns are scaled across all their columns.
    """
    np.divide(x[:, 0:4], 200.0, out=x[:, 0:4])
    return x
def main():
    """Collect all demo patches, normalize the aux features and report done."""
    aux_dim = 66
    action_dim = 3
    num_patch = 240
    #demo_path = "/home/yunzhu/Desktop/human_low_case_1/demo_"
    demo_path = "/home/zhiyang/Desktop/intention/reacher/rl_demo/demo_"  # machine-specific path
    imgs, auxs, actions = collect_demo(demo_path, num_patch, aux_dim, action_dim)
    auxs = normalize(auxs)
    #np.savez_compressed("/home/zhiyang/Desktop/intention/reacher/rl_demo/demo.npz",
    #imgs=imgs, auxs=auxs, actions=actions)
    print("Finished.")
if __name__ == "__main__":
main()
| 33.086022 | 93 | 0.574911 | from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
import numpy as np
import time
import cv2
def collect_demo(path, num_patch, aux_dim, action_dim):
    """Load expert demonstration patches (images, aux features, actions).

    Reads ``<path><i>/demo.txt`` (actions), ``<path><i>/states.txt`` (state
    features) and per-step jpg frames, returning three stacked arrays.
    NOTE(review): aux_dim/action_dim are accepted but never used here.
    """
    for i in range(num_patch):
        path_patch = path + str(i) + "/"
        demo_name = path_patch + "demo.txt"
        demo_raw = open(demo_name, 'r').readlines()
        state_name = path_patch + "states.txt"
        state_raw = open(state_name, 'r').readlines()
        pa = np.zeros(6, dtype=np.float32)  # NOTE(review): unused variable
        print("Loading patch %d ..." % i)
        for j in range(0, len(demo_raw)):
            action_data = np.array(demo_raw[j].strip().split(" ")).astype(np.float32)
            state_data = np.array(state_raw[j].strip().split(" ")).astype(np.float32)
            # Only two state entries are kept as auxiliary features.
            aux = np.expand_dims([state_data[-3], state_data[-1]], axis=0).astype(np.float32)
            action = np.expand_dims(action_data[:], axis=0).astype(np.float32)
            img_path = path_patch + str(j) + ".jpg"
            img = image.load_img(img_path)
            img = image.img_to_array(img)
            img = cv2.resize(img, (256, 256))
            img = np.expand_dims(img, axis=0).astype(np.uint8)
            # NOTE(review): repeated np.concatenate is quadratic; consider
            # accumulating into lists and stacking once at the end.
            if j == 0:
                auxs_tmp = aux
                actions_tmp = action
                imgs_tmp = img
            else:
                auxs_tmp = np.concatenate((auxs_tmp, aux), axis=0)
                actions_tmp = np.concatenate((actions_tmp, action), axis=0)
                imgs_tmp = np.concatenate((imgs_tmp, img), axis=0)
        if i == 0:
            auxs = auxs_tmp
            actions = actions_tmp
            imgs = imgs_tmp
        else:
            auxs = np.concatenate((auxs, auxs_tmp), axis=0)
            actions = np.concatenate((actions, actions_tmp), axis=0)
            imgs = np.concatenate((imgs, imgs_tmp), axis=0)
        print("Current total:", imgs.shape, auxs.shape, actions.shape)
    print("Images:", imgs.shape, "Auxs:", auxs.shape, "Actions:", actions.shape)
    return imgs, auxs, actions
def normalize(x):
    """Scale the first four columns of *x* down by 200, in place.

    Returns the same array object for call-chaining convenience. NumPy
    silently clamps the column slice, so arrays narrower than four
    columns are scaled across all their columns.
    """
    np.divide(x[:, 0:4], 200.0, out=x[:, 0:4])
    return x
def main():
    """Collect all demo patches, normalize the aux features and report done."""
    aux_dim = 66
    action_dim = 3
    num_patch = 240
    demo_path = "/home/zhiyang/Desktop/intention/reacher/rl_demo/demo_"  # machine-specific path
    imgs, auxs, actions = collect_demo(demo_path, num_patch, aux_dim, action_dim)
    auxs = normalize(auxs)
    print("Finished.")
if __name__ == "__main__":
main()
| true | true |
f73ee8077f517575e1ee630a42cad76392adc9c9 | 1,947 | py | Python | tests/Simple/SupLstm/visualize_policy.py | maxiaoba/rlk | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | 1 | 2021-09-28T21:16:54.000Z | 2021-09-28T21:16:54.000Z | tests/Simple/SupLstm/visualize_policy.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | tests/Simple/SupLstm/visualize_policy.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | import torch
import numpy as np
import time
import pdb
from rlkit.torch.core import eval_np, np_ify
import argparse
# CLI options selecting which trained run to visualize.
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='SimpleSupLSTM')
parser.add_argument('--extra_name', type=str, default='obs1int10')
parser.add_argument('--log_dir', type=str, default='SupLSTMlayer1hidden16')
parser.add_argument('--file', type=str, default='params')
parser.add_argument('--epoch', type=int, default=None)  # NOTE(review): parsed but never used below
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
pre_dir = './Data/'+args.exp_name+args.extra_name
import os
# Load the pickled training snapshot (policy + optional supervised learner).
data_path = '{}/{}/seed{}/{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.file)
data = torch.load(data_path,map_location='cpu')
policy = data['trainer/policy']
# from rlkit.torch.policies.make_deterministic import MakeDeterministic
# policy = MakeDeterministic(policy)
if 'trainer/sup_learner' in data.keys():
    sup_learner = data['trainer/sup_learner']
else:
    sup_learner = None
import sys
import json
# Re-create the environment from the run's saved variant.json.
with open('{}/{}/seed{}/variant.json'.format(pre_dir,args.log_dir,args.seed)) as f:
    variant = json.load(f)
from simple_sup_lstm import SimpleSupLSTMEnv
env = SimpleSupLSTMEnv(**variant['env_kwargs'])
o = env.reset()
policy.reset()
max_path_length = 10
path_length = 0
done = False
c_r = 0.  # cumulative reward of the current episode
# Roll out episodes forever, printing per-step diagnostics; drops into pdb
# at every episode boundary so the user can inspect the state.
while True:
    path_length += 1
    a, agent_info = policy.get_action(o)
    o, r, done, env_info = env.step(a)
    # Predicted intention distribution, if a supervised head is available.
    if sup_learner:
        intentions = eval_np(sup_learner, o[None,:])
    elif hasattr(policy, 'sup_prob'):
        intentions = eval_np(policy.sup_prob, o[None,:])[0]
    else:
        intentions = None
    c_r += r
    print("step: ",path_length)
    print("intentions: ",intentions)
    print("a: ",a)
    print("env_info: ",env_info)
    print('r: ',r)
    print(done)
    # pdb.set_trace()
    time.sleep(0.1)
    if path_length > max_path_length or done:
        print('c_r: ',c_r)
        path_length = 0
        done = False
        c_r = 0.
        pdb.set_trace()
        o = env.reset()
policy.reset() | 27.041667 | 83 | 0.724191 | import torch
import numpy as np
import time
import pdb
from rlkit.torch.core import eval_np, np_ify
import argparse
# CLI options selecting which trained run to visualize.
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='SimpleSupLSTM')
parser.add_argument('--extra_name', type=str, default='obs1int10')
parser.add_argument('--log_dir', type=str, default='SupLSTMlayer1hidden16')
parser.add_argument('--file', type=str, default='params')
parser.add_argument('--epoch', type=int, default=None)  # NOTE(review): parsed but never used below
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
pre_dir = './Data/'+args.exp_name+args.extra_name
import os
# Load the pickled training snapshot (policy + optional supervised learner).
data_path = '{}/{}/seed{}/{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.file)
data = torch.load(data_path,map_location='cpu')
policy = data['trainer/policy']
if 'trainer/sup_learner' in data.keys():
    sup_learner = data['trainer/sup_learner']
else:
    sup_learner = None
import sys
import json
# Re-create the environment from the run's saved variant.json.
with open('{}/{}/seed{}/variant.json'.format(pre_dir,args.log_dir,args.seed)) as f:
    variant = json.load(f)
from simple_sup_lstm import SimpleSupLSTMEnv
env = SimpleSupLSTMEnv(**variant['env_kwargs'])
o = env.reset()
policy.reset()
max_path_length = 10
path_length = 0
done = False
c_r = 0.  # cumulative reward of the current episode
# Roll out episodes forever, printing per-step diagnostics; drops into pdb
# at every episode boundary so the user can inspect the state.
while True:
    path_length += 1
    a, agent_info = policy.get_action(o)
    o, r, done, env_info = env.step(a)
    # Predicted intention distribution, if a supervised head is available.
    if sup_learner:
        intentions = eval_np(sup_learner, o[None,:])
    elif hasattr(policy, 'sup_prob'):
        intentions = eval_np(policy.sup_prob, o[None,:])[0]
    else:
        intentions = None
    c_r += r
    print("step: ",path_length)
    print("intentions: ",intentions)
    print("a: ",a)
    print("env_info: ",env_info)
    print('r: ',r)
    print(done)
    time.sleep(0.1)
    if path_length > max_path_length or done:
        print('c_r: ',c_r)
        path_length = 0
        done = False
        c_r = 0.
        pdb.set_trace()
        o = env.reset()
policy.reset() | true | true |
f73ee9e5af67073b0da598dca04d06e699fdccd1 | 440 | py | Python | PrepareData.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 10 | 2020-06-29T13:18:22.000Z | 2021-07-29T02:19:54.000Z | PrepareData.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 1 | 2020-08-20T17:29:31.000Z | 2020-08-20T19:43:12.000Z | PrepareData.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 2 | 2021-08-24T18:57:20.000Z | 2022-02-17T00:31:02.000Z | import os
import Utils
def load_data():
    """Load every plagiarism document found under 'Data/Plagiarism Documents'.

    Walks the directory tree, reads each .txt file, and splits its lines into
    chunks of at most 128 words via Utils.split_text_max_words.

    Returns:
        list: one entry per text file; [] when the directory is missing/empty.
    """
    base_dir = 'Data/Plagiarism Documents'
    text_files = []
    for root, _dirs, filenames in os.walk(base_dir):
        for filename in filenames:
            if '.txt' in filename:
                # Join against the directory actually being walked so files in
                # sub-folders resolve correctly (the old code always joined to
                # the top-level path, breaking nested documents).
                text_files.append(os.path.join(root, filename))
    documents = []
    for file_path in text_files:
        # Context manager closes the handle (previously leaked).
        with open(file_path, 'r') as handle:
            lines = handle.readlines()
        documents.append(Utils.split_text_max_words(lines, 128))
    return documents
| 17.6 | 64 | 0.540909 | import os
import Utils
def load_data():
    """Load every plagiarism document found under 'Data/Plagiarism Documents'.

    Walks the directory tree, reads each .txt file, and splits its lines into
    chunks of at most 128 words via Utils.split_text_max_words.

    Returns:
        list: one entry per text file; [] when the directory is missing/empty.
    """
    base_dir = 'Data/Plagiarism Documents'
    text_files = []
    for root, _dirs, filenames in os.walk(base_dir):
        for filename in filenames:
            if '.txt' in filename:
                # Join against the directory actually being walked so files in
                # sub-folders resolve correctly (the old code always joined to
                # the top-level path, breaking nested documents).
                text_files.append(os.path.join(root, filename))
    documents = []
    for file_path in text_files:
        # Context manager closes the handle (previously leaked).
        with open(file_path, 'r') as handle:
            lines = handle.readlines()
        documents.append(Utils.split_text_max_words(lines, 128))
    return documents
| true | true |
f73eea9cc59b6add9520d8d1d1d611aa98560f52 | 285 | py | Python | examples/promote_as_admin.py | fedor-chemashkin/harbor-python | d7b0e63e9b460530c66918d8849ed5aba4319c49 | [
"Apache-2.0"
] | 11 | 2019-01-28T08:08:18.000Z | 2022-02-03T15:47:49.000Z | examples/promote_as_admin.py | fedor-chemashkin/harbor-python | d7b0e63e9b460530c66918d8849ed5aba4319c49 | [
"Apache-2.0"
] | null | null | null | examples/promote_as_admin.py | fedor-chemashkin/harbor-python | d7b0e63e9b460530c66918d8849ed5aba4319c49 | [
"Apache-2.0"
] | 12 | 2019-05-28T18:25:26.000Z | 2022-03-17T11:08:40.000Z | #!/usr/bin/env python
import sys
sys.path.append("../")  # make harborclient_light importable from examples/
from harborclient_light import harborclient
# Connection settings for a default local Harbor install.
host = "127.0.0.1"
user = "admin"
password = "Harbor12345"
client = harborclient.HarborClient(host, user, password)
# Promote as admin
user_id = 2
client.promote_as_admin(user_id)
| 16.764706 | 56 | 0.74386 |
import sys
sys.path.append("../")  # make harborclient_light importable from examples/
from harborclient_light import harborclient
# Connection settings for a default local Harbor install.
host = "127.0.0.1"
user = "admin"
password = "Harbor12345"
client = harborclient.HarborClient(host, user, password)
# Grant admin rights to the user with this id.
user_id = 2
client.promote_as_admin(user_id)
| true | true |
f73eeadec33885d91247f63d260772f3be335b8a | 543 | py | Python | devices/master/plugins/epg_sat/read_epg_json.py | stko/Schnipsl | 824572c657e48f18950f584b9529661ff5bb8069 | [
"MIT"
] | null | null | null | devices/master/plugins/epg_sat/read_epg_json.py | stko/Schnipsl | 824572c657e48f18950f584b9529661ff5bb8069 | [
"MIT"
] | 29 | 2020-08-30T15:07:50.000Z | 2022-02-19T03:41:26.000Z | devices/master/plugins/epg_sat/read_epg_json.py | wifitvbox/Schnipsl | 553ce8de3dda26fb92297ad76e92f4a363070e4e | [
"MIT"
] | 1 | 2020-12-28T05:46:17.000Z | 2020-12-28T05:46:17.000Z | import sys
import json
from datetime import datetime
# Print a human-readable programme listing from an EPG JSON dump
# (file path passed as the first CLI argument).
lastEnd=0
with open(sys.argv[1]) as json_file:
	data = json.load(json_file)
	# Keys of data['details'] are sorted to get chronological order.
	times=sorted(list(data['details']))
	for time in times:
		p=data['details'][time]
		# "<start date/time> <duration as H:M> <name>-<title>"
		print('{0} {1} {2}-{3}'.format(
			datetime.utcfromtimestamp(p['unixTimeBegin']).strftime('%Y-%m-%d %H:%M'),
			datetime.utcfromtimestamp(p['unixTimeEnd']-p['unixTimeBegin']).strftime('%H:%M'),
			p['name'],
			p['title']
		)
		)
		# Divider when the previous entry's end differs from this one's start.
		# NOTE(review): printed *after* the entry, so the divider trails the
		# programme that follows the gap -- confirm the intended placement.
		if lastEnd != p['unixTimeBegin']:
			print('--------------------')
		lastEnd=p['unixTimeEnd']
| 24.681818 | 84 | 0.627993 | import sys
import json
from datetime import datetime
# Print a human-readable programme listing from an EPG JSON dump
# (file path passed as the first CLI argument).
lastEnd=0
with open(sys.argv[1]) as json_file:
	data = json.load(json_file)
	# Keys of data['details'] are sorted to get chronological order.
	times=sorted(list(data['details']))
	for time in times:
		p=data['details'][time]
		# "<start date/time> <duration as H:M> <name>-<title>"
		print('{0} {1} {2}-{3}'.format(
			datetime.utcfromtimestamp(p['unixTimeBegin']).strftime('%Y-%m-%d %H:%M'),
			datetime.utcfromtimestamp(p['unixTimeEnd']-p['unixTimeBegin']).strftime('%H:%M'),
			p['name'],
			p['title']
		)
		)
		# Divider when the previous entry's end differs from this one's start.
		if lastEnd != p['unixTimeBegin']:
			print('--------------------')
		lastEnd=p['unixTimeEnd']
| true | true |
f73eeb5dbe470dea4ec0a9950f7e35c6c2859821 | 6,522 | py | Python | cvpods/engine/setup.py | reinforcementdriving/cvpods | 32d98b74745020be035a0e20337ad934201615c4 | [
"Apache-2.0"
] | 758 | 2021-03-11T08:14:26.000Z | 2022-03-31T07:24:13.000Z | cvpods/engine/setup.py | wondervictor/cvpods | 614a975e5425bbaeb66bbd1ffca552d633ba89ca | [
"Apache-2.0"
] | 58 | 2020-12-04T19:47:10.000Z | 2022-03-30T06:52:13.000Z | cvpods/engine/setup.py | wondervictor/cvpods | 614a975e5425bbaeb66bbd1ffca552d633ba89ca | [
"Apache-2.0"
] | 110 | 2021-03-18T01:59:31.000Z | 2022-03-18T21:26:56.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import os
import torch
from cvpods.utils import PathManager, collect_env_info, comm, seed_all_rng, setup_logger
__all__ = ["default_argument_parser", "default_setup"]
def default_argument_parser():
    """
    Create a parser with some common arguments used by cvpods users.

    Returns:
        argparse.ArgumentParser: parser pre-populated with the standard
            training/evaluation command-line flags.
    """
    arg_parser = argparse.ArgumentParser(description="cvpods Training")
    arg_parser.add_argument(
        "--resume", action="store_true",
        help="whether to attempt to resume from the checkpoint directory")
    arg_parser.add_argument(
        "--eval-only", action="store_true", help="perform evaluation only")
    arg_parser.add_argument(
        "--num-gpus", type=int, default=1, help="number of gpus *per machine*")
    arg_parser.add_argument("--num-machines", type=int, default=1)
    arg_parser.add_argument(
        "--machine-rank", type=int, default=0,
        help="the rank of this machine (unique per machine)")
    # PyTorch still may leave orphan processes in multi-gpu training. Use a
    # deterministic per-user port so users notice orphans via the occupied port.
    default_port = 2 ** 15 + 2 ** 14 + hash(os.getuid()) % 2 ** 14
    arg_parser.add_argument(
        "--dist-url", default="tcp://127.0.0.1:{}".format(default_port))
    arg_parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER)
    return arg_parser
def check_subdivision_config(cfg):
    """Validate the batch-subdivision settings in *cfg*.

    Raises:
        AssertionError: if SOLVER.BATCH_SUBDIVISIONS is not positive, or does
            not evenly divide SOLVER.IMS_PER_DEVICE.
    """
    n_subdiv = cfg.SOLVER.BATCH_SUBDIVISIONS
    n_images = cfg.SOLVER.IMS_PER_DEVICE
    assert n_subdiv > 0, \
        "SOLVER.BATCH_SUBDIVISIONS ({}) must be a positive number.".format(n_subdiv)
    # With a single subdivision the divisibility constraint holds trivially.
    if n_subdiv > 1:
        assert n_images % n_subdiv == 0, (
            "SOLVER.IMS_PER_DEVICE ({}) must be divisible by the "
            "SOLVER.BATCH_SUBDIVISIONS ({}).".format(n_images, n_subdiv)
        )
def adjust_config(cfg):
    """Rescale batch size, schedule and learning rate in *cfg*, in place,
    for the actual distributed world size (linear scaling relative to the
    8-GPU values the config was authored for).
    """
    base_world_size = int(cfg.SOLVER.IMS_PER_BATCH / cfg.SOLVER.IMS_PER_DEVICE)
    # Batchsize, learning rate and max_iter in original config is used for 8 GPUs
    assert base_world_size == 8, "IMS_PER_BATCH/DEVICE in config file is used for 8 GPUs"
    world_size = comm.get_world_size()
    machines_ratio = world_size / base_world_size
    # ------ adjust batch_size ---------- #
    cfg.SOLVER.IMS_PER_BATCH = int(machines_ratio * cfg.SOLVER.IMS_PER_BATCH)
    assert (
        cfg.SOLVER.IMS_PER_BATCH / cfg.SOLVER.IMS_PER_DEVICE == world_size
    ), "IMS_PER_BATCH ({}) not equal to IMS_PER_BATCH ({}) * world_size ({})".format(
        cfg.SOLVER.IMS_PER_BATCH, cfg.SOLVER.IMS_PER_DEVICE, world_size
    )
    check_subdivision_config(cfg)
    # ------- adjust scheduler --------- #
    # since we use new IMS_PER_BATCH value, epoch value doesn't need to multiply ratio
    if cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH is None:
        cfg.SOLVER.LR_SCHEDULER.MAX_ITER = int(cfg.SOLVER.LR_SCHEDULER.MAX_ITER / machines_ratio)
        cfg.SOLVER.LR_SCHEDULER.STEPS = [
            int(step / machines_ratio) for step in cfg.SOLVER.LR_SCHEDULER.STEPS
        ]
    cfg.SOLVER.CHECKPOINT_PERIOD = int(cfg.SOLVER.CHECKPOINT_PERIOD / machines_ratio)
    cfg.TEST.EVAL_PERIOD = int(cfg.TEST.EVAL_PERIOD / machines_ratio)
    if "SGD" in cfg.SOLVER.OPTIMIZER.NAME:
        # adjust learning rate according to Linear rule
        cfg.SOLVER.OPTIMIZER.BASE_LR = machines_ratio * cfg.SOLVER.OPTIMIZER.BASE_LR
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:
    1. Set up the cvpods logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory
    Args:
        cfg (BaseConfig): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    Returns:
        tuple: the (possibly adjusted) cfg and the configured logger.
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
    rank = comm.get_rank()
    # setup_logger(output_dir, distributed_rank=rank, name="cvpods")
    logger = setup_logger(output_dir, distributed_rank=rank)
    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            PathManager.open(args.config_file, "r").read())
        )
    # Rescale batch/schedule/LR settings for the actual world size.
    adjust_config(cfg)
    logger.info("Running with full config:\n{}".format(cfg))
    base_config = cfg.__class__.__base__()
    logger.info("different config with base class:\n{}".format(cfg.diff(base_config)))
    # if comm.is_main_process() and output_dir:
    #     # Note: some of our scripts may expect the existence of
    #     #     config.yaml in output directory
    #     path = os.path.join(output_dir, "config.yaml")
    #     with PathManager.open(path, "w") as f:
    #         f.write(cfg.dump())
    #     logger.info("Full config saved to {}".format(os.path.abspath(path)))
    # make sure each worker has a different, yet deterministic seed if specified
    seed = seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    # save seed to config for dump
    cfg.SEED = seed
    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
    return cfg, logger
| 39.289157 | 97 | 0.665747 |
import argparse
import os
import torch
from cvpods.utils import PathManager, collect_env_info, comm, seed_all_rng, setup_logger
__all__ = ["default_argument_parser", "default_setup"]
def default_argument_parser():
    """Create a parser with some common arguments used by cvpods users.

    Returns:
        argparse.ArgumentParser: parser with the standard training flags.
    """
    parser = argparse.ArgumentParser(description="cvpods Training")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument("--eval-only",
                        action="store_true",
                        help="perform evaluation only")
    parser.add_argument("--num-gpus",
                        type=int,
                        default=1,
                        help="number of gpus *per machine*")
    parser.add_argument("--num-machines", type=int, default=1)
    parser.add_argument("--machine-rank",
                        type=int,
                        default=0,
                        help="the rank of this machine (unique per machine)")
    # Deterministic per-user port so orphan processes left by multi-gpu
    # training are noticed via the occupied port.
    port = 2**15 + 2**14 + hash(os.getuid()) % 2**14
    parser.add_argument("--dist-url",
                        default="tcp://127.0.0.1:{}".format(port))
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
def check_subdivision_config(cfg):
    """Validate SOLVER.BATCH_SUBDIVISIONS: it must be positive and, when
    greater than 1, must evenly divide SOLVER.IMS_PER_DEVICE.
    """
    images_per_device = cfg.SOLVER.IMS_PER_DEVICE
    batch_subdivisions = cfg.SOLVER.BATCH_SUBDIVISIONS
    assert (
        batch_subdivisions > 0
    ), "SOLVER.BATCH_SUBDIVISIONS ({}) must be a positive number.".format(
        batch_subdivisions
    )
    if batch_subdivisions > 1:
        # With a single subdivision the divisibility constraint is trivial.
        assert (
            images_per_device % batch_subdivisions == 0
        ), "SOLVER.IMS_PER_DEVICE ({}) must be divisible by the " \
            "SOLVER.BATCH_SUBDIVISIONS ({}).".format(images_per_device, batch_subdivisions)
def adjust_config(cfg):
    """Rescale batch size, schedule and learning rate in *cfg*, in place,
    for the actual distributed world size (linear scaling relative to the
    8-GPU values the config was authored for).
    """
    base_world_size = int(cfg.SOLVER.IMS_PER_BATCH / cfg.SOLVER.IMS_PER_DEVICE)
    # The shipped configs assume 8 GPUs.
    assert base_world_size == 8, "IMS_PER_BATCH/DEVICE in config file is used for 8 GPUs"
    world_size = comm.get_world_size()
    machines_ratio = world_size / base_world_size
    # Scale the global batch size to the current world size.
    cfg.SOLVER.IMS_PER_BATCH = int(machines_ratio * cfg.SOLVER.IMS_PER_BATCH)
    assert (
        cfg.SOLVER.IMS_PER_BATCH / cfg.SOLVER.IMS_PER_DEVICE == world_size
    ), "IMS_PER_BATCH ({}) not equal to IMS_PER_BATCH ({}) * world_size ({})".format(
        cfg.SOLVER.IMS_PER_BATCH, cfg.SOLVER.IMS_PER_DEVICE, world_size
    )
    check_subdivision_config(cfg)
    # Iteration-based schedules shrink/grow inversely with the batch ratio;
    # epoch-based schedules (MAX_EPOCH set) need no adjustment.
    if cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH is None:
        cfg.SOLVER.LR_SCHEDULER.MAX_ITER = int(cfg.SOLVER.LR_SCHEDULER.MAX_ITER / machines_ratio)
        cfg.SOLVER.LR_SCHEDULER.STEPS = [
            int(step / machines_ratio) for step in cfg.SOLVER.LR_SCHEDULER.STEPS
        ]
    cfg.SOLVER.CHECKPOINT_PERIOD = int(cfg.SOLVER.CHECKPOINT_PERIOD / machines_ratio)
    cfg.TEST.EVAL_PERIOD = int(cfg.TEST.EVAL_PERIOD / machines_ratio)
    if "SGD" in cfg.SOLVER.OPTIMIZER.NAME:
        # adjust learning rate according to Linear rule
        cfg.SOLVER.OPTIMIZER.BASE_LR = machines_ratio * cfg.SOLVER.OPTIMIZER.BASE_LR
def default_setup(cfg, args):
    """Perform basic common setups at the beginning of a job: configure the
    logger, log environment/arguments/config, adjust the config for the
    current world size, and seed all RNGs.

    Args:
        cfg: the full config to be used.
        args: the command line arguments to be logged.

    Returns:
        tuple: the (possibly adjusted) cfg and the configured logger.
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
    rank = comm.get_rank()
    # setup_logger(output_dir, distributed_rank=rank, name="cvpods")
    logger = setup_logger(output_dir, distributed_rank=rank)
    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            PathManager.open(args.config_file, "r").read())
        )
    adjust_config(cfg)
    logger.info("Running with full config:\n{}".format(cfg))
    base_config = cfg.__class__.__base__()
    logger.info("different config with base class:\n{}".format(cfg.diff(base_config)))
    # if comm.is_main_process() and output_dir:
    #     # Note: some of our scripts may expect the existence of
    #     #     config.yaml in output directory
    #     path = os.path.join(output_dir, "config.yaml")
    #     with PathManager.open(path, "w") as f:
    #         f.write(cfg.dump())
    #     logger.info("Full config saved to {}".format(os.path.abspath(path)))
    # make sure each worker has a different, yet deterministic seed if specified
    seed = seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    # save seed to config for dump
    cfg.SEED = seed
    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
    return cfg, logger
| true | true |
f73eebb96d95028ff8078a10cf24ef3d0370bb84 | 2,202 | py | Python | google/ads/googleads/v9/resources/types/distance_view.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/resources/types/distance_view.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/resources/types/distance_view.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import (
distance_bucket as gage_distance_bucket,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"DistanceView",},
)
class DistanceView(proto.Message):
    r"""A distance view with metrics aggregated by the user's
    distance from an advertiser's location extensions. Each
    DistanceBucket includes all impressions that fall within its
    distance and a single impression will contribute to the metrics
    for all DistanceBuckets that include the user's distance.
    Attributes:
        resource_name (str):
            Output only. The resource name of the distance view.
            Distance view resource names have the form:
            ``customers/{customer_id}/distanceViews/1~{distance_bucket}``
        distance_bucket (google.ads.googleads.v9.enums.types.DistanceBucketEnum.DistanceBucket):
            Output only. Grouping of user distance from
            location extensions.
        metric_system (bool):
            Output only. True if the DistanceBucket is
            using the metric system, false otherwise.
            This field is a member of `oneof`_ ``_metric_system``.
    """
    # NOTE: appears to be generated protobuf wrapper code -- the field tag
    # numbers below must stay in sync with the distance_view proto definition.
    resource_name = proto.Field(proto.STRING, number=1,)
    distance_bucket = proto.Field(
        proto.ENUM,
        number=2,
        enum=gage_distance_bucket.DistanceBucketEnum.DistanceBucket,
    )
    metric_system = proto.Field(proto.BOOL, number=4, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.952381 | 96 | 0.710718 |
import proto
from google.ads.googleads.v9.enums.types import (
distance_bucket as gage_distance_bucket,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"DistanceView",},
)
class DistanceView(proto.Message):
    """Distance view: metrics aggregated by the user's distance from an
    advertiser's location extensions.
    """
    # String resource name, proto tag 1.
    resource_name = proto.Field(proto.STRING, number=1,)
    # Distance bucket enum, proto tag 2.
    distance_bucket = proto.Field(
        proto.ENUM,
        number=2,
        enum=gage_distance_bucket.DistanceBucketEnum.DistanceBucket,
    )
    # Optional bool, proto tag 4.
    metric_system = proto.Field(proto.BOOL, number=4, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f73eebc9fcb0dae3af2ebfd4cf16484ab4e2e10d | 921 | py | Python | audiofile_processor.py | ChameleonTartu/audio-files-to-pomodoro-counts | 654e090b94e145ec7ed73e9c62f7819483265eeb | [
"MIT"
] | null | null | null | audiofile_processor.py | ChameleonTartu/audio-files-to-pomodoro-counts | 654e090b94e145ec7ed73e9c62f7819483265eeb | [
"MIT"
] | 10 | 2019-10-01T19:24:12.000Z | 2020-05-14T12:22:21.000Z | audiofile_processor.py | ChameleonTartu/audio-files-to-pomodoro-counts | 654e090b94e145ec7ed73e9c62f7819483265eeb | [
"MIT"
] | null | null | null | from os import listdir
import os.path as osp
class AudiofileProcessor(object):
    """Counts how many full pomodoros the total play time of the audio
    files in a directory adds up to, and prints the leftover time.
    """

    SECONDS = 60
    POMADORO_LENGTH_IN_SECONDS = 25 * SECONDS  # (sic) name kept for compatibility

    def __init__(self, directory, filter_by_ext, length_calculator):
        self.directory = directory                   # directory to scan (non-recursive)
        self.filter_by_ext = filter_by_ext           # predicate: file name -> bool
        self.length_calculator = length_calculator   # path -> duration in seconds

    def _get_files(self):
        """Return paths of matching regular files; [] if the directory is missing."""
        if not osp.isdir(self.directory):
            return []
        matches = []
        for name in listdir(self.directory):
            path = osp.join(self.directory, name)
            # The extension filter sees the bare file name, not the full path.
            if osp.isfile(path) and self.filter_by_ext(name):
                matches.append(path)
        return matches

    def pomodoro(self):
        """Print the pomodoro count and the remainder as minutes:seconds."""
        total = 0
        for path in self._get_files():
            total += self.length_calculator(path)
        seconds = round(total)
        pomodoro_len = self.POMADORO_LENGTH_IN_SECONDS
        leftover = seconds % pomodoro_len
        print("Pomodoros listened: #{}. Time remained: {}:{}"
              .format(seconds // pomodoro_len,
                      leftover // self.SECONDS,
                      leftover % self.SECONDS))
| 28.78125 | 73 | 0.724213 | from os import listdir
import os.path as osp
class AudiofileProcessor(object):
SECONDS = 60
POMADORO_LENGTH_IN_SECONDS = 25 * SECONDS
def __init__(self, directory, filter_by_ext, length_calculator):
self.directory = directory
self.filter_by_ext = filter_by_ext
self.length_calculator = length_calculator
def _get_files(self):
files = []
if osp.isdir(self.directory):
for f in listdir(self.directory):
if osp.isfile(osp.join(self.directory, f)) and self.filter_by_ext(f):
files.append(osp.join(self.directory, f))
return files
def pomodoro(self):
files = self._get_files()
length = 0
for f in files:
length += self.length_calculator(f)
l = round(length)
print("Pomodoros listened: #{}. Time remained: {}:{}"
.format(l // self.POMADORO_LENGTH_IN_SECONDS,
(l % self.POMADORO_LENGTH_IN_SECONDS) // self.SECONDS,
(l % self.POMADORO_LENGTH_IN_SECONDS) % self.SECONDS))
| true | true |
f73eebfe78f6d70463ddaa46312924b97cedc2d0 | 60 | py | Python | dndgui/__init__.py | a24ma/msg2eml | dc5d23339cd231991918fc6956a94a30308b72d5 | [
"MIT"
] | 1 | 2020-10-11T14:21:30.000Z | 2020-10-11T14:21:30.000Z | dndgui/__init__.py | a24ma/msg2eml | dc5d23339cd231991918fc6956a94a30308b72d5 | [
"MIT"
] | null | null | null | dndgui/__init__.py | a24ma/msg2eml | dc5d23339cd231991918fc6956a94a30308b72d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from dndgui.gui import MainForm
| 15 | 32 | 0.616667 |
from dndgui.gui import MainForm
| true | true |
f73eec910b41c90b9ba6b5d397715bcc48037279 | 8,709 | py | Python | tetris.py | olemb/tetris | 198f2e88915ea9af792eede47179f677dc5e993b | [
"MIT"
] | 1 | 2020-12-06T14:07:17.000Z | 2020-12-06T14:07:17.000Z | tetris.py | olemb/tetris | 198f2e88915ea9af792eede47179f677dc5e993b | [
"MIT"
] | null | null | null | tetris.py | olemb/tetris | 198f2e88915ea9af792eede47179f677dc5e993b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Tetris for Python / Tkinter
Ole Martin Bjorndalen
https://github.com/olemb/tetris/
http://tetris.wikia.com/wiki/Tetris_Guideline
"""
import random
from dataclasses import dataclass, replace
import tkinter
# Each shape maps to its four rotation states (rot 0-3).  Every character
# of a rotation string is a hex digit naming one cell of a 4x4 grid
# (row = digit // 4, col = digit % 4); get_piece_blocks() decodes this.
shapes = {
    # See README.md for format.
    'O': ['56a9', '6a95', 'a956', '956a'],
    'I': ['4567', '26ae', 'ba98', 'd951'],
    'J': ['0456', '2159', 'a654', '8951'],
    'L': ['2654', 'a951', '8456', '0159'],
    'T': ['1456', '6159', '9654', '4951'],
    'Z': ['0156', '2659', 'a954', '8451'],
    'S': ['1254', 'a651', '8956', '0459'],
}
@dataclass(frozen=True)
class Piece:
    """Immutable falling piece: shape letter, rotation index and board position.

    (x, y) is the origin of the piece's 4x4 shape grid on the board; the
    grid's rows extend downward from y (see get_piece_blocks()).
    """
    shape: str    # one of the keys of `shapes`
    rot: int = 0  # rotation index, always used modulo 4
    x: int = 0
    y: int = 0
def get_piece_blocks(piece):
    """Yield the absolute (x, y) board coordinates of each block of *piece*."""
    rotation = shapes[piece.shape][piece.rot % 4]
    for digit in rotation:
        cell = int(digit, 16)          # hex digit -> index into a 4x4 grid
        row, col = cell // 4, cell % 4
        yield piece.x + col, piece.y - row
def move_piece(piece, *, rot=0, dx=0, dy=0):
    """Return a copy of *piece* rotated by *rot* steps and shifted by (dx, dy)."""
    return replace(
        piece,
        rot=(piece.rot + rot) % 4,
        x=piece.x + dx,
        y=piece.y + dy,
    )
def get_wall_kicks(piece, *, rot=0):
    """Return the rotated piece plus its wall-kick candidates.

    Candidates are tried in place first, then nudged left, right and down.
    """
    offsets = ((0, 0), (-1, 0), (1, 0), (0, -1))
    candidates = []
    for dx, dy in offsets:
        candidates.append(move_piece(piece, rot=rot, dx=dx, dy=dy))
    return candidates
def piece_fits(field, piece):
    """Return True if every block of *piece* is inside *field* and lands on
    an empty cell.

    field -- list of rows (row 0 at the bottom); a truthy cell is occupied.
    piece -- a Piece whose blocks come from get_piece_blocks().
    """
    height = len(field)
    # Guard the empty-field case instead of raising IndexError on field[0].
    width = len(field[0]) if field else 0

    for x, y in get_piece_blocks(piece):
        if not (0 <= x < width and 0 <= y < height):
            # Block sticks out of the playing field.
            return False
        if field[y][x]:
            # Cell is already occupied by a frozen block.
            return False
    # The original used `for/else` with no `break`, so its `else` always
    # ran; a plain return after the loop states the same thing clearly.
    return True
def random_shape_bag():
    """Yield shape letters forever, 7-bag style: each pass hands out every
    shape exactly once in shuffled order, bounding droughts of any piece."""
    bag = list(shapes)
    # Make sure we start off the first bag with an easy piece.
    while True:
        random.shuffle(bag)
        if bag[0] in 'IJLT':
            break
    while True:
        yield from bag
        # Reshuffle only after a bag is exhausted, so the first bag keeps
        # its easy leading piece.
        random.shuffle(bag)
def make_rows(width, height):
    """Return *height* fresh rows of *width* cells; '' marks an empty cell."""
    rows = []
    for _ in range(height):
        rows.append([''] * width)
    return rows
class Tetris:
    """Pure game state: the field, the falling piece and the score.

    The field is a list of rows with row 0 at the bottom; a cell holds a
    shape letter for a frozen block or '' when empty.  No UI code here.
    """

    def __init__(self, width=10, height=16):
        self.width = width
        self.height = height
        self.game_over = False
        self.score = 0
        self._random_shapes = random_shape_bag()
        self.field = make_rows(width, height)
        self.piece = self._get_next_piece()

    def _get_next_piece(self):
        # Spawn the next piece roughly centered at the top row.
        shape = next(self._random_shapes)
        centered = self.width // 2 - 2
        top = self.height - 1
        return Piece(shape, x=centered, y=top)

    def _place_new_piece(self):
        # If the fresh piece collides immediately, the stack has topped out.
        self.piece = self._get_next_piece()
        if not piece_fits(self.field, self.piece):
            self.game_over = True

    def _freeze_piece(self):
        # Copy the landed piece's blocks into the field as frozen cells.
        for x, y in get_piece_blocks(self.piece):
            self.field[y][x] = self.piece.shape

    def _remove_full_rows(self):
        # Drop completed rows, score one point per row, refill from the top.
        self.field = [row for row in self.field if not all(row)]
        num_rows_cleared = self.height - len(self.field)
        self.score += num_rows_cleared
        self.field += make_rows(self.width, num_rows_cleared)

    def _move(self, *, rot=0, dx=0, dy=0):
        # Rotations get wall-kick candidates; plain moves have exactly one.
        if rot:
            candidate_pieces = get_wall_kicks(self.piece, rot=rot)
        else:
            candidate_pieces = [move_piece(self.piece, dx=dx, dy=dy)]
        for piece in candidate_pieces:
            if piece_fits(self.field, piece):
                self.piece = piece
                return
        # A blocked downward move means the piece has landed: freeze it,
        # clear rows, then spawn the next piece (order matters here).
        tried_to_move_down = dy == -1
        if tried_to_move_down:
            self._freeze_piece()
            self._remove_full_rows()
            self._place_new_piece()

    def move(self, movement):
        """Apply one player move; *movement* is one of the keys below."""
        if not self.game_over:
            args = {
                'left': {'dx': -1},
                'right': {'dx': 1},
                'down': {'dy': -1},
                'rotleft': {'rot': -1},
                'rotright': {'rot': 1},
            }[movement]
            self._move(**args)
# Colors from Flatris.  Keys match the shape letters used in `shapes`;
# the empty-string key is the fill used for empty cells.
colors = {
    'I': '#3cc7d6', # Cyan.
    'O': '#fbb414', # Yellow.
    'T': '#b04497', # Magenta.
    'J': '#3993d0', # Blue.
    'L': '#ed652f', # Orange.
    'S': '#95c43d', # Green.
    'Z': '#e84138', # Red.
    '': '#ecf0f1',  # (Background color.)
}
class BlockDisplay(tkinter.Canvas):
    """Canvas that renders the board as a grid of colored rectangles.

    Cells are addressed with display[x, y] = shape-letter; '' clears a cell.
    """

    def __init__(self, parent, width, height, block_size=40):
        tkinter.Canvas.__init__(self, parent,
                                width=width * block_size,
                                height=height * block_size)
        self.block_size = block_size
        self.width = width
        self.height = height
        self.color_mode = True  # False -> monochrome rendering
        # One canvas rectangle per cell, keyed by (x, y) board coordinate.
        self.blocks = {
            (x, y): self._create_block(x, y)
            for x in range(width)
            for y in range(height)
        }

    def _create_block(self, x, y):
        # Board y grows upward but canvas y grows downward, so flip it.
        flipped_y = self.height - y - 1
        y = flipped_y
        size = self.block_size
        return self.create_rectangle(
            x * size,
            y * size,
            (x + 1) * size,
            (y + 1) * size,
            fill='',
            outline='',
        )

    def __setitem__(self, pos, char):
        # Lowercase letters mark the falling piece, uppercase frozen blocks;
        # monochrome mode uses that distinction for its two shades.
        if self.color_mode:
            fill = colors[char.upper()]
        else:
            if char == '':
                fill = colors['']
            elif char.isupper():
                fill = 'gray50'
            else:
                fill = 'black'
        block = self.blocks[pos]
        self.itemconfigure(block, fill=fill)

    def clear(self):
        self.itemconfigure('all', fill='')

    def pause(self):
        # Stipple the whole board to make the paused state visible.
        self.itemconfigure('all', stipple='gray50')

    def resume(self):
        self.itemconfigure('all', stipple='')
class TetrisTk:
    """Tkinter front end: owns the Tk loop, gravity timer and key bindings."""

    def __init__(self):
        self.tk = tk = tkinter.Tk()
        self.tk.title('Tetris')
        self.tetris = Tetris()
        self.display = BlockDisplay(tk, self.tetris.width, self.tetris.height)
        self.display.pack(side=tkinter.TOP, fill=tkinter.X)
        self.score_view = tkinter.Label(self.tk, text='')
        self.score_view.pack(side=tkinter.TOP, fill=tkinter.X)
        self.score_view['font'] = 'Helvetica 30'
        tk.bind('<KeyPress>', self.keypress)
        self.paused = True
        self.fall_id = None
        self.redraw()
        self.resume()
        tk.mainloop()

    def fall(self):
        # One gravity tick: drop, redraw, reschedule unless the game ended.
        self.tetris.move('down')
        self.redraw()
        if self.tetris.game_over:
            self.pause()
        else:
            self.schedule_fall()

    def schedule_fall(self):
        # In case we're already called once.
        self.cancel_fall()
        self.fall_id = self.tk.after(500, self.fall)

    def cancel_fall(self):
        if self.fall_id is not None:
            self.tk.after_cancel(self.fall_id)
            self.fall_id = None

    def _draw_field(self):
        # Repaint every cell from the game state (frozen blocks only).
        for y, row in enumerate(self.tetris.field):
            for x, char in enumerate(row):
                self.display[x, y] = char

    def _draw_piece(self):
        # The falling piece is drawn lowercase so the display can tell it
        # apart from frozen (uppercase) blocks.
        piece = self.tetris.piece
        char = piece.shape.lower()
        for x, y in get_piece_blocks(piece):
            self.display[x, y] = char

    def redraw(self):
        self._draw_field()
        if not self.tetris.game_over:
            self._draw_piece()
        if self.tetris.game_over:
            self.pause()
        self.score_view['text'] = str(self.tetris.score)

    def pause(self):
        if not self.paused:
            self.paused = True
            self.display.pause()
            self.cancel_fall()

    def resume(self):
        if self.paused:
            self.paused = False
            self.display.resume()
            self.schedule_fall()

    def new_game(self):
        self.tetris = Tetris()
        self.display.resume()
        self.resume()

    def toggle_pause(self):
        # Space/Escape: restart after game over, otherwise pause/resume.
        if self.tetris.game_over:
            self.new_game()
        elif self.paused:
            self.resume()
        else:
            self.pause()

    def toggle_colors(self):
        self.display.color_mode = not self.display.color_mode

    def keypress(self, event):
        # Pause/color keys always work; movement keys only while running.
        commands = {
            'Escape': self.toggle_pause,
            'space': self.toggle_pause,
            'c': self.toggle_colors,
        }
        if not self.paused:
            commands.update({
                'Up': lambda: self.tetris.move('rotleft'),
                'Left': lambda: self.tetris.move('left'),
                'Right': lambda: self.tetris.move('right'),
                'Down': lambda: self.tetris.move('down'),
                'w': lambda: self.tetris.move('rotleft'),
                'a': lambda: self.tetris.move('left'),
                's': lambda: self.tetris.move('down'),
                'd': lambda: self.tetris.move('right'),
            })
        if event.keysym in commands.keys():
            commands[event.keysym]()
            self.redraw()
if __name__ == '__main__':
TetrisTk()
| 26.390909 | 78 | 0.531289 |
import random
from dataclasses import dataclass, replace
import tkinter
shapes = {
'O': ['56a9', '6a95', 'a956', '956a'],
'I': ['4567', '26ae', 'ba98', 'd951'],
'J': ['0456', '2159', 'a654', '8951'],
'L': ['2654', 'a951', '8456', '0159'],
'T': ['1456', '6159', '9654', '4951'],
'Z': ['0156', '2659', 'a954', '8451'],
'S': ['1254', 'a651', '8956', '0459'],
}
@dataclass(frozen=True)
class Piece:
shape: str
rot: int = 0
x: int = 0
y: int = 0
def get_piece_blocks(piece):
for char in shapes[piece.shape][piece.rot % 4]:
y, x = divmod(int(char, 16), 4)
yield piece.x + x, piece.y - y
def move_piece(piece, *, rot=0, dx=0, dy=0):
rot = (piece.rot + rot) % 4
x = piece.x + dx
y = piece.y + dy
return replace(piece, rot=rot, x=x, y=y)
def get_wall_kicks(piece, *, rot=0):
return [
move_piece(piece, rot=rot, dx=dx, dy=dy)
for dx, dy in [(0, 0), (-1, 0), (1, 0), (0, -1)]
]
def piece_fits(field, piece):
width = len(field[0])
height = len(field)
for x, y in get_piece_blocks(piece):
if not 0 <= x < width:
return False
elif not 0 <= y < height:
return False
elif field[y][x]:
return False
else:
return True
def random_shape_bag():
bag = list(shapes)
while True:
random.shuffle(bag)
if bag[0] in 'IJLT':
break
while True:
yield from bag
random.shuffle(bag)
def make_rows(width, height):
return [[''] * width for _ in range(height)]
class Tetris:
def __init__(self, width=10, height=16):
self.width = width
self.height = height
self.game_over = False
self.score = 0
self._random_shapes = random_shape_bag()
self.field = make_rows(width, height)
self.piece = self._get_next_piece()
def _get_next_piece(self):
shape = next(self._random_shapes)
centered = self.width // 2 - 2
top = self.height - 1
return Piece(shape, x=centered, y=top)
def _place_new_piece(self):
self.piece = self._get_next_piece()
if not piece_fits(self.field, self.piece):
self.game_over = True
def _freeze_piece(self):
for x, y in get_piece_blocks(self.piece):
self.field[y][x] = self.piece.shape
def _remove_full_rows(self):
self.field = [row for row in self.field if not all(row)]
num_rows_cleared = self.height - len(self.field)
self.score += num_rows_cleared
self.field += make_rows(self.width, num_rows_cleared)
def _move(self, *, rot=0, dx=0, dy=0):
if rot:
candidate_pieces = get_wall_kicks(self.piece, rot=rot)
else:
candidate_pieces = [move_piece(self.piece, dx=dx, dy=dy)]
for piece in candidate_pieces:
if piece_fits(self.field, piece):
self.piece = piece
return
tried_to_move_down = dy == -1
if tried_to_move_down:
self._freeze_piece()
self._remove_full_rows()
self._place_new_piece()
def move(self, movement):
if not self.game_over:
args = {
'left': {'dx': -1},
'right': {'dx': 1},
'down': {'dy': -1},
'rotleft': {'rot': -1},
'rotright': {'rot': 1},
}[movement]
self._move(**args)
colors = {
'I': '#3cc7d6',
'O': '#fbb414',
'T': '#b04497',
'J': '#3993d0',
'L': '#ed652f',
'S': '#95c43d',
'Z': '#e84138',
'': '#ecf0f1',
}
class BlockDisplay(tkinter.Canvas):
def __init__(self, parent, width, height, block_size=40):
tkinter.Canvas.__init__(self, parent,
width=width * block_size,
height=height * block_size)
self.block_size = block_size
self.width = width
self.height = height
self.color_mode = True
self.blocks = {
(x, y): self._create_block(x, y)
for x in range(width)
for y in range(height)
}
def _create_block(self, x, y):
flipped_y = self.height - y - 1
y = flipped_y
size = self.block_size
return self.create_rectangle(
x * size,
y * size,
(x + 1) * size,
(y + 1) * size,
fill='',
outline='',
)
def __setitem__(self, pos, char):
if self.color_mode:
fill = colors[char.upper()]
else:
if char == '':
fill = colors['']
elif char.isupper():
fill = 'gray50'
else:
fill = 'black'
block = self.blocks[pos]
self.itemconfigure(block, fill=fill)
def clear(self):
self.itemconfigure('all', fill='')
def pause(self):
self.itemconfigure('all', stipple='gray50')
def resume(self):
self.itemconfigure('all', stipple='')
class TetrisTk:
def __init__(self):
self.tk = tk = tkinter.Tk()
self.tk.title('Tetris')
self.tetris = Tetris()
self.display = BlockDisplay(tk, self.tetris.width, self.tetris.height)
self.display.pack(side=tkinter.TOP, fill=tkinter.X)
self.score_view = tkinter.Label(self.tk, text='')
self.score_view.pack(side=tkinter.TOP, fill=tkinter.X)
self.score_view['font'] = 'Helvetica 30'
tk.bind('<KeyPress>', self.keypress)
self.paused = True
self.fall_id = None
self.redraw()
self.resume()
tk.mainloop()
def fall(self):
self.tetris.move('down')
self.redraw()
if self.tetris.game_over:
self.pause()
else:
self.schedule_fall()
def schedule_fall(self):
self.cancel_fall()
self.fall_id = self.tk.after(500, self.fall)
def cancel_fall(self):
if self.fall_id is not None:
self.tk.after_cancel(self.fall_id)
self.fall_id = None
def _draw_field(self):
for y, row in enumerate(self.tetris.field):
for x, char in enumerate(row):
self.display[x, y] = char
def _draw_piece(self):
piece = self.tetris.piece
char = piece.shape.lower()
for x, y in get_piece_blocks(piece):
self.display[x, y] = char
def redraw(self):
self._draw_field()
if not self.tetris.game_over:
self._draw_piece()
if self.tetris.game_over:
self.pause()
self.score_view['text'] = str(self.tetris.score)
def pause(self):
if not self.paused:
self.paused = True
self.display.pause()
self.cancel_fall()
def resume(self):
if self.paused:
self.paused = False
self.display.resume()
self.schedule_fall()
def new_game(self):
self.tetris = Tetris()
self.display.resume()
self.resume()
def toggle_pause(self):
if self.tetris.game_over:
self.new_game()
elif self.paused:
self.resume()
else:
self.pause()
def toggle_colors(self):
self.display.color_mode = not self.display.color_mode
def keypress(self, event):
commands = {
'Escape': self.toggle_pause,
'space': self.toggle_pause,
'c': self.toggle_colors,
}
if not self.paused:
commands.update({
'Up': lambda: self.tetris.move('rotleft'),
'Left': lambda: self.tetris.move('left'),
'Right': lambda: self.tetris.move('right'),
'Down': lambda: self.tetris.move('down'),
'w': lambda: self.tetris.move('rotleft'),
'a': lambda: self.tetris.move('left'),
's': lambda: self.tetris.move('down'),
'd': lambda: self.tetris.move('right'),
})
if event.keysym in commands.keys():
commands[event.keysym]()
self.redraw()
if __name__ == '__main__':
TetrisTk()
| true | true |
f73eed23beb24e99fb12f3935fece405e40ae4ef | 946 | py | Python | tests/programs/import_variants/some_package/Child1.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | 5,421 | 2018-09-24T08:04:06.000Z | 2022-03-31T20:02:37.000Z | tests/programs/import_variants/some_package/Child1.py | ztessler/Nuitka | 04c9a5471b702a0e5f28398f2661c93b83ab0d1a | [
"Apache-2.0"
] | 1,348 | 2018-09-22T13:41:00.000Z | 2022-03-31T22:33:40.000Z | tests/programs/import_variants/some_package/Child1.py | ztessler/Nuitka | 04c9a5471b702a0e5f28398f2661c93b83ab0d1a | [
"Apache-2.0"
] | 396 | 2018-09-28T15:37:03.000Z | 2022-03-29T10:52:09.000Z | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from . import Child3 as localname
print("*** Child1: Begin")
print("*** Child1: Imported Child3", localname)
print("*** Child1: End")
| 33.785714 | 79 | 0.714588 |
from __future__ import print_function
from . import Child3 as localname
print("*** Child1: Begin")
print("*** Child1: Imported Child3", localname)
print("*** Child1: End")
| true | true |
f73eed4de0acdd9e394274d88cb030e2f3e15656 | 58,707 | py | Python | ietf/meeting/models.py | jimfenton/datatracker | 25c7c4801a0d971f30027d7e1ac21120cefeb242 | [
"BSD-3-Clause"
] | null | null | null | ietf/meeting/models.py | jimfenton/datatracker | 25c7c4801a0d971f30027d7e1ac21120cefeb242 | [
"BSD-3-Clause"
] | 9 | 2022-01-14T05:56:29.000Z | 2022-01-20T05:27:41.000Z | ietf/meeting/models.py | jimfenton/datatracker | 25c7c4801a0d971f30027d7e1ac21120cefeb242 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T21:43:25.000Z | 2022-02-22T21:43:25.000Z | # Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# old meeting models can be found in ../proceedings/models.py
import datetime
import io
import os
import pytz
import random
import re
import string
from collections import namedtuple
from pathlib import Path
from urllib.parse import urljoin
import debug # pyflakes:ignore
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from django.db.models import Max, Subquery, OuterRef, TextField, Value, Q
from django.db.models.functions import Coalesce
from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.group.utils import can_manage_materials
from ietf.name.models import (
MeetingTypeName, TimeSlotTypeName, SessionStatusName, ConstraintName, RoomResourceName,
ImportantDateName, TimerangeName, SlideSubmissionStatusName, ProceedingsMaterialTypeName,
SessionPurposeName,
)
from ietf.person.models import Person
from ietf.utils.decorators import memoize
from ietf.utils.storage import NoLocationMigrationFileSystemStorage
from ietf.utils.text import xslugify
from ietf.utils.timezone import date2datetime
from ietf.utils.models import ForeignKey
from ietf.utils.validators import (
MaxImageSizeValidator, WrappedValidator, validate_file_size, validate_mime_type,
validate_file_extension,
)
from ietf.utils.fields import MissingOkImageField
from ietf.utils.log import unreachable
# Build the country/timezone choice lists for model fields at import time.
countries = list(pytz.country_names.items())
countries.sort(key=lambda x: x[1])  # sort by country name, not ISO code

timezones = []
for name in pytz.common_timezones:
    tzfn = os.path.join(settings.TZDATA_ICS_PATH, name + ".ics")
    # Skip timezone names whose .ics file is only a symlink alias.
    if not os.path.islink(tzfn):
        timezones.append((name, name))
timezones.sort()
# this is used in models to format dates, as the built-in json serializer
# can not deal with them, and the django provided serializer is inaccessible.
from django.utils import datetime_safe
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def fmt_date(o):
    """Format date *o* as 'YYYY-MM-DD' using django's datetime_safe wrapper
    (which tolerates years that plain strftime may reject)."""
    return datetime_safe.new_date(o).strftime(DATE_FORMAT)
class Meeting(models.Model):
# number is either the number for IETF meetings, or some other
# identifier for interim meetings/IESG retreats/liaison summits/...
number = models.CharField(unique=True, max_length=64)
type = ForeignKey(MeetingTypeName)
# Date is useful when generating a set of timeslot for this meeting, but
# is not used to determine date for timeslot instances thereafter, as
# they have their own datetime field.
date = models.DateField()
days = models.IntegerField(default=7, null=False, validators=[MinValueValidator(1)],
help_text="The number of days the meeting lasts")
city = models.CharField(blank=True, max_length=255)
country = models.CharField(blank=True, max_length=2, choices=countries)
# We can't derive time-zone from country, as there are some that have
# more than one timezone, and the pytz module doesn't provide timezone
# lookup information for all relevant city/country combinations.
time_zone = models.CharField(blank=True, max_length=255, choices=timezones)
idsubmit_cutoff_day_offset_00 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_00,
help_text = "The number of days before the meeting start date when the submission of -00 drafts will be closed.")
idsubmit_cutoff_day_offset_01 = models.IntegerField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_01,
help_text = "The number of days before the meeting start date when the submission of -01 drafts etc. will be closed.")
idsubmit_cutoff_time_utc = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_TIME_UTC,
help_text = "The time of day (UTC) after which submission will be closed. Use for example 23:59:59.")
idsubmit_cutoff_warning_days = models.DurationField(blank=True,
default=settings.IDSUBMIT_DEFAULT_CUTOFF_WARNING_DAYS,
help_text = "How long before the 00 cutoff to start showing cutoff warnings. Use for example '21' or '21 days'.")
submission_start_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_START_DAYS,
help_text = "The number of days before the meeting start date after which meeting materials will be accepted.")
submission_cutoff_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CUTOFF_DAYS,
help_text = "The number of days after the meeting start date in which new meeting materials will be accepted.")
submission_correction_day_offset = models.IntegerField(blank=True,
default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CORRECTION_DAYS,
help_text = "The number of days after the meeting start date in which updates to existing meeting materials will be accepted.")
venue_name = models.CharField(blank=True, max_length=255)
venue_addr = models.TextField(blank=True)
break_area = models.CharField(blank=True, max_length=255)
reg_area = models.CharField(blank=True, max_length=255)
agenda_info_note = models.TextField(blank=True, help_text="Text in this field will be placed at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
agenda_warning_note = models.TextField(blank=True, help_text="Text in this field will be placed more prominently at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
schedule = ForeignKey('Schedule',null=True,blank=True, related_name='+')
session_request_lock_message = models.CharField(blank=True,max_length=255) # locked if not empty
proceedings_final = models.BooleanField(default=False, help_text="Are the proceedings for this meeting complete?")
acknowledgements = models.TextField(blank=True, help_text="Acknowledgements for use in meeting proceedings. Use ReStructuredText markup.")
overview = ForeignKey(DBTemplate, related_name='overview', null=True, editable=False)
show_important_dates = models.BooleanField(default=False)
attendees = models.IntegerField(blank=True, null=True, default=None,
help_text="Number of Attendees for backfilled meetings, leave it blank for new meetings, and then it is calculated from the registrations")
group_conflict_types = models.ManyToManyField(
ConstraintName, blank=True, limit_choices_to=dict(is_group_conflict=True),
help_text='Types of scheduling conflict between groups to consider')
def __str__(self):
if self.type_id == "ietf":
return u"IETF-%s" % (self.number)
else:
return self.number
def get_meeting_date (self,offset):
return self.date + datetime.timedelta(days=offset)
def end_date(self):
return self.get_meeting_date(self.days-1)
def get_00_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='00cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_01_cutoff(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
if not importantdate:
importantdate = self.importantdate_set.filter(name_id='01cutoff').first()
if importantdate:
cutoff_date = importantdate.date
else:
cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
return cutoff_time
def get_reopen_time(self):
start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day)
local_tz = pytz.timezone(self.time_zone)
local_date = local_tz.localize(start_date)
cutoff = self.get_00_cutoff()
if cutoff.date() == start_date:
# no cutoff, so no local-time re-open
reopen_time = cutoff
else:
# reopen time is in local timezone. May need policy change?? XXX
reopen_time = local_date + self.idsubmit_cutoff_time_utc
return reopen_time
@classmethod
def get_current_meeting(cls, type="ietf"):
return cls.objects.filter(type=type, date__gte=datetime.datetime.today()-datetime.timedelta(days=7) ).order_by('date').first()
def get_first_cut_off(self):
return self.get_00_cutoff()
def get_second_cut_off(self):
return self.get_01_cutoff()
def get_ietf_monday(self):
for offset in range(self.days):
date = self.date+datetime.timedelta(days=offset)
if date.weekday() == 0: # Monday is 0
return date
def get_materials_path(self):
return os.path.join(settings.AGENDA_PATH,self.number)
# the various dates are currently computed
def get_submission_start_date(self):
return self.date - datetime.timedelta(days=self.submission_start_day_offset)
def get_submission_cut_off_date(self):
importantdate = self.importantdate_set.filter(name_id='procsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_cutoff_day_offset)
def get_submission_correction_date(self):
importantdate = self.importantdate_set.filter(name_id='revsub').first()
if importantdate:
return importantdate.date
else:
return self.date + datetime.timedelta(days=self.submission_correction_day_offset)
def enabled_constraint_names(self):
return ConstraintName.objects.filter(
Q(is_group_conflict=False) # any non-group-conflict constraints
| Q(is_group_conflict=True, meeting=self) # or specifically enabled for this meeting
)
def enabled_constraints(self):
return self.constraint_set.filter(name__in=self.enabled_constraint_names())
def get_schedule_by_name(self, name):
return self.schedule_set.filter(name=name).first()
def get_number(self):
"Return integer meeting number for ietf meetings, rather than strings."
if self.number.isdigit():
return int(self.number)
else:
return None
def get_proceedings_materials(self):
"""Get proceedings materials"""
return self.proceedings_materials.filter(
document__states__slug='active', document__states__type_id='procmaterials'
).order_by('type__order')
def get_attendance(self):
"""Get the meeting attendance from the MeetingRegistrations
Returns a NamedTuple with onsite and online attributes. Returns None if the record is unavailable
for this meeting.
"""
number = self.get_number()
if number is None or number < 110:
return None
Attendance = namedtuple('Attendance', 'onsite online')
return Attendance(
onsite=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='in_person',
).distinct().count(),
online=Person.objects.filter(
meetingregistration__meeting=self,
meetingregistration__attended=True,
meetingregistration__reg_type__contains='remote',
).distinct().count(),
)
@property
def proceedings_format_version(self):
"""Indicate version of proceedings that should be used for this meeting
Only makes sense for IETF meeting. Returns None for any meeting without a purely numeric number.
Uses settings.PROCEEDINGS_VERSION_CHANGES. Versions start at 1. Entries
in the array are the first meeting number using each version.
"""
if not hasattr(self, '_proceedings_format_version'):
if not self.number.isdigit():
version = None # no version for non-IETF meeting
else:
version = len(settings.PROCEEDINGS_VERSION_CHANGES) # start assuming latest version
mtg_number = self.get_number()
if mtg_number is None:
unreachable('2021-08-10')
else:
# Find the index of the first entry in the version change array that
# is >= this meeting's number. The first entry in the array is 0, so the
# version is always >= 1 for positive meeting numbers.
for vers, threshold in enumerate(settings.PROCEEDINGS_VERSION_CHANGES):
if mtg_number < threshold:
version = vers
break
self._proceedings_format_version = version # save this for later
return self._proceedings_format_version
@property
def session_constraintnames(self):
"""Gets a list of the constraint names that should be used for this meeting
Anticipated that this will soon become a many-to-many relationship with ConstraintName
(see issue #2770). Making this a @property allows use of the .all(), .filter(), etc,
so that other code should not need changes when this is replaced.
"""
try:
mtg_num = int(self.number)
except ValueError:
mtg_num = None # should not come up, but this method should not fail
if mtg_num is None or mtg_num >= 106:
# These meetings used the old 'conflic?' constraint types labeled as though
# they were the new types.
slugs = ('chair_conflict', 'tech_overlap', 'key_participant')
else:
slugs = ('conflict', 'conflic2', 'conflic3')
return ConstraintName.objects.filter(slug__in=slugs)
def base_url(self):
return "/meeting/%s" % (self.number, )
def build_timeslices(self):
"""Get unique day/time/timeslot data for meeting
Returns a list of days, time intervals for each day, and timeslots for each day,
with repeated days/time intervals removed. Ignores timeslots that do not have a
location. The slots return value contains only one TimeSlot for each distinct
time interval.
"""
days = [] # the days of the meetings
time_slices = {} # the times on each day
slots = {}
for ts in self.timeslot_set.all():
if ts.location_id is None:
continue
ymd = ts.time.date()
if ymd not in time_slices:
time_slices[ymd] = []
slots[ymd] = []
days.append(ymd)
if ymd in time_slices:
# only keep unique entries
if [ts.time, ts.time + ts.duration, ts.duration.seconds] not in time_slices[ymd]:
time_slices[ymd].append([ts.time, ts.time + ts.duration, ts.duration.seconds])
slots[ymd].append(ts)
days.sort()
for ymd in time_slices:
# Make sure these sort the same way
time_slices[ymd].sort()
slots[ymd].sort(key=lambda x: (x.time, x.duration))
return days,time_slices,slots
# this functions makes a list of timeslices and rooms, and
# makes sure that all schedules have all of them.
# def create_all_timeslots(self):
# alltimeslots = self.timeslot_set.all()
# for sched in self.schedule_set.all():
# ts_hash = {}
# for ss in sched.assignments.all():
# ts_hash[ss.timeslot] = ss
# for ts in alltimeslots:
# if not (ts in ts_hash):
# SchedTimeSessAssignment.objects.create(schedule = sched,
# timeslot = ts)
def vtimezone(self):
if self.time_zone:
try:
tzfn = os.path.join(settings.TZDATA_ICS_PATH, self.time_zone + ".ics")
if os.path.exists(tzfn):
with io.open(tzfn) as tzf:
icstext = tzf.read()
vtimezone = re.search("(?sm)(\nBEGIN:VTIMEZONE.*\nEND:VTIMEZONE\n)", icstext).group(1).strip()
if vtimezone:
vtimezone += "\n"
return vtimezone
except IOError:
pass
return ''
def set_official_schedule(self, schedule):
if self.schedule != schedule:
self.schedule = schedule
self.save()
    def updated(self):
        """Return the latest modification time among this meeting's timeslots,
        sessions, and official-schedule assignments, localized to the
        production time zone.
        """
        min_time = datetime.datetime(1970, 1, 1, 0, 0, 0) # should be Meeting.modified, but we don't have that
        timeslots_updated = self.timeslot_set.aggregate(Max('modified'))["modified__max"] or min_time
        sessions_updated = self.session_set.aggregate(Max('modified'))["modified__max"] or min_time
        assignments_updated = min_time
        if self.schedule:
            # consider assignments from the official schedule and its base schedule, if any
            assignments_updated = SchedTimeSessAssignment.objects.filter(schedule__in=[self.schedule, self.schedule.base if self.schedule else None]).aggregate(Max('modified'))["modified__max"] or min_time
        ts = max(timeslots_updated, sessions_updated, assignments_updated)
        tz = pytz.timezone(settings.PRODUCTION_TIMEZONE)
        ts = tz.localize(ts)
        return ts
    @memoize
    def previous_meeting(self):
        """Return the most recent earlier Meeting of the same type, or None."""
        return Meeting.objects.filter(type_id=self.type_id,date__lt=self.date).order_by('-date').first()
    class Meta:
        # newest meetings first
        ordering = ["-date", "-id"]
        indexes = [
            models.Index(fields=['-date', '-id']),
        ]
# === Rooms, Resources, Floorplans =============================================
class ResourceAssociation(models.Model):
    """A type of resource that can be attached to a meeting room (projector,
    audio stream, etc.), with an icon and a description for display."""
    name = ForeignKey(RoomResourceName)
    icon = models.CharField(max_length=64) # icon to be found in /static/img
    desc = models.CharField(max_length=256)
    def __str__(self):
        return self.desc
class Room(models.Model):
    """A meeting room belonging to a specific Meeting, with optional
    floor-plan coordinates and streaming/conferencing URL resources."""
    meeting = ForeignKey(Meeting)
    modified = models.DateTimeField(auto_now=True)
    name = models.CharField(max_length=255)
    functional_name = models.CharField(max_length=255, blank = True)
    capacity = models.IntegerField(null=True, blank=True)
    resources = models.ManyToManyField(ResourceAssociation, blank = True)
    session_types = models.ManyToManyField(TimeSlotTypeName, blank = True)
    # floorplan-related properties
    floorplan = ForeignKey('FloorPlan', null=True, blank=True, default=None)
    # floorplan: room pixel position : (0,0) is top left of image, (xd, yd)
    # is room width, height.
    x1 = models.SmallIntegerField(null=True, blank=True, default=None)
    y1 = models.SmallIntegerField(null=True, blank=True, default=None)
    x2 = models.SmallIntegerField(null=True, blank=True, default=None)
    y2 = models.SmallIntegerField(null=True, blank=True, default=None)
    # end floorplan-related stuff
    def __str__(self):
        return u"%s size: %s" % (self.name, self.capacity)
    def delete_timeslots(self):
        """Delete all timeslots located in this room, including their assignments."""
        for ts in self.timeslot_set.all():
            ts.sessionassignments.all().delete()
            ts.delete()
    def create_timeslots(self):
        """Create a TimeSlot in this room for each distinct time interval of the meeting."""
        days, time_slices, slots = self.meeting.build_timeslices()
        for day in days:
            for ts in slots[day]:
                TimeSlot.objects.create(type_id=ts.type_id,
                                        meeting=self.meeting,
                                        name=ts.name,
                                        time=ts.time,
                                        location=self,
                                        duration=ts.duration)
        #self.meeting.create_all_timeslots()
    def dom_id(self):
        return "room%u" % (self.pk)
    # floorplan support
    def floorplan_url(self):
        """Return the floor-plan URL highlighting this room, or None if unavailable."""
        mtg_num = self.meeting.get_number()
        if not mtg_num:
            return None
        elif mtg_num <= settings.FLOORPLAN_LAST_LEGACY_MEETING:
            base_url = settings.FLOORPLAN_LEGACY_BASE_URL.format(meeting=self.meeting)
        elif self.floorplan:
            base_url = urlreverse('ietf.meeting.views.floor_plan', kwargs=dict(num=mtg_num))
        else:
            return None
        return f'{base_url}?room={xslugify(self.name)}'
    # Bounding-box accessors for the room's floor-plan rectangle.  These
    # compare against None explicitly: the previous truthiness test
    # "if (self.x1 and self.x2)" wrongly treated a legitimate coordinate
    # of 0 (the image's top/left edge) as "unset", e.g. x1=0, x2=500 made
    # right() return 0 instead of 500.
    def left(self):
        return min(self.x1, self.x2) if (self.x1 is not None and self.x2 is not None) else 0
    def top(self):
        return min(self.y1, self.y2) if (self.y1 is not None and self.y2 is not None) else 0
    def right(self):
        return max(self.x1, self.x2) if (self.x1 is not None and self.x2 is not None) else 0
    def bottom(self):
        return max(self.y1, self.y2) if (self.y1 is not None and self.y2 is not None) else 0
    def functional_display_name(self):
        """Display form of functional_name; suppressed for breakout/numbered rooms."""
        if not self.functional_name:
            return ""
        if 'breakout' in self.functional_name.lower():
            return ""
        if self.functional_name[0].isdigit():
            return ""
        return self.functional_name
    # audio stream support
    def audio_stream_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id == 'audiostream']
        return urlresources[0].url if urlresources else None
    def video_stream_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho']]
        return urlresources[0].url if urlresources else None
    def onsite_tool_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho_onsite']]
        return urlresources[0].url if urlresources else None
    def webex_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['webex']]
        return urlresources[0].url if urlresources else None
    #
    class Meta:
        ordering = ["-id"]
class UrlResource(models.Model):
    """A named URL attached to a Room, for things like audio stream urls and
    meetecho stream urls."""
    name = ForeignKey(RoomResourceName)
    room = ForeignKey(Room)
    url = models.URLField(null=True, blank=True)
def floorplan_path(instance, filename):
    """Compute the storage path for a FloorPlan image upload, keeping the
    uploaded file's extension."""
    _, ext = os.path.splitext(filename)
    return f"{settings.FLOORPLAN_MEDIA_DIR}/floorplan-{instance.meeting.number}-{xslugify(instance.name)}{ext}"
class FloorPlan(models.Model):
    """An image of one floor of a meeting venue, used to locate Rooms."""
    name = models.CharField(max_length=255)
    short = models.CharField(max_length=3, default='')
    modified= models.DateTimeField(auto_now=True)
    meeting = ForeignKey(Meeting)
    order = models.SmallIntegerField()
    image = models.ImageField(storage=NoLocationMigrationFileSystemStorage(), upload_to=floorplan_path, blank=True, default=None)
    #
    class Meta:
        ordering = ['-id',]
    #
    def __str__(self):
        return u'floorplan-%s-%s' % (self.meeting.number, xslugify(self.name))
# === Schedules, Sessions, Timeslots and Assignments ===========================
class TimeSlot(models.Model):
    """
    Everything that would appear on the meeting agenda of a meeting is
    mapped to a timeslot, including breaks. Sessions are connected to
    TimeSlots during scheduling.
    """
    meeting = ForeignKey(Meeting)
    type = ForeignKey(TimeSlotTypeName)
    name = models.CharField(max_length=255)
    time = models.DateTimeField()
    duration = models.DurationField(default=datetime.timedelta(0))
    location = ForeignKey(Room, blank=True, null=True)
    show_location = models.BooleanField(default=True, help_text="Show location in agenda.")
    sessions = models.ManyToManyField('Session', related_name='slots', through='SchedTimeSessAssignment', blank=True, help_text="Scheduled session, if any.")
    modified = models.DateTimeField(auto_now=True)
    #
    @property
    def session(self):
        """First session assigned to this slot by the official schedule (or
        its base schedule), cached per instance."""
        if not hasattr(self, "_session_cache"):
            # Guard on meeting.schedule rather than meeting: meeting is a
            # non-null FK (always truthy), so the previous "if self.meeting"
            # test never prevented the AttributeError raised when no official
            # schedule was set.  This now matches the equivalent filters in
            # Meeting.updated() and Session.official_timeslotassignment().
            self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
        return self._session_cache
    @property
    def time_desc(self):
        return "%s-%s" % (self.time.strftime("%H%M"), (self.time + self.duration).strftime("%H%M"))
    def meeting_date(self):
        return self.time.date()
    def registration(self):
        # below implements a object local cache
        # it tries to find a timeslot of type registration which starts at the same time as this slot
        # so that it can be shown at the top of the agenda.
        if not hasattr(self, '_reg_info'):
            try:
                self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
            except TimeSlot.DoesNotExist:
                self._reg_info = None
        return self._reg_info
    def __str__(self):
        location = self.get_location()
        if not location:
            location = u"(no location)"
        return u"%s: %s-%s %s, %s" % (self.meeting.number, self.time.strftime("%m-%d %H:%M"), (self.time + self.duration).strftime("%H:%M"), self.name, location)
    def end_time(self):
        return self.time + self.duration
    def get_hidden_location(self):
        """Location name regardless of show_location; cached per instance."""
        if not hasattr(self, '_cached_hidden_location'):
            location = self.location
            if location:
                location = location.name
            elif self.type_id == "reg":
                location = self.meeting.reg_area
            elif self.type_id == "break":
                location = self.meeting.break_area
            self._cached_hidden_location = location
        return self._cached_hidden_location
    def get_location(self):
        return self.get_hidden_location() if self.show_location else ""
    def get_functional_location(self):
        name_parts = []
        room = self.location
        if room and room.functional_name:
            name_parts.append(room.functional_name)
        location = self.get_hidden_location()
        if location:
            name_parts.append(location)
        return ' - '.join(name_parts)
    def get_html_location(self):
        if not hasattr(self, '_cached_html_location'):
            self._cached_html_location = self.get_location()
            if len(self._cached_html_location) > 8:
                self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
            else:
                # Restored '&nbsp;' entity here: the source had degraded to
                # replacing a space with a space (a no-op), which is
                # inconsistent with the mark_safe()/<wbr> HTML handling above.
                self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
        return self._cached_html_location
    def tz(self):
        """pytz timezone for the meeting, or None; cached per instance."""
        if not hasattr(self, '_cached_tz'):
            if self.meeting.time_zone:
                self._cached_tz = pytz.timezone(self.meeting.time_zone)
            else:
                self._cached_tz = None
        return self._cached_tz
    def tzname(self):
        if self.tz():
            return self.tz().tzname(self.time)
        else:
            return ""
    def utc_start_time(self):
        if self.tz():
            local_start_time = self.tz().localize(self.time)
            return local_start_time.astimezone(pytz.utc)
        else:
            return None
    def utc_end_time(self):
        utc_start = self.utc_start_time()
        # Add duration after converting start time, otherwise errors creep in around DST change
        return None if utc_start is None else utc_start + self.duration
    def local_start_time(self):
        if self.tz():
            return self.tz().localize(self.time)
        else:
            return None
    def local_end_time(self):
        local_start = self.local_start_time()
        # Add duration after converting start time, otherwise errors creep in around DST change
        return None if local_start is None else local_start + self.duration
    @property
    def js_identifier(self):
        # this returns a unique identifier that is js happy.
        #  {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}"
        # also must match:
        #  {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
        dom_id="ts%u" % (self.pk)
        if self.location is not None:
            dom_id = self.location.dom_id()
        return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))
    def delete_concurrent_timeslots(self):
        """Delete all timeslots which are in the same time as this slot"""
        # can not include duration in filter, because there is no support
        # for having it a WHERE clause.
        # below will delete self as well.
        for ts in self.meeting.timeslot_set.filter(time=self.time).all():
            if ts.duration!=self.duration:
                continue
            # now remove any schedule that might have been made to this
            # timeslot.
            ts.sessionassignments.all().delete()
            ts.delete()
    @property
    def slot_to_the_right(self):
        """
        Find a timeslot that comes next, in the same room. It must be on the same day,
        and it must have a gap of less than 11 minutes. (10 is the spec)

        (This text used to be a stray no-op string literal above the property;
        it is now the property's docstring.)
        """
        return self.meeting.timeslot_set.filter(
            location = self.location,      # same room!
            type     = self.type,          # must be same type (usually session)
            time__gt = self.time + self.duration,  # must be after this session
            time__lt = self.time + self.duration + datetime.timedelta(seconds=11*60)).first()
    class Meta:
        ordering = ["-time", "-id"]
        indexes = [
            models.Index(fields=['-time', '-id']),
        ]
# end of TimeSlot
class Schedule(models.Model):
    """
    Each person may have multiple schedules saved.
    A Schedule may be made visible, which means that it will show up in
    public drop down menus, etc. It may also be made public, which means
    that someone who knows about it by name/id would be able to reference
    it. A non-visible, public schedule might be passed around by the
    Secretariat to IESG members for review. Only the owner may edit the
    schedule, others may copy it
    """
    meeting = ForeignKey(Meeting, null=True, related_name='schedule_set')
    name = models.CharField(max_length=64, blank=False, help_text="Letters, numbers and -:_ allowed.", validators=[RegexValidator(r'^[A-Za-z0-9-:_]*$')])
    owner = ForeignKey(Person)
    visible = models.BooleanField("Show in agenda list", default=True, help_text="Show in the list of possible agendas for the meeting.")
    public = models.BooleanField(default=True, help_text="Allow others to see this agenda.")
    badness = models.IntegerField(null=True, blank=True)
    notes = models.TextField(blank=True)
    origin = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL, related_name="+")
    base = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL,
                      help_text="Sessions scheduled in the base schedule show up in this schedule too.", related_name="derivedschedule_set",
                      limit_choices_to={'base': None}) # prevent the inheritance from being more than one layer deep (no recursion)
    def __str__(self):
        return u"%s:%s(%s)" % (self.meeting, self.name, self.owner)
    def base_url(self):
        """Site-relative URL of this schedule's agenda page."""
        return "/meeting/%s/agenda/%s/%s" % (self.meeting.number, self.owner_email(), self.name)
    # temporary property to pacify the places where Schedule.assignments is used
    # @property
    # def schedtimesessassignment_set(self):
    #     return self.assignments
    #
    # def url_edit(self):
    #     return "/meeting/%s/agenda/%s/edit" % (self.meeting.number, self.name)
    #
    # @property
    # def relurl_edit(self):
    #     return self.url_edit("")
    def owner_email(self):
        # "noemail" placeholder keeps URLs constructible when the owner has no address
        return self.owner.email_address() or "noemail"
    @property
    def is_official(self):
        """True if this is the meeting's official schedule."""
        return (self.meeting.schedule == self)
    @property
    def is_official_record(self):
        """True if this is the official schedule of a meeting that has ended."""
        return (self.is_official and
                self.meeting.end_date() <= datetime.date.today() )
    # returns a dictionary {group -> [schedtimesessassignment+]}
    # and it has [] if the session is not placed.
    # if there is more than one session for that group,
    # then a list of them is returned (always a list)
    # NOTE(review): the four comment lines above look stale — official_token
    # below returns a string, and the described helper is not in this class.
    @property
    def official_token(self):
        if self.is_official:
            return "official"
        else:
            return "unofficial"
    def delete_assignments(self):
        """Delete all of this schedule's assignments, keeping the schedule itself."""
        self.assignments.all().delete()
    @property
    def qs_assignments_with_sessions(self):
        return self.assignments.filter(session__isnull=False)
    def qs_timeslots_in_use(self):
        """Get QuerySet containing timeslots used by the schedule"""
        return TimeSlot.objects.filter(sessionassignments__schedule=self)
    def qs_sessions_scheduled(self):
        """Get QuerySet containing sessions assigned to timeslots by this schedule"""
        return Session.objects.filter(timeslotassignments__schedule=self)
    def delete_schedule(self):
        """Delete this schedule together with all of its assignments."""
        self.assignments.all().delete()
        self.delete()
# to be renamed SchedTimeSessAssignments (stsa)
class SchedTimeSessAssignment(models.Model):
    """
    This model provides an N:M relationship between Session and TimeSlot.
    Each relationship is attached to the named schedule, which is owned by
    a specific person/user.
    """
    timeslot = ForeignKey('TimeSlot', null=False, blank=False, related_name='sessionassignments')
    session = ForeignKey('Session', null=True, default=None, related_name='timeslotassignments', help_text="Scheduled session.")
    schedule = ForeignKey('Schedule', null=False, blank=False, related_name='assignments')
    extendedfrom = ForeignKey('self', null=True, default=None, help_text="Timeslot this session is an extension of.")
    modified = models.DateTimeField(auto_now=True)
    notes = models.TextField(blank=True)
    badness = models.IntegerField(default=0, blank=True, null=True)
    pinned = models.BooleanField(default=False, help_text="Do not move session during automatic placement.")
    class Meta:
        ordering = ["timeslot__time", "timeslot__type__slug", "session__group__parent__name", "session__group__acronym", "session__name", ]
    def __str__(self):
        return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot)
    @property
    def room_name(self):
        """Name of the assigned room, or None if no timeslot/location."""
        return self.timeslot.location.name if self.timeslot and self.timeslot.location else None
    @property
    def acronym(self):
        # implicitly returns None when there is no session or group
        if self.session and self.session.group:
            return self.session.group.acronym
    @property
    def slot_to_the_right(self):
        """This schedule's assignment in the timeslot immediately following ours, if any."""
        s = self.timeslot.slot_to_the_right
        if s:
            return self.schedule.assignments.filter(timeslot=s).first()
        else:
            return None
    def meeting(self):
        """Get the meeting to which this assignment belongs"""
        # NOTE(review): reads self.session.meeting — an assignment with
        # session=None would raise AttributeError here; confirm callers only
        # invoke this for assignments that have a session.
        return self.session.meeting
    def slot_type(self):
        """Get the TimeSlotTypeName that applies to this assignment"""
        return self.timeslot.type
    def slug(self):
        """Return sensible id string for session, e.g. suitable for use as HTML anchor."""
        # NOTE(review): if timeslot is None but session is set, the strftime
        # branch below still dereferences self.timeslot — confirm that
        # combination cannot occur in practice.
        components = []
        components.append(self.schedule.meeting.number)
        if not self.timeslot:
            components.append("unknown")
        if not self.session or not (getattr(self.session, "historic_group", None) or self.session.group):
            components.append("unknown")
        else:
            components.append(self.timeslot.time.strftime("%Y-%m-%d-%a-%H%M"))
            g = getattr(self.session, "historic_group", None) or self.session.group
            if self.timeslot.type.slug in ('break', 'reg', 'other'):
                components.append(g.acronym)
                components.append(slugify(self.session.name))
            if self.timeslot.type.slug in ('regular', 'plenary'):
                if self.timeslot.type.slug == "plenary":
                    components.append("1plenary")
                else:
                    p = getattr(g, "historic_parent", None) or g.parent
                    if p and p.type_id in ("area", "irtf", 'ietf'):
                        components.append(p.acronym)
                components.append(g.acronym)
        return "-".join(components).lower()
class BusinessConstraint(models.Model):
    """
    Constraints on the scheduling that apply across all qualifying
    sessions in all meetings. Used by the ScheduleGenerator.
    """
    slug = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=255)
    penalty = models.IntegerField(default=0, help_text="The penalty for violating this kind of constraint; for instance 10 (small penalty) or 10000 (large penalty)")
class Constraint(models.Model):
    """
    Specifies a constraint on the scheduling.
    These constraints apply to a specific group during a specific meeting.

    Available types are:
    - conflict/conflic2/conflic3: a conflict between source and target WG/session,
      with varying priority. The first is used for a chair conflict, the second for
      technology overlap, third for key person conflict
    - bethere: a constraint between source WG and a particular person
    - timerange: can not meet during these times
    - time_relation: preference for a time difference between sessions
    - wg_adjacent: request for source WG to be adjacent (directly before or after,
      no breaks, same room) the target WG

    In the schedule editor, run-time, a couple non-persistent ConstraintName instances
    are created for rendering purposes. This is done in
    meeting.utils.preprocess_constraints_for_meeting_schedule_editor(). This adds:
    - joint_with_groups
    - responsible_ad
    """
    TIME_RELATION_CHOICES = (
        ('subsequent-days', 'Schedule the sessions on subsequent days'),
        ('one-day-seperation', 'Leave at least one free day in between the two sessions'),
    )
    meeting = ForeignKey(Meeting)
    source = ForeignKey(Group, related_name="constraint_source_set")
    target = ForeignKey(Group, related_name="constraint_target_set", null=True)
    person = ForeignKey(Person, null=True, blank=True)
    name = ForeignKey(ConstraintName)
    time_relation = models.CharField(max_length=200, choices=TIME_RELATION_CHOICES, blank=True)
    timeranges = models.ManyToManyField(TimerangeName)
    # transient flag, not a database field
    active_status = None
    def __str__(self):
        return u"%s %s target=%s person=%s" % (self.source, self.name.name.lower(), self.target, self.person)
    def brief_display(self):
        """Short human-readable description of the constraint for listings."""
        # NOTE(review): implicitly returns None when neither target nor
        # person is set and the slug is not one of the special cases below.
        if self.name.slug == "wg_adjacent":
            return "Adjacent with %s" % self.target.acronym
        elif self.name.slug == "time_relation":
            return self.get_time_relation_display()
        elif self.name.slug == "timerange":
            timeranges_str = ", ".join([t.desc for t in self.timeranges.all()])
            return "Can't meet %s" % timeranges_str
        elif self.target and self.person:
            return "%s ; %s" % (self.target.acronym, self.person)
        elif self.target and not self.person:
            return "%s " % (self.target.acronym)
        elif not self.target and self.person:
            return "%s " % (self.person)
class SessionPresentation(models.Model):
    """Through-model linking a Session to one of its material Documents,
    recording the presented revision and the display order."""
    session = ForeignKey('Session')
    document = ForeignKey(Document)
    rev = models.CharField(verbose_name="revision", max_length=16, null=True, blank=True)
    order = models.PositiveSmallIntegerField(default=0)
    class Meta:
        db_table = 'meeting_session_materials'
        ordering = ('order',)
        unique_together = (('session', 'document'),)
    def __str__(self):
        return u"%s -> %s-%s" % (self.session, self.document.name, self.rev)
# NOTE(review): module-level counters; they appear to be statistics for a
# constraint cache, incremented outside this section of the file — confirm.
constraint_cache_uses = 0
constraint_cache_initials = 0
class SessionQuerySet(models.QuerySet):
    """Custom QuerySet for Session, providing status annotations (derived
    from the latest SchedulingEvent) and common status-based filters."""
    def with_current_status(self):
        """Annotate session with its current status

        Adds current_status, containing the text representation of the status.
        """
        return self.annotate(
            # coalesce with '' to avoid nulls which give funny
            # results, e.g. .exclude(current_status='canceled') also
            # skips rows with null in them
            current_status=Coalesce(
                Subquery(
                    SchedulingEvent.objects.filter(
                        session=OuterRef('pk')
                    ).order_by(
                        '-time', '-id'
                    ).values('status')[:1]),
                Value(''),
                output_field=TextField()),
        )
    def with_requested_by(self):
        """Annotate session with requested_by field

        Adds requested_by field - pk of the Person who made the request
        """
        return self.annotate(
            requested_by=Subquery(
                SchedulingEvent.objects.filter(
                    session=OuterRef('pk')
                ).order_by(
                    'time', 'id'
                ).values('by')[:1]),
        )
    def with_requested_time(self):
        """Annotate session with requested_time field"""
        return self.annotate(
            requested_time=Subquery(
                SchedulingEvent.objects.filter(
                    session=OuterRef('pk')
                ).order_by(
                    'time', 'id'
                ).values('time')[:1]),
        )
    def not_canceled(self):
        """Queryset containing all sessions not canceled

        Results annotated with current_status
        """
        return self.with_current_status().exclude(current_status__in=Session.CANCELED_STATUSES)
    def not_deleted(self):
        """Queryset containing all sessions not deleted

        Results annotated with current_status
        """
        return self.with_current_status().exclude(current_status='deleted')
    def that_can_meet(self):
        """Queryset containing sessions that can meet

        Results annotated with current_status
        """
        return self.with_current_status().exclude(
            current_status__in=['notmeet', 'disappr', 'deleted', 'apprw']
        ).filter(
            type__slug='regular'
        )
    def requests(self):
        """Queryset containing sessions that may be handled as requests"""
        return self.exclude(
            type__in=('offagenda', 'reserved', 'unavail')
        )
class Session(models.Model):
    """Session records that a group should have a session on the
    meeting (time and location is stored in a TimeSlot) - if multiple
    timeslots are needed, multiple sessions will have to be created.
    Training sessions and similar are modeled by filling in a
    responsible group (e.g. Edu team) and filling in the name."""
    objects = SessionQuerySet.as_manager() # sets default query manager
    meeting = ForeignKey(Meeting)
    name = models.CharField(blank=True, max_length=255, help_text="Name of session, in case the session has a purpose rather than just being a group meeting.")
    short = models.CharField(blank=True, max_length=32, help_text="Short version of 'name' above, for use in filenames.")
    purpose = ForeignKey(SessionPurposeName, null=False, help_text='Purpose of the session')
    type = ForeignKey(TimeSlotTypeName)
    group = ForeignKey(Group)    # The group type historically determined the session type. BOFs also need to be added as a group. Note that not all meeting requests have a natural group to associate with.
    joint_with_groups = models.ManyToManyField(Group, related_name='sessions_joint_in',blank=True)
    attendees = models.IntegerField(null=True, blank=True)
    agenda_note = models.CharField(blank=True, max_length=255)
    requested_duration = models.DurationField(default=datetime.timedelta(0))
    comments = models.TextField(blank=True)
    scheduled = models.DateTimeField(null=True, blank=True)
    modified = models.DateTimeField(auto_now=True)
    remote_instructions = models.CharField(blank=True,max_length=1024)
    on_agenda = models.BooleanField(default=True, help_text='Is this session visible on the meeting agenda?')
    tombstone_for = models.ForeignKey('Session', blank=True, null=True, help_text="This session is the tombstone for a session that was rescheduled", on_delete=models.CASCADE)
    materials = models.ManyToManyField(Document, through=SessionPresentation, blank=True)
    resources = models.ManyToManyField(ResourceAssociation, blank=True)
    unique_constraints_dict = None
    CANCELED_STATUSES = ['canceled', 'canceledpa']
    # Should work on how materials are captured so that deleted things are no longer associated with the session
    # (We can keep the information about something being added to and removed from a session in the document's history)
    def get_material(self, material_type, only_one):
        """Return active material(s) of the given type; a single item or None
        when only_one is True, otherwise a list/queryset."""
        if hasattr(self, "prefetched_active_materials"):
            l = [d for d in self.prefetched_active_materials if d.type_id == material_type]
            for d in l:
                d.meeting_related = lambda: True
        else:
            l = self.materials.filter(type=material_type).exclude(states__type=material_type, states__slug='deleted').order_by('sessionpresentation__order')
        if only_one:
            if l:
                return l[0]
            else:
                return None
        else:
            return l
    def agenda(self):
        if not hasattr(self, "_agenda_cache"):
            self._agenda_cache = self.get_material("agenda", only_one=True)
        return self._agenda_cache
    def minutes(self):
        if not hasattr(self, '_cached_minutes'):
            self._cached_minutes = self.get_material("minutes", only_one=True)
        return self._cached_minutes
    def recordings(self):
        return list(self.get_material("recording", only_one=False))
    def bluesheets(self):
        return list(self.get_material("bluesheets", only_one=False))
    def slides(self):
        if not hasattr(self, "_slides_cache"):
            self._slides_cache = list(self.get_material("slides", only_one=False))
        return self._slides_cache
    def drafts(self):
        return list(self.materials.filter(type='draft'))
    # The utilities below are used in the proceedings and materials
    # templates, and should be moved there - then we could also query
    # out the needed information in a few passes and speed up those
    # pages.
    def all_meeting_sessions_for_group(self):
        """All officially scheduled sessions for this session's group at this
        meeting, sorted by timeslot time; [self] for non-meeting groups."""
        from ietf.meeting.utils import add_event_info_to_session_qs
        if self.group.features.has_meetings:
            if not hasattr(self, "_all_meeting_sessions_for_group_cache"):
                sessions = [s for s in add_event_info_to_session_qs(self.meeting.session_set.filter(group=self.group,type=self.type)) if s.official_timeslotassignment()]
                self._all_meeting_sessions_for_group_cache = sorted(sessions, key = lambda x: x.official_timeslotassignment().timeslot.time)
            return self._all_meeting_sessions_for_group_cache
        else:
            return [self]
    def order_in_meeting(self):
        """1-based position of this session among its group's sessions; 0 if unscheduled."""
        if not hasattr(self, '_order_in_meeting'):
            session_list = self.all_meeting_sessions_for_group()
            self._order_in_meeting = session_list.index(self) + 1 if self in session_list else 0
        return self._order_in_meeting
    def all_meeting_sessions_cancelled(self):
        return set(s.current_status for s in self.all_meeting_sessions_for_group()) == {'canceled'}
    def all_meeting_recordings(self):
        recordings = [] # These are not sets because we need to preserve relative ordering or redo the ordering work later
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            recordings.extend([r for r in session.recordings() if r not in recordings])
        return recordings
    def all_meeting_bluesheets(self):
        bluesheets = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            bluesheets.extend([b for b in session.bluesheets() if b not in bluesheets])
        return bluesheets
    def all_meeting_drafts(self):
        drafts = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            drafts.extend([d for d in session.drafts() if d not in drafts])
        return drafts
    def all_meeting_agendas(self):
        agendas = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            agenda = session.agenda()
            if agenda and agenda not in agendas:
                agendas.append(agenda)
        return agendas
    def all_meeting_slides(self):
        slides = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            slides.extend([s for s in session.slides() if s not in slides])
        return slides
    def all_meeting_minutes(self):
        minutes = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            minutes_doc = session.minutes()
            if minutes_doc and minutes_doc not in minutes:
                minutes.append(minutes_doc)
        return minutes
    def can_manage_materials(self, user):
        return can_manage_materials(user,self.group)
    def is_material_submission_cutoff(self):
        return datetime.date.today() > self.meeting.get_submission_correction_date()
    def joint_with_groups_acronyms(self):
        return [group.acronym for group in self.joint_with_groups.all()]
    def __str__(self):
        if self.meeting.type_id == "interim":
            return self.meeting.number
        status_id = None
        if hasattr(self, 'current_status'):
            status_id = self.current_status
        elif self.pk is not None:
            latest_event = SchedulingEvent.objects.filter(session=self.pk).order_by('-time', '-id').first()
            if latest_event:
                status_id = latest_event.status_id
        if status_id in ('canceled','disappr','notmeet','deleted'):
            ss0name = "(%s)" % SessionStatusName.objects.get(slug=status_id).name
        else:
            ss0name = "(unscheduled)"
            ss = self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).order_by('timeslot__time')
            if ss:
                ss0name = ','.join(x.timeslot.time.strftime("%a-%H%M") for x in ss)
        return "%s: %s %s %s" % (self.meeting, self.group.acronym, self.name, ss0name)
    @property
    def short_name(self):
        if self.name:
            return self.name
        if self.short:
            return self.short
        if self.group:
            return self.group.acronym
        # Was '"req#%u" % (id)', which formatted the id() *builtin* and
        # raised TypeError whenever this fallback was reached; use the pk.
        return "req#%u" % (self.pk)
    @property
    def special_request_token(self):
        if self.comments is not None and len(self.comments)>0:
            return "*"
        else:
            return ""
    def docname_token(self):
        """Per-session token ('sessa', 'sessb', ...) for material filenames."""
        sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
        index = list(sess_mtg).index(self)
        if index < 26:
            return 'sess%s' % (string.ascii_lowercase[index])
        # Two-letter token for the 27th and later sessions, matching
        # docname_token_only_for_multiple() below (previously IndexError).
        return 'sess%s%s' % (string.ascii_lowercase[index//26],string.ascii_lowercase[index%26])
    def docname_token_only_for_multiple(self):
        """As docname_token(), but None when the group has a single session."""
        sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
        if len(list(sess_mtg)) > 1:
            index = list(sess_mtg).index(self)
            if index < 26:
                token = 'sess%s' % (string.ascii_lowercase[index])
            else:
                token = 'sess%s%s' % (string.ascii_lowercase[index//26],string.ascii_lowercase[index%26])
            return token
        return None
    def constraints(self):
        return Constraint.objects.filter(source=self.group, meeting=self.meeting).order_by('name__name', 'target__acronym', 'person__name').prefetch_related("source","target","person")
    def reverse_constraints(self):
        return Constraint.objects.filter(target=self.group, meeting=self.meeting).order_by('name__name')
    def official_timeslotassignment(self):
        """This session's assignment in the official schedule (or its base), or None."""
        return self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
    @property
    def people_constraints(self):
        return self.group.constraint_source_set.filter(meeting=self.meeting, name='bethere')
    def agenda_text(self):
        """Text of the uploaded agenda file, or an explanatory placeholder."""
        doc = self.agenda()
        if doc:
            path = os.path.join(settings.AGENDA_PATH, self.meeting.number, "agenda", doc.uploaded_filename)
            if os.path.exists(path):
                with io.open(path) as f:
                    return f.read()
            else:
                return "No agenda file found"
        else:
            return "The agenda has not been uploaded yet."
    def agenda_file(self):
        if not hasattr(self, '_agenda_file'):
            self._agenda_file = ""
            agenda = self.agenda()
            if not agenda:
                return ""
            # FIXME: uploaded_filename should be replaced with a function that computes filenames when they are of a fixed schema and not uploaded names
            self._agenda_file = "%s/agenda/%s" % (self.meeting.number, agenda.uploaded_filename)
        return self._agenda_file
    def jabber_room_name(self):
        # NOTE(review): historic_group appears to be attached by calling code
        # (it is not a model field); direct access would raise if absent.
        if self.type_id=='plenary':
            return 'plenary'
        elif self.historic_group:
            return self.historic_group.acronym
        else:
            return self.group.acronym
    def notes_id(self):
        note_id_fragment = 'plenary' if self.type.slug == 'plenary' else self.group.acronym
        return f'notes-ietf-{self.meeting.number}-{note_id_fragment}'
    def notes_url(self):
        return urljoin(settings.IETF_NOTES_URL, self.notes_id())
class SchedulingEvent(models.Model):
    """A history entry recording a change of a Session's scheduling status;
    the newest event determines the session's current status."""
    session = ForeignKey(Session)
    time = models.DateTimeField(default=datetime.datetime.now, help_text="When the event happened")
    status = ForeignKey(SessionStatusName)
    by = ForeignKey(Person)
    def __str__(self):
        return u'%s : %s : %s : %s' % (self.session, self.status, self.time, self.by)
class ImportantDate(models.Model):
    """A named important date (e.g. a deadline) associated with a meeting."""
    meeting = ForeignKey(Meeting)
    date = models.DateField()
    name = ForeignKey(ImportantDateName)
    class Meta:
        ordering = ["-meeting_id","date", ]
    def __str__(self):
        return u'%s : %s : %s' % ( self.meeting, self.name, self.date )
class SlideSubmission(models.Model):
    """A slide deck uploaded for a session, held in a staging area."""
    time = models.DateTimeField(auto_now=True)
    session = ForeignKey(Session)
    title = models.CharField(max_length=255)
    filename = models.CharField(max_length=255)
    apply_to_all = models.BooleanField(default=False)
    submitter = ForeignKey(Person)
    status = ForeignKey(SlideSubmissionStatusName, null=True, default='pending', on_delete=models.SET_NULL)
    # presumably set to the created Document once the submission is accepted -- confirm with views
    doc = ForeignKey(Document, null=True, on_delete=models.SET_NULL)
    def staged_filepath(self):
        """Filesystem path of the uploaded file in the staging area."""
        return os.path.join(settings.SLIDE_STAGING_PATH , self.filename)
    def staged_url(self):
        """URL of the uploaded file in the staging area."""
        return "".join([settings.SLIDE_STAGING_URL, self.filename])
class ProceedingsMaterial(models.Model):
    """A supplementary 'procmaterials' document attached to a meeting's proceedings."""
    meeting = ForeignKey(Meeting, related_name='proceedings_materials')
    document = ForeignKey(
        Document,
        limit_choices_to=dict(type_id='procmaterials'),
        unique=True,
    )
    type = ForeignKey(ProceedingsMaterialTypeName)
    class Meta:
        # at most one material of each type per meeting
        unique_together = (('meeting', 'type'),)
    def __str__(self):
        return self.document.title
    def get_href(self):
        """URL of the underlying document in the context of this meeting."""
        return f'{self.document.get_href(self.meeting)}'
    def active(self):
        """True if the underlying document is in the 'active' state."""
        return self.document.get_state().slug == 'active'
    def is_url(self):
        """True if the document carries a non-empty external URL."""
        return len(self.document.external_url) > 0
def _host_upload_path(instance : 'MeetingHost', filename):
"""Compute filename relative to the storage location
Must live outside a class to allow migrations to deconstruct fields that use it
"""
num = instance.meeting.number
path = (
Path(num) / 'meetinghosts' / f'logo-{"".join(random.choices(string.ascii_lowercase, k=10))}'
).with_suffix(
Path(filename).suffix
)
return str(path)
class MeetingHost(models.Model):
    """Meeting sponsor, with name and validated logo image."""
    meeting = ForeignKey(Meeting, related_name='meetinghosts')
    name = models.CharField(max_length=255, blank=False)
    # Logo stored under MEETINGHOST_LOGO_PATH with a randomized filename
    # (see _host_upload_path); validated for pixel size, file size,
    # extension, and MIME type.
    logo = MissingOkImageField(
        storage=NoLocationMigrationFileSystemStorage(location=settings.MEETINGHOST_LOGO_PATH),
        upload_to=_host_upload_path,
        width_field='logo_width',
        height_field='logo_height',
        blank=False,
        validators=[
            MaxImageSizeValidator(
                settings.MEETINGHOST_LOGO_MAX_UPLOAD_WIDTH,
                settings.MEETINGHOST_LOGO_MAX_UPLOAD_HEIGHT,
            ),
            WrappedValidator(validate_file_size, True),
            WrappedValidator(
                validate_file_extension,
                settings.MEETING_VALID_UPLOAD_EXTENSIONS['meetinghostlogo'],
            ),
            WrappedValidator(
                validate_mime_type,
                settings.MEETING_VALID_UPLOAD_MIME_TYPES['meetinghostlogo'],
                True,
            ),
        ],
    )
    # These are filled in by the ImageField to allow retrieval of image dimensions
    # without processing the image each time it's loaded.
    logo_width = models.PositiveIntegerField(null=True)
    logo_height = models.PositiveIntegerField(null=True)
    class Meta:
        unique_together = (('meeting', 'name'),)
        ordering = ('pk',)
import datetime
import io
import os
import pytz
import random
import re
import string
from collections import namedtuple
from pathlib import Path
from urllib.parse import urljoin
import debug
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from django.db.models import Max, Subquery, OuterRef, TextField, Value, Q
from django.db.models.functions import Coalesce
from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.group.utils import can_manage_materials
from ietf.name.models import (
MeetingTypeName, TimeSlotTypeName, SessionStatusName, ConstraintName, RoomResourceName,
ImportantDateName, TimerangeName, SlideSubmissionStatusName, ProceedingsMaterialTypeName,
SessionPurposeName,
)
from ietf.person.models import Person
from ietf.utils.decorators import memoize
from ietf.utils.storage import NoLocationMigrationFileSystemStorage
from ietf.utils.text import xslugify
from ietf.utils.timezone import date2datetime
from ietf.utils.models import ForeignKey
from ietf.utils.validators import (
MaxImageSizeValidator, WrappedValidator, validate_file_size, validate_mime_type,
validate_file_extension,
)
from ietf.utils.fields import MissingOkImageField
from ietf.utils.log import unreachable
# Country choices for Meeting.country: (ISO code, name), sorted by country name.
countries = list(pytz.country_names.items())
countries.sort(key=lambda x: x[1])
# Time zone choices: common pytz zone names for which a concrete
# (non-symlinked) VTIMEZONE .ics file exists under TZDATA_ICS_PATH.
timezones = []
for name in pytz.common_timezones:
    tzfn = os.path.join(settings.TZDATA_ICS_PATH, name + ".ics")
    if not os.path.islink(tzfn):
        timezones.append((name, name))
timezones.sort()
# NOTE(review): mid-file import; django.utils.datetime_safe is deprecated
# (slated for removal in newer Django) -- consider replacing when upgrading.
from django.utils import datetime_safe
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def fmt_date(o):
    """Format a date as YYYY-MM-DD, via datetime_safe to handle pre-1900 years."""
    d = datetime_safe.new_date(o)
    return d.strftime(DATE_FORMAT)
class Meeting(models.Model):
    """A meeting (IETF or other type): dates, venue, submission cutoffs,
    and a pointer to the official Schedule."""
    # CharField so that non-numeric identifiers (e.g. interim designations) fit.
    number = models.CharField(unique=True, max_length=64)
    type = ForeignKey(MeetingTypeName)
    date = models.DateField()
    days = models.IntegerField(default=7, null=False, validators=[MinValueValidator(1)],
        help_text="The number of days the meeting lasts")
    city = models.CharField(blank=True, max_length=255)
    country = models.CharField(blank=True, max_length=2, choices=countries)
    # Stored explicitly rather than derived from country: some countries have
    # more than one timezone, and the pytz module doesn't provide timezone
    # information per country.
    time_zone = models.CharField(blank=True, max_length=255, choices=timezones)
    idsubmit_cutoff_day_offset_00 = models.IntegerField(blank=True,
        default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_00,
        help_text = "The number of days before the meeting start date when the submission of -00 drafts will be closed.")
    idsubmit_cutoff_day_offset_01 = models.IntegerField(blank=True,
        default=settings.IDSUBMIT_DEFAULT_CUTOFF_DAY_OFFSET_01,
        help_text = "The number of days before the meeting start date when the submission of -01 drafts etc. will be closed.")
    idsubmit_cutoff_time_utc = models.DurationField(blank=True,
        default=settings.IDSUBMIT_DEFAULT_CUTOFF_TIME_UTC,
        help_text = "The time of day (UTC) after which submission will be closed. Use for example 23:59:59.")
    idsubmit_cutoff_warning_days = models.DurationField(blank=True,
        default=settings.IDSUBMIT_DEFAULT_CUTOFF_WARNING_DAYS,
        help_text = "How long before the 00 cutoff to start showing cutoff warnings. Use for example '21' or '21 days'.")
    submission_start_day_offset = models.IntegerField(blank=True,
        default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_START_DAYS,
        help_text = "The number of days before the meeting start date after which meeting materials will be accepted.")
    submission_cutoff_day_offset = models.IntegerField(blank=True,
        default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CUTOFF_DAYS,
        help_text = "The number of days after the meeting start date in which new meeting materials will be accepted.")
    submission_correction_day_offset = models.IntegerField(blank=True,
        default=settings.MEETING_MATERIALS_DEFAULT_SUBMISSION_CORRECTION_DAYS,
        help_text = "The number of days after the meeting start date in which updates to existing meeting materials will be accepted.")
    venue_name = models.CharField(blank=True, max_length=255)
    venue_addr = models.TextField(blank=True)
    break_area = models.CharField(blank=True, max_length=255)
    reg_area = models.CharField(blank=True, max_length=255)
    agenda_info_note = models.TextField(blank=True, help_text="Text in this field will be placed at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
    agenda_warning_note = models.TextField(blank=True, help_text="Text in this field will be placed more prominently at the top of the html agenda page for the meeting. HTML can be used, but will not be validated.")
    # The official schedule; one of this meeting's Schedule objects.
    schedule = ForeignKey('Schedule',null=True,blank=True, related_name='+')
    session_request_lock_message = models.CharField(blank=True,max_length=255)
    proceedings_final = models.BooleanField(default=False, help_text="Are the proceedings for this meeting complete?")
    acknowledgements = models.TextField(blank=True, help_text="Acknowledgements for use in meeting proceedings. Use ReStructuredText markup.")
    overview = ForeignKey(DBTemplate, related_name='overview', null=True, editable=False)
    show_important_dates = models.BooleanField(default=False)
    attendees = models.IntegerField(blank=True, null=True, default=None,
        help_text="Number of Attendees for backfilled meetings, leave it blank for new meetings, and then it is calculated from the registrations")
    group_conflict_types = models.ManyToManyField(
        ConstraintName, blank=True, limit_choices_to=dict(is_group_conflict=True),
        help_text='Types of scheduling conflict between groups to consider')

    def __str__(self):
        if self.type_id == "ietf":
            return u"IETF-%s" % (self.number)
        else:
            return self.number

    def get_meeting_date (self,offset):
        """Return the meeting's start date plus offset days."""
        return self.date + datetime.timedelta(days=offset)

    def end_date(self):
        """Return the last day of the meeting."""
        return self.get_meeting_date(self.days-1)

    def get_00_cutoff(self):
        """Return the UTC datetime after which -00 draft submission is closed."""
        start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
        # Prefer an explicitly recorded important date; fall back to the default offset.
        importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
        if not importantdate:
            importantdate = self.importantdate_set.filter(name_id='00cutoff').first()
        if importantdate:
            cutoff_date = importantdate.date
        else:
            cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
        cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
        return cutoff_time

    def get_01_cutoff(self):
        """Return the UTC datetime after which -01+ draft submission is closed."""
        start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day, tzinfo=pytz.utc)
        importantdate = self.importantdate_set.filter(name_id='idcutoff').first()
        if not importantdate:
            importantdate = self.importantdate_set.filter(name_id='01cutoff').first()
        if importantdate:
            cutoff_date = importantdate.date
        else:
            cutoff_date = start_date + datetime.timedelta(days=ImportantDateName.objects.get(slug='idcutoff').default_offset_days)
        cutoff_time = date2datetime(cutoff_date) + self.idsubmit_cutoff_time_utc
        return cutoff_time

    def get_reopen_time(self):
        """Return the time at which draft submission reopens after the cutoff."""
        start_date = datetime.datetime(year=self.date.year, month=self.date.month, day=self.date.day)
        local_tz = pytz.timezone(self.time_zone)
        local_date = local_tz.localize(start_date)
        cutoff = self.get_00_cutoff()
        # NOTE(review): cutoff.date() (a date) is compared to start_date (a
        # datetime); in Python this comparison is always False, so the first
        # branch never fires -- confirm whether start_date.date() was intended.
        if cutoff.date() == start_date:
            reopen_time = cutoff
        else:
            reopen_time = local_date + self.idsubmit_cutoff_time_utc
        return reopen_time

    @classmethod
    def get_current_meeting(cls, type="ietf"):
        """Return the nearest upcoming (or just-ended, within 7 days) meeting of the given type."""
        return cls.objects.filter(type=type, date__gte=datetime.datetime.today()-datetime.timedelta(days=7) ).order_by('date').first()

    def get_first_cut_off(self):
        return self.get_00_cutoff()

    def get_second_cut_off(self):
        return self.get_01_cutoff()

    def get_ietf_monday(self):
        """Return the date of the first Monday within the meeting days."""
        for offset in range(self.days):
            date = self.date+datetime.timedelta(days=offset)
            if date.weekday() == 0:     # Monday
                return date

    def get_materials_path(self):
        return os.path.join(settings.AGENDA_PATH,self.number)

    def get_submission_start_date(self):
        """First date on which meeting materials are accepted."""
        return self.date - datetime.timedelta(days=self.submission_start_day_offset)

    def get_submission_cut_off_date(self):
        """Last date on which new meeting materials are accepted."""
        importantdate = self.importantdate_set.filter(name_id='procsub').first()
        if importantdate:
            return importantdate.date
        else:
            return self.date + datetime.timedelta(days=self.submission_cutoff_day_offset)

    def get_submission_correction_date(self):
        """Last date on which corrections to existing materials are accepted."""
        importantdate = self.importantdate_set.filter(name_id='revsub').first()
        if importantdate:
            return importantdate.date
        else:
            return self.date + datetime.timedelta(days=self.submission_correction_day_offset)

    def enabled_constraint_names(self):
        """ConstraintNames enabled for this meeting: all non-group-conflict
        names plus the group-conflict types selected for this meeting."""
        return ConstraintName.objects.filter(
            Q(is_group_conflict=False)
            | Q(is_group_conflict=True, meeting=self)
        )

    def enabled_constraints(self):
        """Constraints of this meeting whose type is enabled for it."""
        return self.constraint_set.filter(name__in=self.enabled_constraint_names())

    def get_schedule_by_name(self, name):
        return self.schedule_set.filter(name=name).first()

    def get_number(self):
        """Return the meeting number as an int, or None if non-numeric."""
        if self.number.isdigit():
            return int(self.number)
        else:
            return None

    def get_proceedings_materials(self):
        """Active proceedings materials for this meeting, in display order."""
        return self.proceedings_materials.filter(
            document__states__slug='active', document__states__type_id='procmaterials'
        ).order_by('type__order')

    def get_attendance(self):
        """Get the meeting attendance from the MeetingRegistrations.

        Returns None for meetings before IETF 110 (no usable data)."""
        number = self.get_number()
        if number is None or number < 110:
            return None
        Attendance = namedtuple('Attendance', 'onsite online')
        return Attendance(
            onsite=Person.objects.filter(
                meetingregistration__meeting=self,
                meetingregistration__attended=True,
                meetingregistration__reg_type__contains='in_person',
            ).distinct().count(),
            online=Person.objects.filter(
                meetingregistration__meeting=self,
                meetingregistration__attended=True,
                meetingregistration__reg_type__contains='remote',
            ).distinct().count(),
        )

    @property
    def proceedings_format_version(self):
        """Version of the proceedings format for this meeting (None if non-numeric number)."""
        if not hasattr(self, '_proceedings_format_version'):
            if not self.number.isdigit():
                version = None
            else:
                version = len(settings.PROCEEDINGS_VERSION_CHANGES)
                mtg_number = self.get_number()
                if mtg_number is None:
                    unreachable('2021-08-10')
                else:
                    # version is always >= 1 for positive meeting numbers.
                    for vers, threshold in enumerate(settings.PROCEEDINGS_VERSION_CHANGES):
                        if mtg_number < threshold:
                            version = vers
                            break
            self._proceedings_format_version = version  # save this for later
        return self._proceedings_format_version

    @property
    def session_constraintnames(self):
        """ConstraintNames applicable to session requests for this meeting."""
        try:
            mtg_num = int(self.number)
        except ValueError:
            mtg_num = None  # should not come up, but this method should not fail
        if mtg_num is None or mtg_num >= 106:
            # Meetings from 106 onward (and non-numeric numbers) use the new names.
            slugs = ('chair_conflict', 'tech_overlap', 'key_participant')
        else:
            # These meetings used the old 'conflic?' constraint types labeled as though
            # they were the new types.
            slugs = ('conflict', 'conflic2', 'conflic3')
        return ConstraintName.objects.filter(slug__in=slugs)

    def base_url(self):
        return "/meeting/%s" % (self.number, )

    def build_timeslices(self):
        """Return (days, time_slices, slots): sorted meeting days, the unique
        [start, end, seconds] intervals per day, and the TimeSlots per day."""
        days = []           # the days of the meetings
        time_slices = {}    # the times on each day
        slots = {}
        for ts in self.timeslot_set.all():
            if ts.location_id is None:
                continue
            ymd = ts.time.date()
            if ymd not in time_slices:
                time_slices[ymd] = []
                slots[ymd] = []
                days.append(ymd)
            if ymd in time_slices:
                # only keep unique entries
                if [ts.time, ts.time + ts.duration, ts.duration.seconds] not in time_slices[ymd]:
                    time_slices[ymd].append([ts.time, ts.time + ts.duration, ts.duration.seconds])
                    slots[ymd].append(ts)
        days.sort()
        for ymd in time_slices:
            # Make sure these sort the same way
            time_slices[ymd].sort()
            slots[ymd].sort(key=lambda x: (x.time, x.duration))
        return days,time_slices,slots

    # this functions makes a list of timeslices and rooms, and
    # makes sure that all schedules have all of them.
    # def create_all_timeslots(self):
    #     alltimeslots = self.timeslot_set.all()
    #     for sched in self.schedule_set.all():
    #         ts_hash = {}
    #         for ss in sched.assignments.all():
    #             ts_hash[ss.timeslot] = ss
    #         for ts in alltimeslots:
    #             if not (ts in ts_hash):
    #                 SchedTimeSessAssignment.objects.create(schedule = sched,
    #                                                        timeslot = ts)

    def vtimezone(self):
        """Return the VTIMEZONE text block for this meeting's time zone, or ''."""
        if self.time_zone:
            try:
                tzfn = os.path.join(settings.TZDATA_ICS_PATH, self.time_zone + ".ics")
                if os.path.exists(tzfn):
                    with io.open(tzfn) as tzf:
                        icstext = tzf.read()
                    # NOTE(review): if the file exists but contains no VTIMEZONE
                    # section, re.search returns None and .group raises -- assumes
                    # the shipped tz files are always well-formed.
                    vtimezone = re.search("(?sm)(\nBEGIN:VTIMEZONE.*\nEND:VTIMEZONE\n)", icstext).group(1).strip()
                    if vtimezone:
                        vtimezone += "\n"
                    return vtimezone
            except IOError:
                pass
        return ''

    def set_official_schedule(self, schedule):
        if self.schedule != schedule:
            self.schedule = schedule
            self.save()

    def updated(self):
        """Return the latest (timezone-aware) modification time across this
        meeting's timeslots, sessions, and official-schedule assignments."""
        min_time = datetime.datetime(1970, 1, 1, 0, 0, 0)  # should be Meeting.modified, but we don't have that
        timeslots_updated = self.timeslot_set.aggregate(Max('modified'))["modified__max"] or min_time
        sessions_updated = self.session_set.aggregate(Max('modified'))["modified__max"] or min_time
        assignments_updated = min_time
        if self.schedule:
            assignments_updated = SchedTimeSessAssignment.objects.filter(schedule__in=[self.schedule, self.schedule.base if self.schedule else None]).aggregate(Max('modified'))["modified__max"] or min_time
        ts = max(timeslots_updated, sessions_updated, assignments_updated)
        tz = pytz.timezone(settings.PRODUCTION_TIMEZONE)
        ts = tz.localize(ts)
        return ts

    @memoize
    def previous_meeting(self):
        """Return the most recent earlier meeting of the same type, if any."""
        return Meeting.objects.filter(type_id=self.type_id,date__lt=self.date).order_by('-date').first()

    class Meta:
        ordering = ["-date", "-id"]
        indexes = [
            models.Index(fields=['-date', '-id']),
        ]
class ResourceAssociation(models.Model):
    """A room resource type, with a display icon and description."""
    name = ForeignKey(RoomResourceName)
    icon = models.CharField(max_length=64)
    desc = models.CharField(max_length=256)
    def __str__(self):
        return self.desc
class Room(models.Model):
    """A meeting room, with capacity, resources, and optional floorplan coordinates."""
    meeting = ForeignKey(Meeting)
    modified = models.DateTimeField(auto_now=True)
    name = models.CharField(max_length=255)
    functional_name = models.CharField(max_length=255, blank = True)
    capacity = models.IntegerField(null=True, blank=True)
    resources = models.ManyToManyField(ResourceAssociation, blank = True)
    session_types = models.ManyToManyField(TimeSlotTypeName, blank = True)
    floorplan = ForeignKey('FloorPlan', null=True, blank=True, default=None)
    # Bounding-box corner coordinates of the room on the floorplan image; None when unset.
    x1 = models.SmallIntegerField(null=True, blank=True, default=None)
    y1 = models.SmallIntegerField(null=True, blank=True, default=None)
    x2 = models.SmallIntegerField(null=True, blank=True, default=None)
    y2 = models.SmallIntegerField(null=True, blank=True, default=None)

    def __str__(self):
        return u"%s size: %s" % (self.name, self.capacity)

    def delete_timeslots(self):
        """Delete all of this room's timeslots together with their assignments."""
        for ts in self.timeslot_set.all():
            ts.sessionassignments.all().delete()
            ts.delete()

    def create_timeslots(self):
        """Create timeslots in this room mirroring the meeting's existing slot grid."""
        days, time_slices, slots = self.meeting.build_timeslices()
        for day in days:
            for ts in slots[day]:
                TimeSlot.objects.create(type_id=ts.type_id,
                                        meeting=self.meeting,
                                        name=ts.name,
                                        time=ts.time,
                                        location=self,
                                        duration=ts.duration)

    def dom_id(self):
        return "room%u" % (self.pk)

    def floorplan_url(self):
        """Return the floorplan URL for this room, or None if unavailable."""
        mtg_num = self.meeting.get_number()
        if not mtg_num:
            return None
        elif mtg_num <= settings.FLOORPLAN_LAST_LEGACY_MEETING:
            base_url = settings.FLOORPLAN_LEGACY_BASE_URL.format(meeting=self.meeting)
        elif self.floorplan:
            base_url = urlreverse('ietf.meeting.views.floor_plan', kwargs=dict(num=mtg_num))
        else:
            return None
        return f'{base_url}?room={xslugify(self.name)}'

    # Floorplan bounding-box accessors. Bug fix: use explicit None checks so a
    # legitimate coordinate of 0 is not treated as "unset" -- with the previous
    # truthiness test (self.x1 and self.x2), e.g. right() returned 0 whenever
    # x1 == 0, regardless of x2.
    def left(self):
        return min(self.x1, self.x2) if (self.x1 is not None and self.x2 is not None) else 0
    def top(self):
        return min(self.y1, self.y2) if (self.y1 is not None and self.y2 is not None) else 0
    def right(self):
        return max(self.x1, self.x2) if (self.x1 is not None and self.x2 is not None) else 0
    def bottom(self):
        return max(self.y1, self.y2) if (self.y1 is not None and self.y2 is not None) else 0

    def functional_display_name(self):
        """Return functional_name for display, suppressing breakout/numeric names."""
        if not self.functional_name:
            return ""
        if 'breakout' in self.functional_name.lower():
            return ""
        if self.functional_name[0].isdigit():
            return ""
        return self.functional_name

    def audio_stream_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id == 'audiostream']
        return urlresources[0].url if urlresources else None

    def video_stream_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho']]
        return urlresources[0].url if urlresources else None

    def onsite_tool_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['meetecho_onsite']]
        return urlresources[0].url if urlresources else None

    def webex_url(self):
        urlresources = [ur for ur in self.urlresource_set.all() if ur.name_id in ['webex']]
        return urlresources[0].url if urlresources else None

    class Meta:
        ordering = ["-id"]
class UrlResource(models.Model):
    """A URL (e.g. a stream or conferencing link) associated with a Room."""
    name = ForeignKey(RoomResourceName)
    room = ForeignKey(Room)
    url = models.URLField(null=True, blank=True)
def floorplan_path(instance, filename):
    """Compute the upload path of a floorplan image from meeting number and plan name."""
    ext = os.path.splitext(filename)[1]
    return "%s/floorplan-%s-%s%s" % (
        settings.FLOORPLAN_MEDIA_DIR,
        instance.meeting.number,
        xslugify(instance.name),
        ext,
    )
class FloorPlan(models.Model):
    """A floorplan image for (part of) a meeting venue."""
    name = models.CharField(max_length=255)
    short = models.CharField(max_length=3, default='')
    modified= models.DateTimeField(auto_now=True)
    meeting = ForeignKey(Meeting)
    order = models.SmallIntegerField()
    image = models.ImageField(storage=NoLocationMigrationFileSystemStorage(), upload_to=floorplan_path, blank=True, default=None)
    class Meta:
        ordering = ['-id',]
    def __str__(self):
        return u'floorplan-%s-%s' % (self.meeting.number, xslugify(self.name))
class TimeSlot(models.Model):
    """A time interval in a location on the meeting agenda, to which sessions
    are assigned through SchedTimeSessAssignment."""
    meeting = ForeignKey(Meeting)
    type = ForeignKey(TimeSlotTypeName)
    name = models.CharField(max_length=255)
    time = models.DateTimeField()
    duration = models.DurationField(default=datetime.timedelta(0))
    location = ForeignKey(Room, blank=True, null=True)
    show_location = models.BooleanField(default=True, help_text="Show location in agenda.")
    sessions = models.ManyToManyField('Session', related_name='slots', through='SchedTimeSessAssignment', blank=True, help_text="Scheduled session, if any.")
    modified = models.DateTimeField(auto_now=True)

    @property
    def session(self):
        """The session assigned to this slot in the official schedule (or its base), if any."""
        if not hasattr(self, "_session_cache"):
            # Bug fix: guard on self.meeting.schedule rather than self.meeting
            # before dereferencing .base -- meeting is a non-null FK, so the old
            # guard always passed and this raised AttributeError whenever the
            # meeting had no official schedule. Matches the equivalent pattern
            # in Session.official_timeslotassignment() and Meeting.updated().
            self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
        return self._session_cache

    @property
    def time_desc(self):
        """Start-end times as 'HHMM-HHMM'."""
        return "%s-%s" % (self.time.strftime("%H%M"), (self.time + self.duration).strftime("%H%M"))

    def meeting_date(self):
        return self.time.date()

    def registration(self):
        """The cached registration timeslot on the same day of this meeting, if any."""
        if not hasattr(self, '_reg_info'):
            try:
                self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
            except TimeSlot.DoesNotExist:
                self._reg_info = None
        return self._reg_info

    def __str__(self):
        location = self.get_location()
        if not location:
            location = u"(no location)"
        return u"%s: %s-%s %s, %s" % (self.meeting.number, self.time.strftime("%m-%d %H:%M"), (self.time + self.duration).strftime("%H:%M"), self.name, location)

    def end_time(self):
        return self.time + self.duration

    def get_hidden_location(self):
        """Location name regardless of show_location; reg/break slots fall back to meeting areas."""
        if not hasattr(self, '_cached_hidden_location'):
            location = self.location
            if location:
                location = location.name
            elif self.type_id == "reg":
                location = self.meeting.reg_area
            elif self.type_id == "break":
                location = self.meeting.break_area
            self._cached_hidden_location = location
        return self._cached_hidden_location

    def get_location(self):
        """Location name, or "" when the agenda should not show it."""
        return self.get_hidden_location() if self.show_location else ""

    def get_functional_location(self):
        """Location prefixed with the room's functional name, when present."""
        name_parts = []
        room = self.location
        if room and room.functional_name:
            name_parts.append(room.functional_name)
        location = self.get_hidden_location()
        if location:
            name_parts.append(location)
        return ' - '.join(name_parts)

    def get_html_location(self):
        """Location name with break opportunities (<wbr>) or non-breaking spaces for HTML display."""
        if not hasattr(self, '_cached_html_location'):
            self._cached_html_location = self.get_location()
            if len(self._cached_html_location) > 8:
                self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
            else:
                self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
        return self._cached_html_location

    def tz(self):
        """The meeting's pytz timezone, or None if the meeting has none set."""
        if not hasattr(self, '_cached_tz'):
            if self.meeting.time_zone:
                self._cached_tz = pytz.timezone(self.meeting.time_zone)
            else:
                self._cached_tz = None
        return self._cached_tz

    def tzname(self):
        if self.tz():
            return self.tz().tzname(self.time)
        else:
            return ""

    def utc_start_time(self):
        if self.tz():
            local_start_time = self.tz().localize(self.time)
            return local_start_time.astimezone(pytz.utc)
        else:
            return None

    def utc_end_time(self):
        utc_start = self.utc_start_time()
        return None if utc_start is None else utc_start + self.duration

    def local_start_time(self):
        if self.tz():
            return self.tz().localize(self.time)
        else:
            return None

    def local_end_time(self):
        local_start = self.local_start_time()
        return None if local_start is None else local_start + self.duration

    @property
    def js_identifier(self):
        """DOM-id-style identifier for this slot, based on room and start time."""
        # also must match:
        # {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
        dom_id="ts%u" % (self.pk)
        if self.location is not None:
            dom_id = self.location.dom_id()
        return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))

    def delete_concurrent_timeslots(self):
        """Delete all timeslots (including this one) with the same start time and duration."""
        # can not include duration in filter, because there is no support
        # for having it a WHERE clause.
        # below will delete self as well.
        for ts in self.meeting.timeslot_set.filter(time=self.time).all():
            if ts.duration!=self.duration:
                continue
            # now remove any schedule that might have been made to this
            # timeslot.
            ts.sessionassignments.all().delete()
            ts.delete()

    @property
    def slot_to_the_right(self):
        """The next same-type slot in the same room starting within 11 minutes after this one ends."""
        return self.meeting.timeslot_set.filter(
            location = self.location,       # same room!
            type     = self.type,           # must be same type (usually session)
            time__gt = self.time + self.duration,  # must be after this session
            time__lt = self.time + self.duration + datetime.timedelta(seconds=11*60)).first()

    class Meta:
        ordering = ["-time", "-id"]
        indexes = [
            models.Index(fields=['-time', '-id']),
        ]
# end of TimeSlot
class Schedule(models.Model):
    """A named set of TimeSlot<->Session assignments for a meeting.

    A meeting can have many schedules; the one pointed to by
    Meeting.schedule is the official one. A schedule may have a 'base'
    schedule whose assignments are included in it as well.
    """
    meeting = ForeignKey(Meeting, null=True, related_name='schedule_set')
    name = models.CharField(max_length=64, blank=False, help_text="Letters, numbers and -:_ allowed.", validators=[RegexValidator(r'^[A-Za-z0-9-:_]*$')])
    owner = ForeignKey(Person)
    visible = models.BooleanField("Show in agenda list", default=True, help_text="Show in the list of possible agendas for the meeting.")
    public = models.BooleanField(default=True, help_text="Allow others to see this agenda.")
    badness = models.IntegerField(null=True, blank=True)
    notes = models.TextField(blank=True)
    origin = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL, related_name="+")
    base = ForeignKey('Schedule', blank=True, null=True, on_delete=models.SET_NULL,
        help_text="Sessions scheduled in the base schedule show up in this schedule too.", related_name="derivedschedule_set",
        limit_choices_to={'base': None})  # prevent the inheritance from being more than one layer deep (no recursion)

    def __str__(self):
        return u"%s:%s(%s)" % (self.meeting, self.name, self.owner)

    def base_url(self):
        return "/meeting/%s/agenda/%s/%s" % (self.meeting.number, self.owner_email(), self.name)

    # temporary property to pacify the places where Schedule.assignments is used
    # @property
    # def schedtimesessassignment_set(self):
    #     return self.assignments
    #
    # def url_edit(self):
    #     return "/meeting/%s/agenda/%s/edit" % (self.meeting.number, self.name)
    #
    # @property
    # def relurl_edit(self):
    #     return self.url_edit("")

    def owner_email(self):
        # fall back to "noemail" so base_url() always produces a usable path
        return self.owner.email_address() or "noemail"

    @property
    def is_official(self):
        """True if this is the meeting's official schedule."""
        return (self.meeting.schedule == self)

    @property
    def is_official_record(self):
        """True if this is the official schedule of a meeting that has ended."""
        return (self.is_official and
                self.meeting.end_date() <= datetime.date.today() )

    # NOTE(review): the following comment appears to describe a method that no
    # longer exists here:
    # returns a dictionary {group -> [schedtimesessassignment+]}
    # and it has [] if the session is not placed.
    # if there is more than one session for that group,
    # then a list of them is returned (always a list)

    @property
    def official_token(self):
        """'official' or 'unofficial', e.g. for use in URLs/templates."""
        if self.is_official:
            return "official"
        else:
            return "unofficial"

    def delete_assignments(self):
        self.assignments.all().delete()

    @property
    def qs_assignments_with_sessions(self):
        return self.assignments.filter(session__isnull=False)

    def qs_timeslots_in_use(self):
        """Timeslots that have at least one assignment in this schedule."""
        return TimeSlot.objects.filter(sessionassignments__schedule=self)

    def qs_sessions_scheduled(self):
        """Sessions assigned to a timeslot in this schedule."""
        return Session.objects.filter(timeslotassignments__schedule=self)

    def delete_schedule(self):
        # delete assignments first to avoid orphaned rows
        self.assignments.all().delete()
        self.delete()
# to be renamed SchedTimeSessAssignments (stsa)
class SchedTimeSessAssignment(models.Model):
    """An assignment of a Session to a TimeSlot within a particular Schedule."""
    timeslot = ForeignKey('TimeSlot', null=False, blank=False, related_name='sessionassignments')
    session = ForeignKey('Session', null=True, default=None, related_name='timeslotassignments', help_text="Scheduled session.")
    schedule = ForeignKey('Schedule', null=False, blank=False, related_name='assignments')
    extendedfrom = ForeignKey('self', null=True, default=None, help_text="Timeslot this session is an extension of.")
    modified = models.DateTimeField(auto_now=True)
    notes = models.TextField(blank=True)
    badness = models.IntegerField(default=0, blank=True, null=True)
    pinned = models.BooleanField(default=False, help_text="Do not move session during automatic placement.")

    class Meta:
        ordering = ["timeslot__time", "timeslot__type__slug", "session__group__parent__name", "session__group__acronym", "session__name", ]

    def __str__(self):
        return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot)

    @property
    def room_name(self):
        return self.timeslot.location.name if self.timeslot and self.timeslot.location else None

    @property
    def acronym(self):
        # implicitly returns None when there is no session or group
        if self.session and self.session.group:
            return self.session.group.acronym

    @property
    def slot_to_the_right(self):
        """The assignment (if any) in the slot TimeSlot.slot_to_the_right returns."""
        s = self.timeslot.slot_to_the_right
        if s:
            return self.schedule.assignments.filter(timeslot=s).first()
        else:
            return None

    def meeting(self):
        return self.session.meeting

    def slot_type(self):
        return self.timeslot.type

    def slug(self):
        """Return a lowercase hyphenated identifier for this assignment
        (meeting number, slot time, group/parent acronyms)."""
        components = []
        components.append(self.schedule.meeting.number)
        if not self.timeslot:
            components.append("unknown")
        # NOTE(review): if self.timeslot is None while a session/group exists,
        # the else branch below dereferences self.timeslot -- confirm callers
        # never hit that combination.
        if not self.session or not (getattr(self.session, "historic_group", None) or self.session.group):
            components.append("unknown")
        else:
            components.append(self.timeslot.time.strftime("%Y-%m-%d-%a-%H%M"))
            g = getattr(self.session, "historic_group", None) or self.session.group
            if self.timeslot.type.slug in ('break', 'reg', 'other'):
                components.append(g.acronym)
                components.append(slugify(self.session.name))
            if self.timeslot.type.slug in ('regular', 'plenary'):
                if self.timeslot.type.slug == "plenary":
                    components.append("1plenary")
                else:
                    p = getattr(g, "historic_parent", None) or g.parent
                    if p and p.type_id in ("area", "irtf", 'ietf'):
                        components.append(p.acronym)
                    components.append(g.acronym)
        return "-".join(components).lower()
class BusinessConstraint(models.Model):
    """A meeting-independent scheduling constraint, identified by slug, with a penalty weight."""
    slug = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=255)
    penalty = models.IntegerField(default=0, help_text="The penalty for violating this kind of constraint; for instance 10 (small penalty) or 10000 (large penalty)")
class Constraint(models.Model):
    """A scheduling constraint on a (source) group's sessions at a meeting.

    Depending on name, the constraint may reference a target group, a person
    (e.g. 'bethere'), a time relation between two sessions, or a set of
    timeranges to avoid.
    """
    TIME_RELATION_CHOICES = (
        ('subsequent-days', 'Schedule the sessions on subsequent days'),
        ('one-day-seperation', 'Leave at least one free day in between the two sessions'),
    )
    meeting = ForeignKey(Meeting)
    source = ForeignKey(Group, related_name="constraint_source_set")
    target = ForeignKey(Group, related_name="constraint_target_set", null=True)
    person = ForeignKey(Person, null=True, blank=True)
    name = ForeignKey(ConstraintName)
    time_relation = models.CharField(max_length=200, choices=TIME_RELATION_CHOICES, blank=True)
    timeranges = models.ManyToManyField(TimerangeName)
    # transient attribute (not a database field)
    active_status = None

    def __str__(self):
        return u"%s %s target=%s person=%s" % (self.source, self.name.name.lower(), self.target, self.person)

    def brief_display(self):
        """Short human-readable description of the constraint.

        Falls through (returning None) when neither target nor person is set
        for a name not handled explicitly.
        """
        if self.name.slug == "wg_adjacent":
            return "Adjacent with %s" % self.target.acronym
        elif self.name.slug == "time_relation":
            return self.get_time_relation_display()
        elif self.name.slug == "timerange":
            timeranges_str = ", ".join([t.desc for t in self.timeranges.all()])
            return "Can't meet %s" % timeranges_str
        elif self.target and self.person:
            return "%s ; %s" % (self.target.acronym, self.person)
        elif self.target and not self.person:
            return "%s " % (self.target.acronym)
        elif not self.target and self.person:
            return "%s " % (self.person)
class SessionPresentation(models.Model):
    """Link between a Session and a material Document (slides, agenda, minutes, ...),
    optionally pinned to a specific document revision."""
    session = ForeignKey('Session')
    document = ForeignKey(Document)
    rev = models.CharField(verbose_name="revision", max_length=16, null=True, blank=True)
    order = models.PositiveSmallIntegerField(default=0)
    class Meta:
        db_table = 'meeting_session_materials'
        ordering = ('order',)
        unique_together = (('session', 'document'),)
    def __str__(self):
        return u"%s -> %s-%s" % (self.session, self.document.name, self.rev)
# Module-level counters, apparently for constraint-cache instrumentation;
# not referenced elsewhere in this chunk -- confirm before removing.
constraint_cache_uses = 0
constraint_cache_initials = 0
class SessionQuerySet(models.QuerySet):
    """QuerySet helpers for annotating and filtering sessions by their
    latest SchedulingEvent."""

    def with_current_status(self):
        """Annotate each session with current_status from its most recent
        SchedulingEvent ('' when it has none)."""
        return self.annotate(
            # coalesce with '' to avoid nulls which give funny
            # results, e.g. .exclude(current_status='canceled') also
            # skips rows with null in them
            current_status=Coalesce(
                Subquery(
                    SchedulingEvent.objects.filter(
                        session=OuterRef('pk')
                    ).order_by(
                        '-time', '-id'
                    ).values('status')[:1]),
                Value(''),
                output_field=TextField()),
        )

    def with_requested_by(self):
        """Annotate each session with the Person id from its earliest SchedulingEvent."""
        return self.annotate(
            requested_by=Subquery(
                SchedulingEvent.objects.filter(
                    session=OuterRef('pk')
                ).order_by(
                    'time', 'id'
                ).values('by')[:1]),
        )

    def with_requested_time(self):
        """Annotate each session with the time of its earliest SchedulingEvent."""
        return self.annotate(
            requested_time=Subquery(
                SchedulingEvent.objects.filter(
                    session=OuterRef('pk')
                ).order_by(
                    'time', 'id'
                ).values('time')[:1]),
        )

    def not_canceled(self):
        """Sessions whose current status is not one of the canceled statuses."""
        return self.with_current_status().exclude(current_status__in=Session.CANCELED_STATUSES)

    def not_deleted(self):
        """Sessions whose current status is not 'deleted'."""
        return self.with_current_status().exclude(current_status='deleted')

    def that_can_meet(self):
        """Regular sessions that are not blocked from meeting by their status."""
        return self.with_current_status().exclude(
            current_status__in=['notmeet', 'disappr', 'deleted', 'apprw']
        ).filter(
            type__slug='regular'
        )

    def requests(self):
        """Sessions that represent scheduling requests (exclude placeholder types)."""
        return self.exclude(
            type__in=('offagenda', 'reserved', 'unavail')
        )
class Session(models.Model):
    """A group's session at a meeting, together with its materials."""
    objects = SessionQuerySet.as_manager() # sets default query manager
    meeting = ForeignKey(Meeting)
    name = models.CharField(blank=True, max_length=255, help_text="Name of session, in case the session has a purpose rather than just being a group meeting.")
    short = models.CharField(blank=True, max_length=32, help_text="Short version of 'name' above, for use in filenames.")
    purpose = ForeignKey(SessionPurposeName, null=False, help_text='Purpose of the session')
    type = ForeignKey(TimeSlotTypeName)
    group = ForeignKey(Group) # The group type historically determined the session type. BOFs also need to be added as a group. Note that not all meeting requests have a natural group to associate with.
    joint_with_groups = models.ManyToManyField(Group, related_name='sessions_joint_in',blank=True)
    attendees = models.IntegerField(null=True, blank=True)
    agenda_note = models.CharField(blank=True, max_length=255)
    requested_duration = models.DurationField(default=datetime.timedelta(0))
    comments = models.TextField(blank=True)
    scheduled = models.DateTimeField(null=True, blank=True)
    modified = models.DateTimeField(auto_now=True)
    remote_instructions = models.CharField(blank=True,max_length=1024)
    on_agenda = models.BooleanField(default=True, help_text='Is this session visible on the meeting agenda?')
    tombstone_for = models.ForeignKey('Session', blank=True, null=True, help_text="This session is the tombstone for a session that was rescheduled", on_delete=models.CASCADE)
    materials = models.ManyToManyField(Document, through=SessionPresentation, blank=True)
    resources = models.ManyToManyField(ResourceAssociation, blank=True)
    # Placeholder attribute set at runtime; not a database field.
    unique_constraints_dict = None
    # Status slugs treated as canceled (see SessionQuerySet.not_canceled).
    CANCELED_STATUSES = ['canceled', 'canceledpa']
    # Should work on how materials are captured so that deleted things are no longer associated with the session
    # (We can keep the information about something being added to and removed from a session in the document's history)
    def get_material(self, material_type, only_one):
        """Return active material(s) of the given type for this session.

        Returns a single document (or None) when only_one is true,
        otherwise the list/queryset of matching documents.
        """
        if hasattr(self, "prefetched_active_materials"):
            l = [d for d in self.prefetched_active_materials if d.type_id == material_type]
            for d in l:
                d.meeting_related = lambda: True
        else:
            l = self.materials.filter(type=material_type).exclude(states__type=material_type, states__slug='deleted').order_by('sessionpresentation__order')
        if only_one:
            if l:
                return l[0]
            else:
                return None
        else:
            return l
    def agenda(self):
        """Return the session agenda document, or None (cached)."""
        if not hasattr(self, "_agenda_cache"):
            self._agenda_cache = self.get_material("agenda", only_one=True)
        return self._agenda_cache
    def minutes(self):
        """Return the session minutes document, or None (cached)."""
        if not hasattr(self, '_cached_minutes'):
            self._cached_minutes = self.get_material("minutes", only_one=True)
        return self._cached_minutes
    def recordings(self):
        """Return the recording documents for this session."""
        return list(self.get_material("recording", only_one=False))
    def bluesheets(self):
        """Return the bluesheet documents for this session."""
        return list(self.get_material("bluesheets", only_one=False))
    def slides(self):
        """Return the slide documents for this session (cached)."""
        if not hasattr(self, "_slides_cache"):
            self._slides_cache = list(self.get_material("slides", only_one=False))
        return self._slides_cache
    def drafts(self):
        """Return draft documents linked to this session."""
        return list(self.materials.filter(type='draft'))
    # The utilities below are used in the proceedings and materials
    # templates, and should be moved there - then we could also query
    # out the needed information in a few passes and speed up those
    # pages.
    def all_meeting_sessions_for_group(self):
        """Officially scheduled sessions of this group at this meeting,
        sorted by timeslot time (cached); [self] for groups without
        meetings."""
        from ietf.meeting.utils import add_event_info_to_session_qs
        if self.group.features.has_meetings:
            if not hasattr(self, "_all_meeting_sessions_for_group_cache"):
                sessions = [s for s in add_event_info_to_session_qs(self.meeting.session_set.filter(group=self.group,type=self.type)) if s.official_timeslotassignment()]
                self._all_meeting_sessions_for_group_cache = sorted(sessions, key = lambda x: x.official_timeslotassignment().timeslot.time)
            return self._all_meeting_sessions_for_group_cache
        else:
            return [self]
    def order_in_meeting(self):
        """1-based position among the group's sessions (0 if absent; cached)."""
        if not hasattr(self, '_order_in_meeting'):
            session_list = self.all_meeting_sessions_for_group()
            self._order_in_meeting = session_list.index(self) + 1 if self in session_list else 0
        return self._order_in_meeting
    def all_meeting_sessions_cancelled(self):
        # NOTE(review): only checks the 'canceled' slug, not 'canceledpa'
        # from CANCELED_STATUSES -- confirm this asymmetry is intended.
        return set(s.current_status for s in self.all_meeting_sessions_for_group()) == {'canceled'}
    def all_meeting_recordings(self):
        """Recordings from all of the group's sessions, de-duplicated."""
        recordings = [] # These are not sets because we need to preserve relative ordering or redo the ordering work later
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            recordings.extend([r for r in session.recordings() if r not in recordings])
        return recordings
    def all_meeting_bluesheets(self):
        """Bluesheets from all of the group's sessions, de-duplicated."""
        bluesheets = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            bluesheets.extend([b for b in session.bluesheets() if b not in bluesheets])
        return bluesheets
    def all_meeting_drafts(self):
        """Drafts from all of the group's sessions, de-duplicated."""
        drafts = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            drafts.extend([d for d in session.drafts() if d not in drafts])
        return drafts
    def all_meeting_agendas(self):
        """Agendas from all of the group's sessions, de-duplicated."""
        agendas = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            agenda = session.agenda()
            if agenda and agenda not in agendas:
                agendas.append(agenda)
        return agendas
    def all_meeting_slides(self):
        """Slides from all of the group's sessions, de-duplicated."""
        slides = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            slides.extend([s for s in session.slides() if s not in slides])
        return slides
    def all_meeting_minutes(self):
        """Minutes from all of the group's sessions, de-duplicated."""
        minutes = []
        sessions = self.all_meeting_sessions_for_group()
        for session in sessions:
            minutes_doc = session.minutes()
            if minutes_doc and minutes_doc not in minutes:
                minutes.append(minutes_doc)
        return minutes
    def can_manage_materials(self, user):
        """True if the user may manage materials for this session's group."""
        return can_manage_materials(user,self.group)
    def is_material_submission_cutoff(self):
        """True once the meeting's material-correction deadline has passed."""
        return datetime.date.today() > self.meeting.get_submission_correction_date()
    def joint_with_groups_acronyms(self):
        """Acronyms of groups holding this session jointly."""
        return [group.acronym for group in self.joint_with_groups.all()]
    def __str__(self):
        """Meeting/group/name plus scheduling info (just the number for interims)."""
        if self.meeting.type_id == "interim":
            return self.meeting.number
        status_id = None
        # Prefer an annotated current_status (see SessionQuerySet); fall
        # back to the latest SchedulingEvent for saved sessions.
        if hasattr(self, 'current_status'):
            status_id = self.current_status
        elif self.pk is not None:
            latest_event = SchedulingEvent.objects.filter(session=self.pk).order_by('-time', '-id').first()
            if latest_event:
                status_id = latest_event.status_id
        if status_id in ('canceled','disappr','notmeet','deleted'):
            ss0name = "(%s)" % SessionStatusName.objects.get(slug=status_id).name
        else:
            ss0name = "(unscheduled)"
        # NOTE(review): this timeslot lookup runs (and overrides ss0name)
        # even for canceled/deleted sessions -- confirm that is intended.
        ss = self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).order_by('timeslot__time')
        if ss:
            ss0name = ','.join(x.timeslot.time.strftime("%a-%H%M") for x in ss)
        return "%s: %s %s %s" % (self.meeting, self.group.acronym, self.name, ss0name)
@property
def short_name(self):
if self.name:
return self.name
if self.short:
return self.short
if self.group:
return self.group.acronym
return "req
    @property
    def special_request_token(self):
        """'*' marker when the request carries free-text comments, else ''."""
        if self.comments is not None and len(self.comments)>0:
            return "*"
        else:
            return ""
def docname_token(self):
sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
index = list(sess_mtg).index(self)
return 'sess%s' % (string.ascii_lowercase[index])
    def docname_token_only_for_multiple(self):
        """Like docname_token(), but None when the group has only one session."""
        sess_mtg = Session.objects.filter(meeting=self.meeting, group=self.group).order_by('pk')
        if len(list(sess_mtg)) > 1:
            index = list(sess_mtg).index(self)
            if index < 26:
                token = 'sess%s' % (string.ascii_lowercase[index])
            else:
                # Two-letter suffix once the single-letter range is exhausted.
                token = 'sess%s%s' % (string.ascii_lowercase[index//26],string.ascii_lowercase[index%26])
            return token
        return None
    def constraints(self):
        """Constraints this session's group places on others at the meeting."""
        return Constraint.objects.filter(source=self.group, meeting=self.meeting).order_by('name__name', 'target__acronym', 'person__name').prefetch_related("source","target","person")
    def reverse_constraints(self):
        """Constraints other groups place on this session's group."""
        return Constraint.objects.filter(target=self.group, meeting=self.meeting).order_by('name__name')
    def official_timeslotassignment(self):
        """Assignment on the official schedule (or its base), or None."""
        return self.timeslotassignments.filter(schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting.schedule else None]).first()
    @property
    def people_constraints(self):
        """'bethere' constraints sourced from this session's group."""
        return self.group.constraint_source_set.filter(meeting=self.meeting, name='bethere')
    def agenda_text(self):
        """Return the agenda file contents, or a placeholder message."""
        doc = self.agenda()
        if doc:
            path = os.path.join(settings.AGENDA_PATH, self.meeting.number, "agenda", doc.uploaded_filename)
            if os.path.exists(path):
                with io.open(path) as f:
                    return f.read()
            else:
                return "No agenda file found"
        else:
            return "The agenda has not been uploaded yet."
    def agenda_file(self):
        """Relative '<meeting>/agenda/<filename>' path of the agenda ('' if none, cached)."""
        if not hasattr(self, '_agenda_file'):
            self._agenda_file = ""
            agenda = self.agenda()
            if not agenda:
                return ""
            # FIXME: uploaded_filename should be replaced with a function that computes filenames when they are of a fixed schema and not uploaded names
            self._agenda_file = "%s/agenda/%s" % (self.meeting.number, agenda.uploaded_filename)
        return self._agenda_file
    def jabber_room_name(self):
        """Chat room name: 'plenary', or the (historic) group acronym."""
        if self.type_id=='plenary':
            return 'plenary'
        elif self.historic_group:
            # NOTE(review): historic_group is not a model field and must be
            # attached elsewhere at runtime -- confirm it is always set.
            return self.historic_group.acronym
        else:
            return self.group.acronym
    def notes_id(self):
        """Identifier of the shared-notes document for this session."""
        note_id_fragment = 'plenary' if self.type.slug == 'plenary' else self.group.acronym
        return f'notes-ietf-{self.meeting.number}-{note_id_fragment}'
    def notes_url(self):
        """Absolute URL of the shared-notes document."""
        return urljoin(settings.IETF_NOTES_URL, self.notes_id())
class SchedulingEvent(models.Model):
    """History record of a session status change (who, when, which status)."""
    session = ForeignKey(Session)
    time = models.DateTimeField(default=datetime.datetime.now, help_text="When the event happened")
    status = ForeignKey(SessionStatusName)
    by = ForeignKey(Person)
    def __str__(self):
        return u'%s : %s : %s : %s' % (self.session, self.status, self.time, self.by)
class ImportantDate(models.Model):
    """A named important date (deadline, cutoff, ...) for a meeting."""
    meeting = ForeignKey(Meeting)
    date = models.DateField()
    name = ForeignKey(ImportantDateName)
    class Meta:
        ordering = ["-meeting_id","date", ]
    def __str__(self):
        return u'%s : %s : %s' % ( self.meeting, self.name, self.date )
class SlideSubmission(models.Model):
    """A slide deck uploaded for a session, staged until approved."""
    time = models.DateTimeField(auto_now=True)
    session = ForeignKey(Session)
    title = models.CharField(max_length=255)
    filename = models.CharField(max_length=255)
    apply_to_all = models.BooleanField(default=False)
    submitter = ForeignKey(Person)
    status = ForeignKey(SlideSubmissionStatusName, null=True, default='pending', on_delete=models.SET_NULL)
    doc = ForeignKey(Document, null=True, on_delete=models.SET_NULL)
    def staged_filepath(self):
        """Filesystem path of the staged upload."""
        return os.path.join(settings.SLIDE_STAGING_PATH , self.filename)
    def staged_url(self):
        """Public URL of the staged upload."""
        return "".join([settings.SLIDE_STAGING_URL, self.filename])
class ProceedingsMaterial(models.Model):
    """A supplementary proceedings document of a given type for a meeting."""
    meeting = ForeignKey(Meeting, related_name='proceedings_materials')
    document = ForeignKey(
        Document,
        limit_choices_to=dict(type_id='procmaterials'),
        unique=True,
    )
    type = ForeignKey(ProceedingsMaterialTypeName)
    class Meta:
        unique_together = (('meeting', 'type'),)
    def __str__(self):
        return self.document.title
    def get_href(self):
        """URL of the underlying document in the context of this meeting."""
        return f'{self.document.get_href(self.meeting)}'
    def active(self):
        """True when the document's state slug is 'active'."""
        return self.document.get_state().slug == 'active'
    def is_url(self):
        """True when the material is an external URL rather than an upload."""
        return len(self.document.external_url) > 0
def _host_upload_path(instance : 'MeetingHost', filename):
num = instance.meeting.number
path = (
Path(num) / 'meetinghosts' / f'logo-{"".join(random.choices(string.ascii_lowercase, k=10))}'
).with_suffix(
Path(filename).suffix
)
return str(path)
class MeetingHost(models.Model):
    """An organization hosting a meeting, with a size/type-validated logo."""
    meeting = ForeignKey(Meeting, related_name='meetinghosts')
    name = models.CharField(max_length=255, blank=False)
    logo = MissingOkImageField(
        storage=NoLocationMigrationFileSystemStorage(location=settings.MEETINGHOST_LOGO_PATH),
        upload_to=_host_upload_path,
        width_field='logo_width',
        height_field='logo_height',
        blank=False,
        validators=[
            MaxImageSizeValidator(
                settings.MEETINGHOST_LOGO_MAX_UPLOAD_WIDTH,
                settings.MEETINGHOST_LOGO_MAX_UPLOAD_HEIGHT,
            ),
            WrappedValidator(validate_file_size, True),
            WrappedValidator(
                validate_file_extension,
                settings.MEETING_VALID_UPLOAD_EXTENSIONS['meetinghostlogo'],
            ),
            WrappedValidator(
                validate_mime_type,
                settings.MEETING_VALID_UPLOAD_MIME_TYPES['meetinghostlogo'],
                True,
            ),
        ],
    )
    # These are filled in by the ImageField allow retrieval of image dimensions
    # without processing the image each time it's loaded.
    logo_width = models.PositiveIntegerField(null=True)
    logo_height = models.PositiveIntegerField(null=True)
    class Meta:
        unique_together = (('meeting', 'name'),)
        ordering = ('pk',)
f73eed7e2c3b541fcdd6322f243ee0ce268c9008 | 89 | py | Python | coaches/apps.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | coaches/apps.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | coaches/apps.py | keeks-mtl/go-tennis | af3f325a9cfb2faba4d935824492f4aea6d10309 | [
"W3C",
"PostgreSQL"
] | null | null | null | from django.apps import AppConfig
class CoachesConfig(AppConfig):
    """Django application configuration for the 'coaches' app."""
    name = 'coaches'
| 14.833333 | 33 | 0.752809 | from django.apps import AppConfig
class CoachesConfig(AppConfig):
    """Django application configuration for the 'coaches' app."""
    name = 'coaches'
| true | true |
f73eeebd09ea4fb83b7179f39a77f63e26e50787 | 4,743 | py | Python | tests/test_dask_image/test_ndfilters/test__conv.py | anlavandier/dask-image | a858c61ac5beb7de7d7644d7e85714b5c16c2a7a | [
"BSD-3-Clause"
] | 144 | 2018-03-02T16:52:10.000Z | 2022-03-29T11:41:06.000Z | tests/test_dask_image/test_ndfilters/test__conv.py | anlavandier/dask-image | a858c61ac5beb7de7d7644d7e85714b5c16c2a7a | [
"BSD-3-Clause"
] | 184 | 2018-03-02T19:43:26.000Z | 2022-03-31T10:20:05.000Z | tests/test_dask_image/test_ndfilters/test__conv.py | anlavandier/dask-image | a858c61ac5beb7de7d7644d7e85714b5c16c2a7a | [
"BSD-3-Clause"
] | 35 | 2018-03-02T16:37:48.000Z | 2021-12-10T20:47:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import scipy.ndimage
import dask.array as da
import dask_image.ndfilters
@pytest.mark.parametrize(
    "da_func",
    [
        (dask_image.ndfilters.convolve),
        (dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "err_type, weights, origin",
    [
        (ValueError, np.ones((1,)), 0),
        (ValueError, np.ones((1, 0)), 0),
        (RuntimeError, np.ones((1, 1)), (0,)),
        (RuntimeError, np.ones((1, 1)), [(0,)]),
        (ValueError, np.ones((1, 1)), 1),
        (TypeError, np.ones((1, 1)), 0.0),
        (TypeError, np.ones((1, 1)), (0.0, 0.0)),
        (TypeError, np.ones((1, 1)), 1+0j),
        (TypeError, np.ones((1, 1)), (0+0j, 1+0j)),
    ]
)
def test_convolutions_params(da_func,
                             err_type,
                             weights,
                             origin):
    """Invalid weights shapes/origin values must raise the expected error."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    with pytest.raises(err_type):
        da_func(d,
                weights,
                origin=origin)
@pytest.mark.parametrize(
    "da_func",
    [
        dask_image.ndfilters.convolve,
        dask_image.ndfilters.correlate,
    ]
)
def test_convolutions_shape_type(da_func):
    """Filtering must preserve plain-int dimensions in the result shape."""
    weights = np.ones((1, 1))
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    assert all([(type(s) is int) for s in d.shape])
    d2 = da_func(d, weights)
    assert all([(type(s) is int) for s in d2.shape])
@pytest.mark.parametrize(
    "da_func",
    [
        dask_image.ndfilters.convolve,
        dask_image.ndfilters.correlate,
    ]
)
def test_convolutions_comprehensions(da_func):
    """Filtered slices must stack/concatenate consistently with NumPy."""
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))
    weights = np.ones((1, 1))
    l2s = [da_func(d[i], weights) for i in range(len(d))]
    l2c = [da_func(d[i], weights)[None] for i in range(len(d))]
    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))
    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights",
    [
        np.ones((1, 1)),
    ]
)
def test_convolutions_identity(sp_func,
                               da_func,
                               weights):
    """A 1x1 kernel of ones must act as the identity and match SciPy."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        d, da_func(d, weights)
    )
    da.utils.assert_eq(
        sp_func(a, weights),
        da_func(d, weights)
    )
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights, origin",
    [
        (np.ones((2, 2)), 0),
        (np.ones((2, 3)), 0),
        (np.ones((2, 3)), (0, 1)),
        (np.ones((2, 3)), (0, -1)),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),
        (np.ones((5, 5)), 0),
        (np.ones((7, 7)), 0),
        (np.ones((8, 8)), 0),
        (np.ones((10, 10)), 0),
        (np.ones((5, 5)), 2),
        (np.ones((5, 5)), -2),
    ]
)
def test_convolutions_compare(sp_func,
                              da_func,
                              weights,
                              origin):
    """dask-image results must match SciPy across kernels and origins."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        sp_func(
            a, weights, origin=origin
        ),
        da_func(
            d, weights, origin=origin
        )
    )
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights",
    [
        np.ones((1, 5)),
        np.ones((5, 1)),
    ]
)
@pytest.mark.parametrize(
    "mode",
    ["reflect", "wrap", "nearest", "constant", "mirror"])
def test_convolutions_modes(sp_func,
                            da_func,
                            weights,
                            mode):
    """dask-image must match SciPy for every boundary-handling mode.

    Fix: normalized PEP 8 spacing (keyword arguments, tuples, lists) to
    match the other tests in this module; behavior is unchanged.
    """
    a = np.arange(140).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        sp_func(
            a, weights, mode=mode
        ),
        da_func(
            d, weights, mode=mode
        )
    )
import pytest
import numpy as np
import scipy.ndimage
import dask.array as da
import dask_image.ndfilters
@pytest.mark.parametrize(
    "da_func",
    [
        (dask_image.ndfilters.convolve),
        (dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "err_type, weights, origin",
    [
        (ValueError, np.ones((1,)), 0),
        (ValueError, np.ones((1, 0)), 0),
        (RuntimeError, np.ones((1, 1)), (0,)),
        (RuntimeError, np.ones((1, 1)), [(0,)]),
        (ValueError, np.ones((1, 1)), 1),
        (TypeError, np.ones((1, 1)), 0.0),
        (TypeError, np.ones((1, 1)), (0.0, 0.0)),
        (TypeError, np.ones((1, 1)), 1+0j),
        (TypeError, np.ones((1, 1)), (0+0j, 1+0j)),
    ]
)
def test_convolutions_params(da_func,
                             err_type,
                             weights,
                             origin):
    """Invalid weights shapes/origin values must raise the expected error."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    with pytest.raises(err_type):
        da_func(d,
                weights,
                origin=origin)
@pytest.mark.parametrize(
    "da_func",
    [
        dask_image.ndfilters.convolve,
        dask_image.ndfilters.correlate,
    ]
)
def test_convolutions_shape_type(da_func):
    """Filtering must preserve plain-int dimensions in the result shape."""
    weights = np.ones((1, 1))
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    assert all([(type(s) is int) for s in d.shape])
    d2 = da_func(d, weights)
    assert all([(type(s) is int) for s in d2.shape])
@pytest.mark.parametrize(
    "da_func",
    [
        dask_image.ndfilters.convolve,
        dask_image.ndfilters.correlate,
    ]
)
def test_convolutions_comprehensions(da_func):
    """Filtered slices must stack/concatenate consistently with NumPy."""
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))
    weights = np.ones((1, 1))
    l2s = [da_func(d[i], weights) for i in range(len(d))]
    l2c = [da_func(d[i], weights)[None] for i in range(len(d))]
    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))
    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights",
    [
        np.ones((1, 1)),
    ]
)
def test_convolutions_identity(sp_func,
                               da_func,
                               weights):
    """A 1x1 kernel of ones must act as the identity and match SciPy."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        d, da_func(d, weights)
    )
    da.utils.assert_eq(
        sp_func(a, weights),
        da_func(d, weights)
    )
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights, origin",
    [
        (np.ones((2, 2)), 0),
        (np.ones((2, 3)), 0),
        (np.ones((2, 3)), (0, 1)),
        (np.ones((2, 3)), (0, -1)),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),
        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),
        (np.ones((5, 5)), 0),
        (np.ones((7, 7)), 0),
        (np.ones((8, 8)), 0),
        (np.ones((10, 10)), 0),
        (np.ones((5, 5)), 2),
        (np.ones((5, 5)), -2),
    ]
)
def test_convolutions_compare(sp_func,
                              da_func,
                              weights,
                              origin):
    """dask-image results must match SciPy across kernels and origins."""
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        sp_func(
            a, weights, origin=origin
        ),
        da_func(
            d, weights, origin=origin
        )
    )
@pytest.mark.parametrize(
    "sp_func, da_func",
    [
        (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),
        (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),
    ]
)
@pytest.mark.parametrize(
    "weights",
    [
        np.ones((1, 5)),
        np.ones((5, 1)),
    ]
)
@pytest.mark.parametrize(
    "mode",
    ["reflect", "wrap", "nearest", "constant", "mirror"])
def test_convolutions_modes(sp_func,
                            da_func,
                            weights,
                            mode):
    """dask-image must match SciPy for every boundary-handling mode.

    Fix: normalized PEP 8 spacing (keyword arguments, tuples, lists) to
    match the other tests in this module; behavior is unchanged.
    """
    a = np.arange(140).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))
    da.utils.assert_eq(
        sp_func(
            a, weights, mode=mode
        ),
        da_func(
            d, weights, mode=mode
        )
    )
) | true | true |
f73eeed44ee1612072a37709a299d44b2467fc68 | 134 | py | Python | fedot/core/utils.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | 13 | 2020-07-14T10:52:40.000Z | 2022-03-31T13:01:47.000Z | fedot/core/utils.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | null | null | null | fedot/core/utils.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | 5 | 2020-08-10T09:43:22.000Z | 2022-03-22T08:28:08.000Z | from pathlib import Path
def project_root() -> Path:
    """Return the project's root directory (two levels above this module)."""
    here = Path(__file__)
    return here.parent.parent
| 19.142857 | 39 | 0.701493 | from pathlib import Path
def project_root() -> Path:
    """Return the project root folder (two levels above this file)."""
    return Path(__file__).parent.parent
| true | true |
f73ef0e03b53552f5c2372166d7234b5de548074 | 404 | py | Python | onadata/apps/messaging/constants.py | childhelpline/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 1 | 2018-07-15T13:13:43.000Z | 2018-07-15T13:13:43.000Z | onadata/apps/messaging/constants.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 14 | 2018-07-10T12:48:46.000Z | 2022-03-11T23:24:51.000Z | onadata/apps/messaging/constants.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 5 | 2018-07-04T07:59:14.000Z | 2020-01-28T07:50:18.000Z | # -*- coding: utf-8 -*-
"""
Messaging constant variables.
"""
from __future__ import unicode_literals
from builtins import str as text
from django.utils.translation import ugettext as _
# Target-type identifiers used when routing messages.
XFORM = text('xform')
PROJECT = text('project')
USER = text('user')
# Maps each target type to the Django app label defining its model.
APP_LABEL_MAPPING = {
    XFORM: 'logger',
    PROJECT: 'logger',
    USER: 'auth',
}
MESSAGE = 'message'
UNKNOWN_TARGET = _("Unknown target.")
| 16.833333 | 50 | 0.685644 |
from __future__ import unicode_literals
from builtins import str as text
from django.utils.translation import ugettext as _
# Target-type identifiers used when routing messages.
XFORM = text('xform')
PROJECT = text('project')
USER = text('user')
# Maps each target type to the Django app label defining its model.
APP_LABEL_MAPPING = {
    XFORM: 'logger',
    PROJECT: 'logger',
    USER: 'auth',
}
MESSAGE = 'message'
UNKNOWN_TARGET = _("Unknown target.")
| true | true |
f73ef15a6862806cc0d1f25594d5afbcc3576211 | 32,638 | py | Python | venv/lib/python3.7/site-packages/scapy/layers/tls/automaton_cli.py | nicholasadamou/python-proxy | 5be02cf0a20c243bd619808e11a1fa6b26bf7998 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/scapy/layers/tls/automaton_cli.py | nicholasadamou/python-proxy | 5be02cf0a20c243bd619808e11a1fa6b26bf7998 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/scapy/layers/tls/automaton_cli.py | nicholasadamou/python-proxy | 5be02cf0a20c243bd619808e11a1fa6b26bf7998 | [
"MIT"
] | null | null | null | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS client automaton. This makes for a primitive TLS stack.
Obviously you need rights for network access.
We support versions SSLv2 to TLS 1.2, along with many features.
There is no session resumption mechanism for now.
In order to run a client to tcp/50000 with one cipher suite of your choice:
> from scapy.all import *
> ch = TLSClientHello(ciphers=<int code of the cipher suite>)
> t = TLSClientAutomaton(dport=50000, client_hello=ch)
> t.run()
"""
from __future__ import print_function
import socket
from scapy.pton_ntop import inet_pton
from scapy.utils import randstring, repr_hex
from scapy.automaton import ATMT
from scapy.layers.tls.automaton import _TLSAutomaton
from scapy.layers.tls.basefields import _tls_version, _tls_version_options
from scapy.layers.tls.session import tlsSession
from scapy.layers.tls.extensions import TLS_Ext_SupportedGroups, \
TLS_Ext_SupportedVersions, TLS_Ext_SignatureAlgorithms
from scapy.layers.tls.handshake import TLSCertificate, TLSCertificateRequest, \
TLSCertificateVerify, TLSClientHello, TLSClientKeyExchange, \
TLSEncryptedExtensions, TLSFinished, TLSServerHello, TLSServerHelloDone, \
TLSServerKeyExchange, TLS13Certificate, TLS13ServerHello
from scapy.layers.tls.handshake_sslv2 import SSLv2ClientHello, \
SSLv2ServerHello, SSLv2ClientMasterKey, SSLv2ServerVerify, \
SSLv2ClientFinished, SSLv2ServerFinished, SSLv2ClientCertificate, \
SSLv2RequestCertificate
from scapy.layers.tls.keyexchange_tls13 import TLS_Ext_KeyShare_CH, \
KeyShareEntry
from scapy.layers.tls.record import TLSAlert, TLSChangeCipherSpec, \
TLSApplicationData
from scapy.modules import six
from scapy.packet import Raw
from scapy.compat import raw
class TLSClientAutomaton(_TLSAutomaton):
    """
    A simple TLS test client automaton. Try to overload some states or
    conditions and see what happens on the other side.

    Rather than with an interruption, the best way to stop this client is by
    typing 'quit'. This won't be a message sent to the server.

    _'mycert' and 'mykey' may be provided as filenames. They will be used in
    the handshake, should the server ask for client authentication.

    _'server_name' does not need to be set.

    _'client_hello' may hold a TLSClientHello or SSLv2ClientHello to be sent
    to the server. This is particularly useful for extensions tweaking.

    _'version' is a quicker way to advertise a protocol version ("sslv2",
    "tls1", "tls12", etc.) It may be overridden by the previous 'client_hello'.

    _'data' is a list of raw data to be sent to the server once the handshake
    has been completed. Both 'stop_server' and 'quit' will work this way.
    """
    def parse_args(self, server="127.0.0.1", dport=4433, server_name=None,
                   mycert=None, mykey=None,
                   client_hello=None, version=None,
                   data=None,
                   **kargs):
        """Resolve the server address and store the client configuration.

        See the class docstring for the meaning of the keyword arguments.
        """
        super(TLSClientAutomaton, self).parse_args(mycert=mycert,
                                                   mykey=mykey,
                                                   **kargs)
        tmp = socket.getaddrinfo(server, dport)
        self.remote_name = None
        try:
            # If 'server' is not a literal IPv4/IPv6 address, fall through
            # to the except clause and treat it as a hostname.
            if ':' in server:
                inet_pton(socket.AF_INET6, server)
            else:
                inet_pton(socket.AF_INET, server)
        except Exception:
            self.remote_name = socket.getfqdn(server)
            if self.remote_name != server:
                tmp = socket.getaddrinfo(self.remote_name, dport)
        if server_name:
            self.remote_name = server_name
        self.remote_family = tmp[0][0]
        self.remote_ip = tmp[0][4][0]
        self.remote_port = dport
        self.local_ip = None
        self.local_port = None
        self.socket = None
        self.client_hello = client_hello
        self.advertised_tls_version = None
        if version:
            v = _tls_version_options.get(version, None)
            if not v:
                self.vprint("Unrecognized TLS version option.")
            else:
                self.advertised_tls_version = v
        self.linebreak = False
        # Normalize 'data' into a list of raw byte strings to be sent once
        # the handshake completes (a list input is reversed here so entries
        # can later be consumed in their original order).
        if isinstance(data, bytes):
            self.data_to_send = [data]
        elif isinstance(data, six.string_types):
            self.data_to_send = [raw(data)]
        elif isinstance(data, list):
            self.data_to_send = list(raw(d) for d in reversed(data))
        else:
            self.data_to_send = []
    def vprint_sessioninfo(self):
        """In verbose mode, print a summary of the negotiated session
        (version, cipher suite, master secret, server certificates)."""
        if self.verbose:
            s = self.cur_session
            v = _tls_version[s.tls_version]
            self.vprint("Version : %s" % v)
            cs = s.wcs.ciphersuite.name
            self.vprint("Cipher suite : %s" % cs)
            if s.tls_version >= 0x0304:
                # TLS 1.3 keeps its secret in a dedicated attribute.
                ms = s.tls13_master_secret
            else:
                ms = s.master_secret
            self.vprint("Master secret : %s" % repr_hex(ms))
            if s.server_certs:
                self.vprint("Server certificate chain: %r" % s.server_certs)
            self.vprint()
    @ATMT.state(initial=True)
    def INITIAL(self):
        """Entry point of the automaton."""
        self.vprint("Starting TLS client automaton.")
        raise self.INIT_TLS_SESSION()
    @ATMT.state()
    def INIT_TLS_SESSION(self):
        """Create the tlsSession and record the advertised TLS version."""
        self.cur_session = tlsSession(connection_end="client")
        self.cur_session.client_certs = self.mycert
        self.cur_session.client_key = self.mykey
        v = self.advertised_tls_version
        if v:
            self.cur_session.advertised_tls_version = v
        else:
            # No explicit version requested: mirror the session default.
            default_version = self.cur_session.advertised_tls_version
            self.advertised_tls_version = default_version
        raise self.CONNECT()
    @ATMT.state()
    def CONNECT(self):
        """Open the TCP connection, then branch on the advertised version
        (SSLv2, TLS 1.3, or the classic TLS handshake)."""
        s = socket.socket(self.remote_family, socket.SOCK_STREAM)
        self.vprint()
        self.vprint("Trying to connect on %s:%d" % (self.remote_ip,
                                                    self.remote_port))
        s.connect((self.remote_ip, self.remote_port))
        self.socket = s
        self.local_ip, self.local_port = self.socket.getsockname()[:2]
        self.vprint()
        if self.cur_session.advertised_tls_version in [0x0200, 0x0002]:
            raise self.SSLv2_PREPARE_CLIENTHELLO()
        elif self.cur_session.advertised_tls_version >= 0x0304:
            raise self.TLS13_START()
        else:
            raise self.PREPARE_CLIENTFLIGHT1()
# TLS handshake #
    @ATMT.state()
    def PREPARE_CLIENTFLIGHT1(self):
        """Start buffering the first client flight into a new record."""
        self.add_record()
    @ATMT.condition(PREPARE_CLIENTFLIGHT1)
    def should_add_ClientHello(self):
        # Use the user-supplied ClientHello if any, else a default one.
        self.add_msg(self.client_hello or TLSClientHello())
        raise self.ADDED_CLIENTHELLO()
    @ATMT.state()
    def ADDED_CLIENTHELLO(self):
        pass
    @ATMT.condition(ADDED_CLIENTHELLO)
    def should_send_ClientFlight1(self):
        # Actually send the buffered records to the server.
        self.flush_records()
        raise self.SENT_CLIENTFLIGHT1()
    @ATMT.state()
    def SENT_CLIENTFLIGHT1(self):
        raise self.WAITING_SERVERFLIGHT1()
    @ATMT.state()
    def WAITING_SERVERFLIGHT1(self):
        """Block until the server's first flight has been received."""
        self.get_next_msg()
        raise self.RECEIVED_SERVERFLIGHT1()
    @ATMT.state()
    def RECEIVED_SERVERFLIGHT1(self):
        pass
    @ATMT.condition(RECEIVED_SERVERFLIGHT1, prio=1)
    def should_handle_ServerHello(self):
        """
        XXX We should check the ServerHello attributes for discrepancies with
        our own ClientHello.
        """
        self.raise_on_packet(TLSServerHello,
                             self.HANDLED_SERVERHELLO)
    @ATMT.state()
    def HANDLED_SERVERHELLO(self):
        pass
    @ATMT.condition(RECEIVED_SERVERFLIGHT1, prio=2)
    def missing_ServerHello(self):
        # Lower-priority fallback: no ServerHello was found in the flight.
        raise self.MISSING_SERVERHELLO()
    @ATMT.state()
    def MISSING_SERVERHELLO(self):
        self.vprint("Missing TLS ServerHello message!")
        raise self.CLOSE_NOTIFY()
    @ATMT.condition(HANDLED_SERVERHELLO, prio=1)
    def should_handle_ServerCertificate(self):
        # Anonymous key exchanges carry no Certificate message, so skip
        # straight to the handled state in that case.
        if not self.cur_session.prcs.key_exchange.anonymous:
            self.raise_on_packet(TLSCertificate,
                                 self.HANDLED_SERVERCERTIFICATE)
        raise self.HANDLED_SERVERCERTIFICATE()
    @ATMT.state()
    def HANDLED_SERVERCERTIFICATE(self):
        pass
    @ATMT.condition(HANDLED_SERVERHELLO, prio=2)
    def missing_ServerCertificate(self):
        raise self.MISSING_SERVERCERTIFICATE()
    @ATMT.state()
    def MISSING_SERVERCERTIFICATE(self):
        self.vprint("Missing TLS Certificate message!")
        raise self.CLOSE_NOTIFY()
    @ATMT.state()
    def HANDLED_CERTIFICATEREQUEST(self):
        """State reached when the server requested client authentication."""
        self.vprint("Server asked for a certificate...")
        if not self.mykey or not self.mycert:
            self.vprint("No client certificate to send!")
            self.vprint("Will try and send an empty Certificate message...")
    @ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=1)
    def should_handle_ServerKeyExchange_from_ServerCertificate(self):
        """
        XXX We should check the ServerKeyExchange attributes for discrepancies
        with our own ClientHello, along with the ServerHello and Certificate.
        """
        self.raise_on_packet(TLSServerKeyExchange,
                             self.HANDLED_SERVERKEYEXCHANGE)
    @ATMT.state(final=True)
    def MISSING_SERVERKEYEXCHANGE(self):
        pass
    @ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=2)
    def missing_ServerKeyExchange(self):
        # A missing SKE is fatal only for key exchanges that require one.
        if not self.cur_session.prcs.key_exchange.no_ske:
            raise self.MISSING_SERVERKEYEXCHANGE()
    @ATMT.state()
    def HANDLED_SERVERKEYEXCHANGE(self):
        pass
    # The next two helpers are shared by several ATMT conditions below; they
    # are not conditions themselves (no decorator).
    def should_handle_CertificateRequest(self):
        """
        XXX We should check the CertificateRequest attributes for discrepancies
        with the cipher suite, etc.
        """
        self.raise_on_packet(TLSCertificateRequest,
                             self.HANDLED_CERTIFICATEREQUEST)
    @ATMT.condition(HANDLED_SERVERKEYEXCHANGE, prio=2)
    def should_handle_CertificateRequest_from_ServerKeyExchange(self):
        self.should_handle_CertificateRequest()
    @ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=3)
    def should_handle_CertificateRequest_from_ServerCertificate(self):
        self.should_handle_CertificateRequest()
    def should_handle_ServerHelloDone(self):
        self.raise_on_packet(TLSServerHelloDone,
                             self.HANDLED_SERVERHELLODONE)
    @ATMT.condition(HANDLED_SERVERKEYEXCHANGE, prio=1)
    def should_handle_ServerHelloDone_from_ServerKeyExchange(self):
        return self.should_handle_ServerHelloDone()
    @ATMT.condition(HANDLED_CERTIFICATEREQUEST, prio=4)
    def should_handle_ServerHelloDone_from_CertificateRequest(self):
        return self.should_handle_ServerHelloDone()
    @ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=4)
    def should_handle_ServerHelloDone_from_ServerCertificate(self):
        return self.should_handle_ServerHelloDone()
    @ATMT.state()
    def HANDLED_SERVERHELLODONE(self):
        raise self.PREPARE_CLIENTFLIGHT2()
    # --- TLS 1.0-1.2 handshake, client flight 2 ---
    @ATMT.state()
    def PREPARE_CLIENTFLIGHT2(self):
        self.add_record()
    @ATMT.condition(PREPARE_CLIENTFLIGHT2, prio=1)
    def should_add_ClientCertificate(self):
        """
        If the server sent a CertificateRequest, we send a Certificate message.
        If no certificate is available, an empty Certificate message is sent:
        - this is a SHOULD in RFC 4346 (Section 7.4.6)
        - this is a MUST in RFC 5246 (Section 7.4.6)
        XXX We may want to add a complete chain.
        """
        hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
        if TLSCertificateRequest not in hs_msg:
            return
        certs = []
        if self.mycert:
            certs = [self.mycert]
        self.add_msg(TLSCertificate(certs=certs))
        raise self.ADDED_CLIENTCERTIFICATE()
    @ATMT.state()
    def ADDED_CLIENTCERTIFICATE(self):
        pass
    # Shared helper for the two conditions below (not itself a condition).
    def should_add_ClientKeyExchange(self):
        self.add_msg(TLSClientKeyExchange())
        raise self.ADDED_CLIENTKEYEXCHANGE()
    @ATMT.condition(PREPARE_CLIENTFLIGHT2, prio=2)
    def should_add_ClientKeyExchange_from_ClientFlight2(self):
        return self.should_add_ClientKeyExchange()
    @ATMT.condition(ADDED_CLIENTCERTIFICATE)
    def should_add_ClientKeyExchange_from_ClientCertificate(self):
        return self.should_add_ClientKeyExchange()
    @ATMT.state()
    def ADDED_CLIENTKEYEXCHANGE(self):
        pass
    @ATMT.condition(ADDED_CLIENTKEYEXCHANGE, prio=1)
    def should_add_ClientVerify(self):
        """
        XXX Section 7.4.7.1 of RFC 5246 states that the CertificateVerify
        message is only sent following a client certificate that has signing
        capability (i.e. not those containing fixed DH params).
        We should verify that before adding the message. We should also handle
        the case when the Certificate message was empty.
        """
        hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
        if (TLSCertificateRequest not in hs_msg or
            self.mycert is None or
            self.mykey is None):
            return
        self.add_msg(TLSCertificateVerify())
        raise self.ADDED_CERTIFICATEVERIFY()
    @ATMT.state()
    def ADDED_CERTIFICATEVERIFY(self):
        pass
    @ATMT.condition(ADDED_CERTIFICATEVERIFY)
    def should_add_ChangeCipherSpec_from_CertificateVerify(self):
        # CCS goes into its own record, as required by the record protocol.
        self.add_record()
        self.add_msg(TLSChangeCipherSpec())
        raise self.ADDED_CHANGECIPHERSPEC()
    @ATMT.condition(ADDED_CLIENTKEYEXCHANGE, prio=2)
    def should_add_ChangeCipherSpec_from_ClientKeyExchange(self):
        self.add_record()
        self.add_msg(TLSChangeCipherSpec())
        raise self.ADDED_CHANGECIPHERSPEC()
    @ATMT.state()
    def ADDED_CHANGECIPHERSPEC(self):
        pass
    @ATMT.condition(ADDED_CHANGECIPHERSPEC)
    def should_add_ClientFinished(self):
        # Finished is the first message protected by the new cipher state,
        # hence the fresh record after the CCS.
        self.add_record()
        self.add_msg(TLSFinished())
        raise self.ADDED_CLIENTFINISHED()
    @ATMT.state()
    def ADDED_CLIENTFINISHED(self):
        pass
    @ATMT.condition(ADDED_CLIENTFINISHED)
    def should_send_ClientFlight2(self):
        self.flush_records()
        raise self.SENT_CLIENTFLIGHT2()
    @ATMT.state()
    def SENT_CLIENTFLIGHT2(self):
        raise self.WAITING_SERVERFLIGHT2()
    @ATMT.state()
    def WAITING_SERVERFLIGHT2(self):
        self.get_next_msg()
        raise self.RECEIVED_SERVERFLIGHT2()
    @ATMT.state()
    def RECEIVED_SERVERFLIGHT2(self):
        pass
    @ATMT.condition(RECEIVED_SERVERFLIGHT2)
    def should_handle_ChangeCipherSpec(self):
        self.raise_on_packet(TLSChangeCipherSpec,
                             self.HANDLED_CHANGECIPHERSPEC)
    @ATMT.state()
    def HANDLED_CHANGECIPHERSPEC(self):
        pass
    @ATMT.condition(HANDLED_CHANGECIPHERSPEC)
    def should_handle_Finished(self):
        self.raise_on_packet(TLSFinished,
                             self.HANDLED_SERVERFINISHED)
    @ATMT.state()
    def HANDLED_SERVERFINISHED(self):
        # Handshake is complete; show the negotiated session parameters and
        # hand control to the interactive data loop.
        self.vprint("TLS handshake completed!")
        self.vprint_sessioninfo()
        self.vprint("You may send data or use 'quit'.")
    # end of TLS handshake #
    @ATMT.condition(HANDLED_SERVERFINISHED)
    def should_wait_ClientData(self):
        raise self.WAIT_CLIENTDATA()
    @ATMT.state()
    def WAIT_CLIENTDATA(self):
        pass
    @ATMT.condition(WAIT_CLIENTDATA, prio=1)
    def add_ClientData(self):
        """
        The user may type in:
        GET / HTTP/1.1\r\nHost: testserver.com\r\n\r\n
        Special characters are handled so that it becomes a valid HTTP request.
        """
        # Data comes either from stdin (encoded to bytes) or from the
        # pre-supplied data_to_send list (already bytes, see parse_args).
        if not self.data_to_send:
            data = six.moves.input().replace('\\r', '\r').replace('\\n', '\n').encode()  # noqa: E501
        else:
            data = self.data_to_send.pop()
        if data == b"quit":
            # Stop feeding data; the prio=2 condition below then closes.
            return
        if self.linebreak:
            data += b"\n"
        self.add_record()
        self.add_msg(TLSApplicationData(data=data))
        raise self.ADDED_CLIENTDATA()
    @ATMT.condition(WAIT_CLIENTDATA, prio=2)
    def no_more_ClientData(self):
        raise self.CLOSE_NOTIFY()
    @ATMT.state()
    def ADDED_CLIENTDATA(self):
        pass
    @ATMT.condition(ADDED_CLIENTDATA)
    def should_send_ClientData(self):
        self.flush_records()
        raise self.SENT_CLIENTDATA()
    @ATMT.state()
    def SENT_CLIENTDATA(self):
        raise self.WAITING_SERVERDATA()
    @ATMT.state()
    def WAITING_SERVERDATA(self):
        # Short timeout: the server may simply have nothing to say.
        self.get_next_msg(0.3, 1)
        raise self.RECEIVED_SERVERDATA()
    @ATMT.state()
    def RECEIVED_SERVERDATA(self):
        pass
    @ATMT.condition(RECEIVED_SERVERDATA, prio=1)
    def should_handle_ServerData(self):
        # Consume one buffered message per transition; an Alert ends the
        # session, anything else is just displayed.
        if not self.buffer_in:
            raise self.WAIT_CLIENTDATA()
        p = self.buffer_in[0]
        if isinstance(p, TLSApplicationData):
            print("> Received: %r" % p.data)
        elif isinstance(p, TLSAlert):
            print("> Received: %r" % p)
            raise self.CLOSE_NOTIFY()
        else:
            print("> Received: %r" % p)
        self.buffer_in = self.buffer_in[1:]
        raise self.HANDLED_SERVERDATA()
    @ATMT.state()
    def HANDLED_SERVERDATA(self):
        raise self.WAIT_CLIENTDATA()
    @ATMT.state()
    def CLOSE_NOTIFY(self):
        self.vprint()
        self.vprint("Trying to send a TLSAlert to the server...")
    @ATMT.condition(CLOSE_NOTIFY)
    def close_session(self):
        # level=1 (warning), descr=0 (close_notify): graceful termination.
        self.add_record()
        self.add_msg(TLSAlert(level=1, descr=0))
        try:
            self.flush_records()
        except Exception:
            self.vprint("Could not send termination Alert, maybe the server stopped?")  # noqa: E501
        raise self.FINAL()
    # SSLv2 handshake #
    @ATMT.state()
    def SSLv2_PREPARE_CLIENTHELLO(self):
        pass
    @ATMT.condition(SSLv2_PREPARE_CLIENTHELLO)
    def sslv2_should_add_ClientHello(self):
        self.add_record(is_sslv2=True)
        # A 16-byte random challenge is required by the SSLv2 ClientHello.
        p = self.client_hello or SSLv2ClientHello(challenge=randstring(16))
        self.add_msg(p)
        raise self.SSLv2_ADDED_CLIENTHELLO()
    @ATMT.state()
    def SSLv2_ADDED_CLIENTHELLO(self):
        pass
    @ATMT.condition(SSLv2_ADDED_CLIENTHELLO)
    def sslv2_should_send_ClientHello(self):
        self.flush_records()
        raise self.SSLv2_SENT_CLIENTHELLO()
    @ATMT.state()
    def SSLv2_SENT_CLIENTHELLO(self):
        raise self.SSLv2_WAITING_SERVERHELLO()
    @ATMT.state()
    def SSLv2_WAITING_SERVERHELLO(self):
        self.get_next_msg()
        raise self.SSLv2_RECEIVED_SERVERHELLO()
    @ATMT.state()
    def SSLv2_RECEIVED_SERVERHELLO(self):
        pass
    @ATMT.condition(SSLv2_RECEIVED_SERVERHELLO, prio=1)
    def sslv2_should_handle_ServerHello(self):
        self.raise_on_packet(SSLv2ServerHello,
                             self.SSLv2_HANDLED_SERVERHELLO)
    @ATMT.state()
    def SSLv2_HANDLED_SERVERHELLO(self):
        pass
    @ATMT.condition(SSLv2_RECEIVED_SERVERHELLO, prio=2)
    def sslv2_missing_ServerHello(self):
        raise self.SSLv2_MISSING_SERVERHELLO()
    @ATMT.state()
    def SSLv2_MISSING_SERVERHELLO(self):
        self.vprint("Missing SSLv2 ServerHello message!")
        raise self.SSLv2_CLOSE_NOTIFY()
    @ATMT.condition(SSLv2_HANDLED_SERVERHELLO)
    def sslv2_should_add_ClientMasterKey(self):
        self.add_record(is_sslv2=True)
        self.add_msg(SSLv2ClientMasterKey())
        raise self.SSLv2_ADDED_CLIENTMASTERKEY()
    @ATMT.state()
    def SSLv2_ADDED_CLIENTMASTERKEY(self):
        pass
    @ATMT.condition(SSLv2_ADDED_CLIENTMASTERKEY)
    def sslv2_should_send_ClientMasterKey(self):
        self.flush_records()
        raise self.SSLv2_SENT_CLIENTMASTERKEY()
    @ATMT.state()
    def SSLv2_SENT_CLIENTMASTERKEY(self):
        raise self.SSLv2_WAITING_SERVERVERIFY()
    @ATMT.state()
    def SSLv2_WAITING_SERVERVERIFY(self):
        # We give the server 0.5 second to send his ServerVerify.
        # Else we assume that he's waiting for our ClientFinished.
        self.get_next_msg(0.5, 0)
        raise self.SSLv2_RECEIVED_SERVERVERIFY()
    @ATMT.state()
    def SSLv2_RECEIVED_SERVERVERIFY(self):
        pass
    @ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=1)
    def sslv2_should_handle_ServerVerify(self):
        # get_next_msg=False: the message (if any) is already buffered.
        self.raise_on_packet(SSLv2ServerVerify,
                             self.SSLv2_HANDLED_SERVERVERIFY,
                             get_next_msg=False)
    @ATMT.state()
    def SSLv2_HANDLED_SERVERVERIFY(self):
        pass
    # Shared helper (not a condition): add a ClientFinished once, guarding
    # against sending it twice when both call sites are reachable.
    def sslv2_should_add_ClientFinished(self):
        hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
        if SSLv2ClientFinished in hs_msg:
            return
        self.add_record(is_sslv2=True)
        self.add_msg(SSLv2ClientFinished())
        raise self.SSLv2_ADDED_CLIENTFINISHED()
    @ATMT.condition(SSLv2_HANDLED_SERVERVERIFY, prio=1)
    def sslv2_should_add_ClientFinished_from_ServerVerify(self):
        return self.sslv2_should_add_ClientFinished()
    @ATMT.condition(SSLv2_HANDLED_SERVERVERIFY, prio=2)
    def sslv2_should_wait_ServerFinished_from_ServerVerify(self):
        raise self.SSLv2_WAITING_SERVERFINISHED()
    @ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=2)
    def sslv2_should_add_ClientFinished_from_NoServerVerify(self):
        return self.sslv2_should_add_ClientFinished()
    @ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=3)
    def sslv2_missing_ServerVerify(self):
        raise self.SSLv2_MISSING_SERVERVERIFY()
    @ATMT.state(final=True)
    def SSLv2_MISSING_SERVERVERIFY(self):
        self.vprint("Missing SSLv2 ServerVerify message!")
        raise self.SSLv2_CLOSE_NOTIFY()
    @ATMT.state()
    def SSLv2_ADDED_CLIENTFINISHED(self):
        pass
    @ATMT.condition(SSLv2_ADDED_CLIENTFINISHED)
    def sslv2_should_send_ClientFinished(self):
        self.flush_records()
        raise self.SSLv2_SENT_CLIENTFINISHED()
    @ATMT.state()
    def SSLv2_SENT_CLIENTFINISHED(self):
        # If the ServerVerify already arrived, only ServerFinished remains;
        # otherwise go back and read the (late) ServerVerify first.
        hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
        if SSLv2ServerVerify in hs_msg:
            raise self.SSLv2_WAITING_SERVERFINISHED()
        else:
            self.get_next_msg()
            raise self.SSLv2_RECEIVED_SERVERVERIFY()
    @ATMT.state()
    def SSLv2_WAITING_SERVERFINISHED(self):
        self.get_next_msg()
        raise self.SSLv2_RECEIVED_SERVERFINISHED()
    @ATMT.state()
    def SSLv2_RECEIVED_SERVERFINISHED(self):
        pass
    @ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=1)
    def sslv2_should_handle_ServerFinished(self):
        self.raise_on_packet(SSLv2ServerFinished,
                             self.SSLv2_HANDLED_SERVERFINISHED)
    # SSLv2 client authentication #
    @ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=2)
    def sslv2_should_handle_RequestCertificate(self):
        self.raise_on_packet(SSLv2RequestCertificate,
                             self.SSLv2_HANDLED_REQUESTCERTIFICATE)
    @ATMT.state()
    def SSLv2_HANDLED_REQUESTCERTIFICATE(self):
        self.vprint("Server asked for a certificate...")
        if not self.mykey or not self.mycert:
            self.vprint("No client certificate to send!")
            raise self.SSLv2_CLOSE_NOTIFY()
    @ATMT.condition(SSLv2_HANDLED_REQUESTCERTIFICATE)
    def sslv2_should_add_ClientCertificate(self):
        self.add_record(is_sslv2=True)
        self.add_msg(SSLv2ClientCertificate(certdata=self.mycert))
        raise self.SSLv2_ADDED_CLIENTCERTIFICATE()
    @ATMT.state()
    def SSLv2_ADDED_CLIENTCERTIFICATE(self):
        pass
    @ATMT.condition(SSLv2_ADDED_CLIENTCERTIFICATE)
    def sslv2_should_send_ClientCertificate(self):
        self.flush_records()
        raise self.SSLv2_SENT_CLIENTCERTIFICATE()
    @ATMT.state()
    def SSLv2_SENT_CLIENTCERTIFICATE(self):
        raise self.SSLv2_WAITING_SERVERFINISHED()
    # end of SSLv2 client authentication #
    @ATMT.state()
    def SSLv2_HANDLED_SERVERFINISHED(self):
        self.vprint("SSLv2 handshake completed!")
        self.vprint_sessioninfo()
        self.vprint("You may send data or use 'quit'.")
    @ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=3)
    def sslv2_missing_ServerFinished(self):
        raise self.SSLv2_MISSING_SERVERFINISHED()
    @ATMT.state()
    def SSLv2_MISSING_SERVERFINISHED(self):
        self.vprint("Missing SSLv2 ServerFinished message!")
        raise self.SSLv2_CLOSE_NOTIFY()
    # end of SSLv2 handshake #
    @ATMT.condition(SSLv2_HANDLED_SERVERFINISHED)
    def sslv2_should_wait_ClientData(self):
        raise self.SSLv2_WAITING_CLIENTDATA()
    @ATMT.state()
    def SSLv2_WAITING_CLIENTDATA(self):
        pass
@ATMT.condition(SSLv2_WAITING_CLIENTDATA, prio=1)
def sslv2_add_ClientData(self):
if not self.data_to_send:
data = six.moves.input().replace('\\r', '\r').replace('\\n', '\n').encode() # noqa: E501
else:
data = self.data_to_send.pop()
self.vprint("> Read from list: %s" % data)
if data == "quit":
return
if self.linebreak:
data += "\n"
self.add_record(is_sslv2=True)
self.add_msg(Raw(data))
raise self.SSLv2_ADDED_CLIENTDATA()
    @ATMT.condition(SSLv2_WAITING_CLIENTDATA, prio=2)
    def sslv2_no_more_ClientData(self):
        raise self.SSLv2_CLOSE_NOTIFY()
    @ATMT.state()
    def SSLv2_ADDED_CLIENTDATA(self):
        pass
    @ATMT.condition(SSLv2_ADDED_CLIENTDATA)
    def sslv2_should_send_ClientData(self):
        self.flush_records()
        raise self.SSLv2_SENT_CLIENTDATA()
    @ATMT.state()
    def SSLv2_SENT_CLIENTDATA(self):
        raise self.SSLv2_WAITING_SERVERDATA()
    @ATMT.state()
    def SSLv2_WAITING_SERVERDATA(self):
        # Short timeout: the server may have nothing to send.
        self.get_next_msg(0.3, 1)
        raise self.SSLv2_RECEIVED_SERVERDATA()
    @ATMT.state()
    def SSLv2_RECEIVED_SERVERDATA(self):
        pass
    @ATMT.condition(SSLv2_RECEIVED_SERVERDATA)
    def sslv2_should_handle_ServerData(self):
        if not self.buffer_in:
            raise self.SSLv2_WAITING_CLIENTDATA()
        p = self.buffer_in[0]
        print("> Received: %r" % p.load)
        # A server 'goodbye' is the ad-hoc SSLv2 session-termination signal
        # (see SSLv2_CLOSE_NOTIFY below).
        if p.load.startswith(b"goodbye"):
            raise self.SSLv2_CLOSE_NOTIFY()
        self.buffer_in = self.buffer_in[1:]
        raise self.SSLv2_HANDLED_SERVERDATA()
    @ATMT.state()
    def SSLv2_HANDLED_SERVERDATA(self):
        raise self.SSLv2_WAITING_CLIENTDATA()
    @ATMT.state()
    def SSLv2_CLOSE_NOTIFY(self):
        """
        There is no proper way to end an SSLv2 session.
        We try and send a 'goodbye' message as a substitute.
        """
        self.vprint()
        self.vprint("Trying to send a 'goodbye' to the server...")
    @ATMT.condition(SSLv2_CLOSE_NOTIFY)
    def sslv2_close_session(self):
        # NOTE(review): unlike every other SSLv2 send path in this class,
        # this calls add_record() without is_sslv2=True -- confirm whether
        # the 'goodbye' is meant to go out in a non-SSLv2 record.
        self.add_record()
        self.add_msg(Raw('goodbye'))
        try:
            self.flush_records()
        except Exception:
            self.vprint("Could not send our goodbye. The server probably stopped.")  # noqa: E501
        self.socket.close()
        raise self.FINAL()
    # TLS 1.3 handshake #
    @ATMT.state()
    def TLS13_START(self):
        pass
    @ATMT.condition(TLS13_START)
    def tls13_should_add_ClientHello(self):
        # we have to use the legacy, plaintext TLS record here
        self.add_record(is_tls13=False)
        if self.client_hello:
            p = self.client_hello
        else:
            # When trying to connect to a public TLS 1.3 server,
            # you will most likely need to provide an SNI extension.
            # sn = ServerName(servername="<put server name here>")
            ext = [TLS_Ext_SupportedGroups(groups=["secp256r1"]),
                   # TLS_Ext_ServerName(servernames=[sn]),
                   TLS_Ext_KeyShare_CH(client_shares=[KeyShareEntry(group=23)]),  # noqa: E501
                   TLS_Ext_SupportedVersions(versions=["TLS 1.3-d18"]),
                   TLS_Ext_SignatureAlgorithms(sig_algs=["sha256+rsapss",
                                                         "sha256+rsa"])]
            # 0x1301 = TLS_AES_128_GCM_SHA256
            p = TLSClientHello(ciphers=0x1301, ext=ext)
        self.add_msg(p)
        raise self.TLS13_ADDED_CLIENTHELLO()
    @ATMT.state()
    def TLS13_ADDED_CLIENTHELLO(self):
        pass
    @ATMT.condition(TLS13_ADDED_CLIENTHELLO)
    def tls13_should_send_ClientHello(self):
        self.flush_records()
        raise self.TLS13_SENT_CLIENTHELLO()
    @ATMT.state()
    def TLS13_SENT_CLIENTHELLO(self):
        raise self.TLS13_WAITING_SERVERHELLO()
    @ATMT.state()
    def TLS13_WAITING_SERVERHELLO(self):
        self.get_next_msg()
    @ATMT.condition(TLS13_WAITING_SERVERHELLO)
    def tls13_should_handle_ServerHello(self):
        self.raise_on_packet(TLS13ServerHello,
                             self.TLS13_WAITING_ENCRYPTEDEXTENSIONS)
    @ATMT.state()
    def TLS13_WAITING_ENCRYPTEDEXTENSIONS(self):
        self.get_next_msg()
    @ATMT.condition(TLS13_WAITING_ENCRYPTEDEXTENSIONS)
    def tls13_should_handle_EncryptedExtensions(self):
        self.raise_on_packet(TLSEncryptedExtensions,
                             self.TLS13_WAITING_CERTIFICATE)
    @ATMT.state()
    def TLS13_WAITING_CERTIFICATE(self):
        self.get_next_msg()
    @ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=1)
    def tls13_should_handle_Certificate(self):
        self.raise_on_packet(TLS13Certificate,
                             self.TLS13_WAITING_CERTIFICATEVERIFY)
    @ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=2)
    def tls13_should_handle_CertificateRequest(self):
        hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
        if TLSCertificateRequest in hs_msg:
            self.vprint("TLSCertificateRequest already received!")
        # Loop back to the same state: a Certificate should still follow.
        self.raise_on_packet(TLSCertificateRequest,
                             self.TLS13_WAITING_CERTIFICATE)
    @ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=3)
    def tls13_should_handle_ServerFinished_from_EncryptedExtensions(self):
        # NOTE(review): TLS13_CONNECTED is not defined in this class as
        # shown here -- confirm the state exists (e.g. for PSK resumption).
        self.raise_on_packet(TLSFinished,
                             self.TLS13_CONNECTED)
    @ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=4)
    def tls13_missing_Certificate(self):
        self.vprint("Missing TLS 1.3 message after EncryptedExtensions!")
        raise self.FINAL()
    @ATMT.state()
    def TLS13_WAITING_CERTIFICATEVERIFY(self):
        self.get_next_msg()
    @ATMT.condition(TLS13_WAITING_CERTIFICATEVERIFY)
    def tls13_should_handle_CertificateVerify(self):
        self.raise_on_packet(TLSCertificateVerify,
                             self.TLS13_WAITING_SERVERFINISHED)
    @ATMT.state()
    def TLS13_WAITING_SERVERFINISHED(self):
        self.get_next_msg()
    @ATMT.condition(TLS13_WAITING_SERVERFINISHED)
    def tls13_should_handle_ServerFinished_from_CertificateVerify(self):
        self.raise_on_packet(TLSFinished,
                             self.TLS13_PREPARE_CLIENTFLIGHT2)
    @ATMT.state()
    def TLS13_PREPARE_CLIENTFLIGHT2(self):
        self.add_record(is_tls13=True)
        # raise self.FINAL()
    @ATMT.condition(TLS13_PREPARE_CLIENTFLIGHT2)
    def tls13_should_add_ClientFinished(self):
        self.add_msg(TLSFinished())
        raise self.TLS13_ADDED_CLIENTFINISHED()
    @ATMT.state()
    def TLS13_ADDED_CLIENTFINISHED(self):
        pass
    @ATMT.condition(TLS13_ADDED_CLIENTFINISHED)
    def tls13_should_send_ClientFlight2(self):
        self.flush_records()
        raise self.TLS13_SENT_CLIENTFLIGHT2()
    @ATMT.state()
    def TLS13_SENT_CLIENTFLIGHT2(self):
        # Reuse the TLS 1.x completion state to enter the data loop.
        raise self.HANDLED_SERVERFINISHED()
    @ATMT.state(final=True)
    def FINAL(self):
        # We might call shutdown, but it may happen that the server
        # did not wait for us to shutdown after answering our data query.
        # self.socket.shutdown(1)
        self.vprint("Closing client socket...")
        self.socket.close()
        self.vprint("Ending TLS client automaton.")
# NOTE: stray dataset-metadata row ("| 34.464625 | 101 | 0.660549 |") was not
# part of the original module; kept here as a comment so the file parses.
from __future__ import print_function
import socket
from scapy.pton_ntop import inet_pton
from scapy.utils import randstring, repr_hex
from scapy.automaton import ATMT
from scapy.layers.tls.automaton import _TLSAutomaton
from scapy.layers.tls.basefields import _tls_version, _tls_version_options
from scapy.layers.tls.session import tlsSession
from scapy.layers.tls.extensions import TLS_Ext_SupportedGroups, \
TLS_Ext_SupportedVersions, TLS_Ext_SignatureAlgorithms
from scapy.layers.tls.handshake import TLSCertificate, TLSCertificateRequest, \
TLSCertificateVerify, TLSClientHello, TLSClientKeyExchange, \
TLSEncryptedExtensions, TLSFinished, TLSServerHello, TLSServerHelloDone, \
TLSServerKeyExchange, TLS13Certificate, TLS13ServerHello
from scapy.layers.tls.handshake_sslv2 import SSLv2ClientHello, \
SSLv2ServerHello, SSLv2ClientMasterKey, SSLv2ServerVerify, \
SSLv2ClientFinished, SSLv2ServerFinished, SSLv2ClientCertificate, \
SSLv2RequestCertificate
from scapy.layers.tls.keyexchange_tls13 import TLS_Ext_KeyShare_CH, \
KeyShareEntry
from scapy.layers.tls.record import TLSAlert, TLSChangeCipherSpec, \
TLSApplicationData
from scapy.modules import six
from scapy.packet import Raw
from scapy.compat import raw
class TLSClientAutomaton(_TLSAutomaton):
def parse_args(self, server="127.0.0.1", dport=4433, server_name=None,
mycert=None, mykey=None,
client_hello=None, version=None,
data=None,
**kargs):
super(TLSClientAutomaton, self).parse_args(mycert=mycert,
mykey=mykey,
**kargs)
tmp = socket.getaddrinfo(server, dport)
self.remote_name = None
try:
if ':' in server:
inet_pton(socket.AF_INET6, server)
else:
inet_pton(socket.AF_INET, server)
except Exception:
self.remote_name = socket.getfqdn(server)
if self.remote_name != server:
tmp = socket.getaddrinfo(self.remote_name, dport)
if server_name:
self.remote_name = server_name
self.remote_family = tmp[0][0]
self.remote_ip = tmp[0][4][0]
self.remote_port = dport
self.local_ip = None
self.local_port = None
self.socket = None
self.client_hello = client_hello
self.advertised_tls_version = None
if version:
v = _tls_version_options.get(version, None)
if not v:
self.vprint("Unrecognized TLS version option.")
else:
self.advertised_tls_version = v
self.linebreak = False
if isinstance(data, bytes):
self.data_to_send = [data]
elif isinstance(data, six.string_types):
self.data_to_send = [raw(data)]
elif isinstance(data, list):
self.data_to_send = list(raw(d) for d in reversed(data))
else:
self.data_to_send = []
def vprint_sessioninfo(self):
if self.verbose:
s = self.cur_session
v = _tls_version[s.tls_version]
self.vprint("Version : %s" % v)
cs = s.wcs.ciphersuite.name
self.vprint("Cipher suite : %s" % cs)
if s.tls_version >= 0x0304:
ms = s.tls13_master_secret
else:
ms = s.master_secret
self.vprint("Master secret : %s" % repr_hex(ms))
if s.server_certs:
self.vprint("Server certificate chain: %r" % s.server_certs)
self.vprint()
@ATMT.state(initial=True)
def INITIAL(self):
self.vprint("Starting TLS client automaton.")
raise self.INIT_TLS_SESSION()
@ATMT.state()
def INIT_TLS_SESSION(self):
self.cur_session = tlsSession(connection_end="client")
self.cur_session.client_certs = self.mycert
self.cur_session.client_key = self.mykey
v = self.advertised_tls_version
if v:
self.cur_session.advertised_tls_version = v
else:
default_version = self.cur_session.advertised_tls_version
self.advertised_tls_version = default_version
raise self.CONNECT()
@ATMT.state()
def CONNECT(self):
s = socket.socket(self.remote_family, socket.SOCK_STREAM)
self.vprint()
self.vprint("Trying to connect on %s:%d" % (self.remote_ip,
self.remote_port))
s.connect((self.remote_ip, self.remote_port))
self.socket = s
self.local_ip, self.local_port = self.socket.getsockname()[:2]
self.vprint()
if self.cur_session.advertised_tls_version in [0x0200, 0x0002]:
raise self.SSLv2_PREPARE_CLIENTHELLO()
elif self.cur_session.advertised_tls_version >= 0x0304:
raise self.TLS13_START()
else:
raise self.PREPARE_CLIENTFLIGHT1()
@ATMT.state()
def PREPARE_CLIENTFLIGHT1(self):
self.add_record()
@ATMT.condition(PREPARE_CLIENTFLIGHT1)
def should_add_ClientHello(self):
self.add_msg(self.client_hello or TLSClientHello())
raise self.ADDED_CLIENTHELLO()
@ATMT.state()
def ADDED_CLIENTHELLO(self):
pass
@ATMT.condition(ADDED_CLIENTHELLO)
def should_send_ClientFlight1(self):
self.flush_records()
raise self.SENT_CLIENTFLIGHT1()
@ATMT.state()
def SENT_CLIENTFLIGHT1(self):
raise self.WAITING_SERVERFLIGHT1()
@ATMT.state()
def WAITING_SERVERFLIGHT1(self):
self.get_next_msg()
raise self.RECEIVED_SERVERFLIGHT1()
@ATMT.state()
def RECEIVED_SERVERFLIGHT1(self):
pass
@ATMT.condition(RECEIVED_SERVERFLIGHT1, prio=1)
def should_handle_ServerHello(self):
self.raise_on_packet(TLSServerHello,
self.HANDLED_SERVERHELLO)
@ATMT.state()
def HANDLED_SERVERHELLO(self):
pass
@ATMT.condition(RECEIVED_SERVERFLIGHT1, prio=2)
def missing_ServerHello(self):
raise self.MISSING_SERVERHELLO()
@ATMT.state()
def MISSING_SERVERHELLO(self):
self.vprint("Missing TLS ServerHello message!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_SERVERHELLO, prio=1)
def should_handle_ServerCertificate(self):
if not self.cur_session.prcs.key_exchange.anonymous:
self.raise_on_packet(TLSCertificate,
self.HANDLED_SERVERCERTIFICATE)
raise self.HANDLED_SERVERCERTIFICATE()
@ATMT.state()
def HANDLED_SERVERCERTIFICATE(self):
pass
@ATMT.condition(HANDLED_SERVERHELLO, prio=2)
def missing_ServerCertificate(self):
raise self.MISSING_SERVERCERTIFICATE()
@ATMT.state()
def MISSING_SERVERCERTIFICATE(self):
self.vprint("Missing TLS Certificate message!")
raise self.CLOSE_NOTIFY()
@ATMT.state()
def HANDLED_CERTIFICATEREQUEST(self):
self.vprint("Server asked for a certificate...")
if not self.mykey or not self.mycert:
self.vprint("No client certificate to send!")
self.vprint("Will try and send an empty Certificate message...")
@ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=1)
def should_handle_ServerKeyExchange_from_ServerCertificate(self):
self.raise_on_packet(TLSServerKeyExchange,
self.HANDLED_SERVERKEYEXCHANGE)
@ATMT.state(final=True)
def MISSING_SERVERKEYEXCHANGE(self):
pass
@ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=2)
def missing_ServerKeyExchange(self):
if not self.cur_session.prcs.key_exchange.no_ske:
raise self.MISSING_SERVERKEYEXCHANGE()
@ATMT.state()
def HANDLED_SERVERKEYEXCHANGE(self):
pass
def should_handle_CertificateRequest(self):
self.raise_on_packet(TLSCertificateRequest,
self.HANDLED_CERTIFICATEREQUEST)
@ATMT.condition(HANDLED_SERVERKEYEXCHANGE, prio=2)
def should_handle_CertificateRequest_from_ServerKeyExchange(self):
self.should_handle_CertificateRequest()
@ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=3)
def should_handle_CertificateRequest_from_ServerCertificate(self):
self.should_handle_CertificateRequest()
def should_handle_ServerHelloDone(self):
self.raise_on_packet(TLSServerHelloDone,
self.HANDLED_SERVERHELLODONE)
@ATMT.condition(HANDLED_SERVERKEYEXCHANGE, prio=1)
def should_handle_ServerHelloDone_from_ServerKeyExchange(self):
return self.should_handle_ServerHelloDone()
@ATMT.condition(HANDLED_CERTIFICATEREQUEST, prio=4)
def should_handle_ServerHelloDone_from_CertificateRequest(self):
return self.should_handle_ServerHelloDone()
@ATMT.condition(HANDLED_SERVERCERTIFICATE, prio=4)
def should_handle_ServerHelloDone_from_ServerCertificate(self):
return self.should_handle_ServerHelloDone()
@ATMT.state()
def HANDLED_SERVERHELLODONE(self):
raise self.PREPARE_CLIENTFLIGHT2()
@ATMT.state()
def PREPARE_CLIENTFLIGHT2(self):
self.add_record()
@ATMT.condition(PREPARE_CLIENTFLIGHT2, prio=1)
def should_add_ClientCertificate(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if TLSCertificateRequest not in hs_msg:
return
certs = []
if self.mycert:
certs = [self.mycert]
self.add_msg(TLSCertificate(certs=certs))
raise self.ADDED_CLIENTCERTIFICATE()
@ATMT.state()
def ADDED_CLIENTCERTIFICATE(self):
pass
def should_add_ClientKeyExchange(self):
self.add_msg(TLSClientKeyExchange())
raise self.ADDED_CLIENTKEYEXCHANGE()
@ATMT.condition(PREPARE_CLIENTFLIGHT2, prio=2)
def should_add_ClientKeyExchange_from_ClientFlight2(self):
return self.should_add_ClientKeyExchange()
@ATMT.condition(ADDED_CLIENTCERTIFICATE)
def should_add_ClientKeyExchange_from_ClientCertificate(self):
return self.should_add_ClientKeyExchange()
@ATMT.state()
def ADDED_CLIENTKEYEXCHANGE(self):
pass
@ATMT.condition(ADDED_CLIENTKEYEXCHANGE, prio=1)
def should_add_ClientVerify(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if (TLSCertificateRequest not in hs_msg or
self.mycert is None or
self.mykey is None):
return
self.add_msg(TLSCertificateVerify())
raise self.ADDED_CERTIFICATEVERIFY()
@ATMT.state()
def ADDED_CERTIFICATEVERIFY(self):
pass
@ATMT.condition(ADDED_CERTIFICATEVERIFY)
def should_add_ChangeCipherSpec_from_CertificateVerify(self):
self.add_record()
self.add_msg(TLSChangeCipherSpec())
raise self.ADDED_CHANGECIPHERSPEC()
@ATMT.condition(ADDED_CLIENTKEYEXCHANGE, prio=2)
def should_add_ChangeCipherSpec_from_ClientKeyExchange(self):
self.add_record()
self.add_msg(TLSChangeCipherSpec())
raise self.ADDED_CHANGECIPHERSPEC()
@ATMT.state()
def ADDED_CHANGECIPHERSPEC(self):
pass
@ATMT.condition(ADDED_CHANGECIPHERSPEC)
def should_add_ClientFinished(self):
self.add_record()
self.add_msg(TLSFinished())
raise self.ADDED_CLIENTFINISHED()
@ATMT.state()
def ADDED_CLIENTFINISHED(self):
pass
@ATMT.condition(ADDED_CLIENTFINISHED)
def should_send_ClientFlight2(self):
self.flush_records()
raise self.SENT_CLIENTFLIGHT2()
@ATMT.state()
def SENT_CLIENTFLIGHT2(self):
raise self.WAITING_SERVERFLIGHT2()
@ATMT.state()
def WAITING_SERVERFLIGHT2(self):
self.get_next_msg()
raise self.RECEIVED_SERVERFLIGHT2()
@ATMT.state()
def RECEIVED_SERVERFLIGHT2(self):
pass
@ATMT.condition(RECEIVED_SERVERFLIGHT2)
def should_handle_ChangeCipherSpec(self):
self.raise_on_packet(TLSChangeCipherSpec,
self.HANDLED_CHANGECIPHERSPEC)
@ATMT.state()
def HANDLED_CHANGECIPHERSPEC(self):
pass
@ATMT.condition(HANDLED_CHANGECIPHERSPEC)
def should_handle_Finished(self):
self.raise_on_packet(TLSFinished,
self.HANDLED_SERVERFINISHED)
@ATMT.state()
def HANDLED_SERVERFINISHED(self):
self.vprint("TLS handshake completed!")
self.vprint_sessioninfo()
self.vprint("You may send data or use 'quit'.")
@ATMT.condition(HANDLED_SERVERFINISHED)
def should_wait_ClientData(self):
raise self.WAIT_CLIENTDATA()
@ATMT.state()
def WAIT_CLIENTDATA(self):
pass
@ATMT.condition(WAIT_CLIENTDATA, prio=1)
def add_ClientData(self):
if not self.data_to_send:
data = six.moves.input().replace('\\r', '\r').replace('\\n', '\n').encode()
else:
data = self.data_to_send.pop()
if data == b"quit":
return
if self.linebreak:
data += b"\n"
self.add_record()
self.add_msg(TLSApplicationData(data=data))
raise self.ADDED_CLIENTDATA()
@ATMT.condition(WAIT_CLIENTDATA, prio=2)
def no_more_ClientData(self):
raise self.CLOSE_NOTIFY()
@ATMT.state()
def ADDED_CLIENTDATA(self):
pass
@ATMT.condition(ADDED_CLIENTDATA)
def should_send_ClientData(self):
self.flush_records()
raise self.SENT_CLIENTDATA()
@ATMT.state()
def SENT_CLIENTDATA(self):
raise self.WAITING_SERVERDATA()
@ATMT.state()
def WAITING_SERVERDATA(self):
self.get_next_msg(0.3, 1)
raise self.RECEIVED_SERVERDATA()
@ATMT.state()
def RECEIVED_SERVERDATA(self):
pass
@ATMT.condition(RECEIVED_SERVERDATA, prio=1)
def should_handle_ServerData(self):
if not self.buffer_in:
raise self.WAIT_CLIENTDATA()
p = self.buffer_in[0]
if isinstance(p, TLSApplicationData):
print("> Received: %r" % p.data)
elif isinstance(p, TLSAlert):
print("> Received: %r" % p)
raise self.CLOSE_NOTIFY()
else:
print("> Received: %r" % p)
self.buffer_in = self.buffer_in[1:]
raise self.HANDLED_SERVERDATA()
@ATMT.state()
def HANDLED_SERVERDATA(self):
raise self.WAIT_CLIENTDATA()
@ATMT.state()
def CLOSE_NOTIFY(self):
self.vprint()
self.vprint("Trying to send a TLSAlert to the server...")
@ATMT.condition(CLOSE_NOTIFY)
def close_session(self):
self.add_record()
self.add_msg(TLSAlert(level=1, descr=0))
try:
self.flush_records()
except Exception:
self.vprint("Could not send termination Alert, maybe the server stopped?")
raise self.FINAL()
@ATMT.state()
def SSLv2_PREPARE_CLIENTHELLO(self):
pass
@ATMT.condition(SSLv2_PREPARE_CLIENTHELLO)
def sslv2_should_add_ClientHello(self):
self.add_record(is_sslv2=True)
p = self.client_hello or SSLv2ClientHello(challenge=randstring(16))
self.add_msg(p)
raise self.SSLv2_ADDED_CLIENTHELLO()
@ATMT.state()
def SSLv2_ADDED_CLIENTHELLO(self):
pass
@ATMT.condition(SSLv2_ADDED_CLIENTHELLO)
def sslv2_should_send_ClientHello(self):
self.flush_records()
raise self.SSLv2_SENT_CLIENTHELLO()
@ATMT.state()
def SSLv2_SENT_CLIENTHELLO(self):
raise self.SSLv2_WAITING_SERVERHELLO()
@ATMT.state()
def SSLv2_WAITING_SERVERHELLO(self):
self.get_next_msg()
raise self.SSLv2_RECEIVED_SERVERHELLO()
@ATMT.state()
def SSLv2_RECEIVED_SERVERHELLO(self):
pass
@ATMT.condition(SSLv2_RECEIVED_SERVERHELLO, prio=1)
def sslv2_should_handle_ServerHello(self):
self.raise_on_packet(SSLv2ServerHello,
self.SSLv2_HANDLED_SERVERHELLO)
@ATMT.state()
def SSLv2_HANDLED_SERVERHELLO(self):
pass
@ATMT.condition(SSLv2_RECEIVED_SERVERHELLO, prio=2)
def sslv2_missing_ServerHello(self):
raise self.SSLv2_MISSING_SERVERHELLO()
@ATMT.state()
def SSLv2_MISSING_SERVERHELLO(self):
self.vprint("Missing SSLv2 ServerHello message!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.condition(SSLv2_HANDLED_SERVERHELLO)
def sslv2_should_add_ClientMasterKey(self):
self.add_record(is_sslv2=True)
self.add_msg(SSLv2ClientMasterKey())
raise self.SSLv2_ADDED_CLIENTMASTERKEY()
@ATMT.state()
def SSLv2_ADDED_CLIENTMASTERKEY(self):
pass
@ATMT.condition(SSLv2_ADDED_CLIENTMASTERKEY)
def sslv2_should_send_ClientMasterKey(self):
self.flush_records()
raise self.SSLv2_SENT_CLIENTMASTERKEY()
@ATMT.state()
def SSLv2_SENT_CLIENTMASTERKEY(self):
raise self.SSLv2_WAITING_SERVERVERIFY()
@ATMT.state()
def SSLv2_WAITING_SERVERVERIFY(self):
self.get_next_msg(0.5, 0)
raise self.SSLv2_RECEIVED_SERVERVERIFY()
@ATMT.state()
def SSLv2_RECEIVED_SERVERVERIFY(self):
pass
@ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=1)
def sslv2_should_handle_ServerVerify(self):
self.raise_on_packet(SSLv2ServerVerify,
self.SSLv2_HANDLED_SERVERVERIFY,
get_next_msg=False)
@ATMT.state()
def SSLv2_HANDLED_SERVERVERIFY(self):
pass
def sslv2_should_add_ClientFinished(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if SSLv2ClientFinished in hs_msg:
return
self.add_record(is_sslv2=True)
self.add_msg(SSLv2ClientFinished())
raise self.SSLv2_ADDED_CLIENTFINISHED()
@ATMT.condition(SSLv2_HANDLED_SERVERVERIFY, prio=1)
def sslv2_should_add_ClientFinished_from_ServerVerify(self):
return self.sslv2_should_add_ClientFinished()
@ATMT.condition(SSLv2_HANDLED_SERVERVERIFY, prio=2)
def sslv2_should_wait_ServerFinished_from_ServerVerify(self):
raise self.SSLv2_WAITING_SERVERFINISHED()
@ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=2)
def sslv2_should_add_ClientFinished_from_NoServerVerify(self):
return self.sslv2_should_add_ClientFinished()
@ATMT.condition(SSLv2_RECEIVED_SERVERVERIFY, prio=3)
def sslv2_missing_ServerVerify(self):
raise self.SSLv2_MISSING_SERVERVERIFY()
@ATMT.state(final=True)
def SSLv2_MISSING_SERVERVERIFY(self):
self.vprint("Missing SSLv2 ServerVerify message!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.state()
def SSLv2_ADDED_CLIENTFINISHED(self):
pass
@ATMT.condition(SSLv2_ADDED_CLIENTFINISHED)
def sslv2_should_send_ClientFinished(self):
self.flush_records()
raise self.SSLv2_SENT_CLIENTFINISHED()
@ATMT.state()
def SSLv2_SENT_CLIENTFINISHED(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if SSLv2ServerVerify in hs_msg:
raise self.SSLv2_WAITING_SERVERFINISHED()
else:
self.get_next_msg()
raise self.SSLv2_RECEIVED_SERVERVERIFY()
@ATMT.state()
def SSLv2_WAITING_SERVERFINISHED(self):
self.get_next_msg()
raise self.SSLv2_RECEIVED_SERVERFINISHED()
@ATMT.state()
def SSLv2_RECEIVED_SERVERFINISHED(self):
pass
@ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=1)
def sslv2_should_handle_ServerFinished(self):
self.raise_on_packet(SSLv2ServerFinished,
self.SSLv2_HANDLED_SERVERFINISHED)
# SSLv2 client authentication #
@ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=2)
def sslv2_should_handle_RequestCertificate(self):
self.raise_on_packet(SSLv2RequestCertificate,
self.SSLv2_HANDLED_REQUESTCERTIFICATE)
@ATMT.state()
def SSLv2_HANDLED_REQUESTCERTIFICATE(self):
self.vprint("Server asked for a certificate...")
if not self.mykey or not self.mycert:
self.vprint("No client certificate to send!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.condition(SSLv2_HANDLED_REQUESTCERTIFICATE)
def sslv2_should_add_ClientCertificate(self):
self.add_record(is_sslv2=True)
self.add_msg(SSLv2ClientCertificate(certdata=self.mycert))
raise self.SSLv2_ADDED_CLIENTCERTIFICATE()
@ATMT.state()
def SSLv2_ADDED_CLIENTCERTIFICATE(self):
pass
@ATMT.condition(SSLv2_ADDED_CLIENTCERTIFICATE)
def sslv2_should_send_ClientCertificate(self):
self.flush_records()
raise self.SSLv2_SENT_CLIENTCERTIFICATE()
@ATMT.state()
def SSLv2_SENT_CLIENTCERTIFICATE(self):
raise self.SSLv2_WAITING_SERVERFINISHED()
# end of SSLv2 client authentication #
@ATMT.state()
def SSLv2_HANDLED_SERVERFINISHED(self):
self.vprint("SSLv2 handshake completed!")
self.vprint_sessioninfo()
self.vprint("You may send data or use 'quit'.")
@ATMT.condition(SSLv2_RECEIVED_SERVERFINISHED, prio=3)
def sslv2_missing_ServerFinished(self):
raise self.SSLv2_MISSING_SERVERFINISHED()
@ATMT.state()
def SSLv2_MISSING_SERVERFINISHED(self):
self.vprint("Missing SSLv2 ServerFinished message!")
raise self.SSLv2_CLOSE_NOTIFY()
# end of SSLv2 handshake #
@ATMT.condition(SSLv2_HANDLED_SERVERFINISHED)
def sslv2_should_wait_ClientData(self):
raise self.SSLv2_WAITING_CLIENTDATA()
@ATMT.state()
def SSLv2_WAITING_CLIENTDATA(self):
pass
@ATMT.condition(SSLv2_WAITING_CLIENTDATA, prio=1)
def sslv2_add_ClientData(self):
if not self.data_to_send:
data = six.moves.input().replace('\\r', '\r').replace('\\n', '\n').encode() # noqa: E501
else:
data = self.data_to_send.pop()
self.vprint("> Read from list: %s" % data)
if data == "quit":
return
if self.linebreak:
data += "\n"
self.add_record(is_sslv2=True)
self.add_msg(Raw(data))
raise self.SSLv2_ADDED_CLIENTDATA()
@ATMT.condition(SSLv2_WAITING_CLIENTDATA, prio=2)
def sslv2_no_more_ClientData(self):
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.state()
def SSLv2_ADDED_CLIENTDATA(self):
pass
@ATMT.condition(SSLv2_ADDED_CLIENTDATA)
def sslv2_should_send_ClientData(self):
self.flush_records()
raise self.SSLv2_SENT_CLIENTDATA()
@ATMT.state()
def SSLv2_SENT_CLIENTDATA(self):
raise self.SSLv2_WAITING_SERVERDATA()
@ATMT.state()
def SSLv2_WAITING_SERVERDATA(self):
self.get_next_msg(0.3, 1)
raise self.SSLv2_RECEIVED_SERVERDATA()
@ATMT.state()
def SSLv2_RECEIVED_SERVERDATA(self):
pass
@ATMT.condition(SSLv2_RECEIVED_SERVERDATA)
def sslv2_should_handle_ServerData(self):
if not self.buffer_in:
raise self.SSLv2_WAITING_CLIENTDATA()
p = self.buffer_in[0]
print("> Received: %r" % p.load)
if p.load.startswith(b"goodbye"):
raise self.SSLv2_CLOSE_NOTIFY()
self.buffer_in = self.buffer_in[1:]
raise self.SSLv2_HANDLED_SERVERDATA()
@ATMT.state()
def SSLv2_HANDLED_SERVERDATA(self):
raise self.SSLv2_WAITING_CLIENTDATA()
@ATMT.state()
def SSLv2_CLOSE_NOTIFY(self):
self.vprint()
self.vprint("Trying to send a 'goodbye' to the server...")
@ATMT.condition(SSLv2_CLOSE_NOTIFY)
def sslv2_close_session(self):
self.add_record()
self.add_msg(Raw('goodbye'))
try:
self.flush_records()
except Exception:
self.vprint("Could not send our goodbye. The server probably stopped.") # noqa: E501
self.socket.close()
raise self.FINAL()
# TLS 1.3 handshake #
@ATMT.state()
def TLS13_START(self):
pass
@ATMT.condition(TLS13_START)
def tls13_should_add_ClientHello(self):
# we have to use the legacy, plaintext TLS record here
self.add_record(is_tls13=False)
if self.client_hello:
p = self.client_hello
else:
# When trying to connect to a public TLS 1.3 server,
# you will most likely need to provide an SNI extension.
# sn = ServerName(servername="<put server name here>")
ext = [TLS_Ext_SupportedGroups(groups=["secp256r1"]),
# TLS_Ext_ServerName(servernames=[sn]),
TLS_Ext_KeyShare_CH(client_shares=[KeyShareEntry(group=23)]), # noqa: E501
TLS_Ext_SupportedVersions(versions=["TLS 1.3-d18"]),
TLS_Ext_SignatureAlgorithms(sig_algs=["sha256+rsapss",
"sha256+rsa"])]
p = TLSClientHello(ciphers=0x1301, ext=ext)
self.add_msg(p)
raise self.TLS13_ADDED_CLIENTHELLO()
@ATMT.state()
def TLS13_ADDED_CLIENTHELLO(self):
pass
@ATMT.condition(TLS13_ADDED_CLIENTHELLO)
def tls13_should_send_ClientHello(self):
self.flush_records()
raise self.TLS13_SENT_CLIENTHELLO()
@ATMT.state()
def TLS13_SENT_CLIENTHELLO(self):
raise self.TLS13_WAITING_SERVERHELLO()
@ATMT.state()
def TLS13_WAITING_SERVERHELLO(self):
self.get_next_msg()
@ATMT.condition(TLS13_WAITING_SERVERHELLO)
def tls13_should_handle_ServerHello(self):
self.raise_on_packet(TLS13ServerHello,
self.TLS13_WAITING_ENCRYPTEDEXTENSIONS)
@ATMT.state()
def TLS13_WAITING_ENCRYPTEDEXTENSIONS(self):
self.get_next_msg()
@ATMT.condition(TLS13_WAITING_ENCRYPTEDEXTENSIONS)
def tls13_should_handle_EncryptedExtensions(self):
self.raise_on_packet(TLSEncryptedExtensions,
self.TLS13_WAITING_CERTIFICATE)
@ATMT.state()
def TLS13_WAITING_CERTIFICATE(self):
self.get_next_msg()
@ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=1)
def tls13_should_handle_Certificate(self):
self.raise_on_packet(TLS13Certificate,
self.TLS13_WAITING_CERTIFICATEVERIFY)
@ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=2)
def tls13_should_handle_CertificateRequest(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if TLSCertificateRequest in hs_msg:
self.vprint("TLSCertificateRequest already received!")
self.raise_on_packet(TLSCertificateRequest,
self.TLS13_WAITING_CERTIFICATE)
@ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=3)
def tls13_should_handle_ServerFinished_from_EncryptedExtensions(self):
self.raise_on_packet(TLSFinished,
self.TLS13_CONNECTED)
@ATMT.condition(TLS13_WAITING_CERTIFICATE, prio=4)
def tls13_missing_Certificate(self):
self.vprint("Missing TLS 1.3 message after EncryptedExtensions!")
raise self.FINAL()
@ATMT.state()
def TLS13_WAITING_CERTIFICATEVERIFY(self):
self.get_next_msg()
@ATMT.condition(TLS13_WAITING_CERTIFICATEVERIFY)
def tls13_should_handle_CertificateVerify(self):
self.raise_on_packet(TLSCertificateVerify,
self.TLS13_WAITING_SERVERFINISHED)
@ATMT.state()
def TLS13_WAITING_SERVERFINISHED(self):
self.get_next_msg()
@ATMT.condition(TLS13_WAITING_SERVERFINISHED)
def tls13_should_handle_ServerFinished_from_CertificateVerify(self):
self.raise_on_packet(TLSFinished,
self.TLS13_PREPARE_CLIENTFLIGHT2)
@ATMT.state()
def TLS13_PREPARE_CLIENTFLIGHT2(self):
self.add_record(is_tls13=True)
# raise self.FINAL()
@ATMT.condition(TLS13_PREPARE_CLIENTFLIGHT2)
def tls13_should_add_ClientFinished(self):
self.add_msg(TLSFinished())
raise self.TLS13_ADDED_CLIENTFINISHED()
@ATMT.state()
def TLS13_ADDED_CLIENTFINISHED(self):
pass
@ATMT.condition(TLS13_ADDED_CLIENTFINISHED)
def tls13_should_send_ClientFlight2(self):
self.flush_records()
raise self.TLS13_SENT_CLIENTFLIGHT2()
@ATMT.state()
def TLS13_SENT_CLIENTFLIGHT2(self):
raise self.HANDLED_SERVERFINISHED()
@ATMT.state(final=True)
def FINAL(self):
# We might call shutdown, but it may happen that the server
# did not wait for us to shutdown after answering our data query.
# self.socket.shutdown(1)
self.vprint("Closing client socket...")
self.socket.close()
self.vprint("Ending TLS client automaton.")
| true | true |
f73ef1ca33f42dc641ef0eed3b2bcc6482af570d | 1,206 | py | Python | app/scripts/GeopackageCADRG.py | GeoJamesJones/EZ-Admin | 0299f8bc826d3ce8320ca89c2f7ce501698864ea | [
"Apache-2.0"
] | null | null | null | app/scripts/GeopackageCADRG.py | GeoJamesJones/EZ-Admin | 0299f8bc826d3ce8320ca89c2f7ce501698864ea | [
"Apache-2.0"
] | 3 | 2020-03-24T17:28:41.000Z | 2021-02-02T22:09:16.000Z | app/scripts/GeopackageCADRG.py | GeoJamesJones/EZ-Admin | 0299f8bc826d3ce8320ca89c2f7ce501698864ea | [
"Apache-2.0"
] | null | null | null | import arcpy
import os
# Creates an OGC Geopackage from the CADRG Folder
cwd = arcpy.env.workspace = r"C:\services\data\cadrg"
workspaces = arcpy.ListWorkspaces("*")
gpkgs = []
try:
for workspace in workspaces:
gpkg_name = os.path.split(workspace)[1] + ".gpkg"
if arcpy.Exists(gpkg_name) == False:
arcpy.CreateSQLiteDatabase_management(gpkg_name, 'GEOPACKAGE_1.2')
print("Successfully created " + gpkg_name)
else:
print(gpkg_name + " already exists...")
gpkgs.append(gpkg_name)
toc_files = []
for root, dirname, filename in arcpy.da.Walk(workspace):
for file in filename:
if file == "A.TOC":
toc_files.append(os.path.join(root, file))
count = 0
for toc in toc_files:
arcpy.AddRasterToGeoPackage_conversion(toc, gpkg_name, os.path.split(workspace)[
1] + "_" + str(count), "TILED")
count += 1
print("Successfully added files to " + gpkg_name)
except Exception as e:
print("Error: " + str(e))
exit()
finally:
print("Completed Geopackaging CADRG")
| 28.046512 | 92 | 0.580431 | import arcpy
import os
cwd = arcpy.env.workspace = r"C:\services\data\cadrg"
workspaces = arcpy.ListWorkspaces("*")
gpkgs = []
try:
for workspace in workspaces:
gpkg_name = os.path.split(workspace)[1] + ".gpkg"
if arcpy.Exists(gpkg_name) == False:
arcpy.CreateSQLiteDatabase_management(gpkg_name, 'GEOPACKAGE_1.2')
print("Successfully created " + gpkg_name)
else:
print(gpkg_name + " already exists...")
gpkgs.append(gpkg_name)
toc_files = []
for root, dirname, filename in arcpy.da.Walk(workspace):
for file in filename:
if file == "A.TOC":
toc_files.append(os.path.join(root, file))
count = 0
for toc in toc_files:
arcpy.AddRasterToGeoPackage_conversion(toc, gpkg_name, os.path.split(workspace)[
1] + "_" + str(count), "TILED")
count += 1
print("Successfully added files to " + gpkg_name)
except Exception as e:
print("Error: " + str(e))
exit()
finally:
print("Completed Geopackaging CADRG")
| true | true |
f73ef23572dce6b683dc4340a9c67b607b580186 | 431 | py | Python | CPAC/nuisance/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | 1 | 2021-08-02T23:23:39.000Z | 2021-08-02T23:23:39.000Z | CPAC/nuisance/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | null | null | null | CPAC/nuisance/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | 2 | 2021-08-02T23:23:40.000Z | 2022-02-26T12:39:30.000Z | from utils import calc_compcor_components, \
erode_mask
from nuisance import create_nuisance, \
calc_residuals, \
bandpass_voxels, \
extract_tissue_data
__all__ = ['create_nuisance', \
'calc_residuals', \
'bandpass_voxels', \
'calc_compcor_components', \
'erode_mask', \
'extract_tissue_data'] | 30.785714 | 44 | 0.538283 | from utils import calc_compcor_components, \
erode_mask
from nuisance import create_nuisance, \
calc_residuals, \
bandpass_voxels, \
extract_tissue_data
__all__ = ['create_nuisance', \
'calc_residuals', \
'bandpass_voxels', \
'calc_compcor_components', \
'erode_mask', \
'extract_tissue_data'] | true | true |
f73ef3d18da8643d83606f0a870879efe1d93302 | 1,790 | py | Python | tests/sentry/models/test_grouprelease.py | noscripter/sentry | 1c5b1b53e740ffd2747afb7f0995e026be9468d0 | [
"BSD-3-Clause"
] | 1 | 2021-01-13T15:40:03.000Z | 2021-01-13T15:40:03.000Z | tests/sentry/models/test_grouprelease.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/models/test_grouprelease.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 1 | 2019-11-01T14:48:32.000Z | 2019-11-01T14:48:32.000Z | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import Environment, GroupRelease, Release
from sentry.testutils import TestCase
class GetOrCreateTest(TestCase):
def test_simple(self):
project = self.create_project()
group = self.create_group(project=project)
release = Release.objects.create(version='abc', project=project)
env = Environment.objects.create(project_id=project.id, name='prod')
datetime = timezone.now()
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime,
)
assert grouprelease.project_id == project.id
assert grouprelease.group_id == group.id
assert grouprelease.release_id == release.id
assert grouprelease.environment == 'prod'
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime
datetime_new = timezone.now() + timedelta(days=1)
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
datetime_new2 = datetime_new + timedelta(seconds=1)
# this should not update immediately as the window is too close
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new2,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
| 31.964286 | 76 | 0.658659 | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import Environment, GroupRelease, Release
from sentry.testutils import TestCase
class GetOrCreateTest(TestCase):
def test_simple(self):
project = self.create_project()
group = self.create_group(project=project)
release = Release.objects.create(version='abc', project=project)
env = Environment.objects.create(project_id=project.id, name='prod')
datetime = timezone.now()
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime,
)
assert grouprelease.project_id == project.id
assert grouprelease.group_id == group.id
assert grouprelease.release_id == release.id
assert grouprelease.environment == 'prod'
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime
datetime_new = timezone.now() + timedelta(days=1)
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
datetime_new2 = datetime_new + timedelta(seconds=1)
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new2,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
| true | true |
f73ef48d70537bc6eecfe62bab783fa41409b09d | 11,057 | py | Python | requirements/docutils-0.18/test/test_transforms/test_contents.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | null | null | null | requirements/docutils-0.18/test/test_transforms/test_contents.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | null | null | null | requirements/docutils-0.18/test/test_transforms/test_contents.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | 1 | 2022-02-03T08:03:30.000Z | 2022-02-03T08:03:30.000Z | #! /usr/bin/env python
# $Id: test_contents.py 8771 2021-06-18 18:55:08Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for `docutils.transforms.parts.Contents` (via
`docutils.transforms.universal.LastReaderPending`).
"""
from __future__ import absolute_import
if __name__ == '__main__':
import __init__
from test_transforms import DocutilsTestSupport
from docutils.transforms.references import Substitutions
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['tables_of_contents'] = ((Substitutions,), [
["""\
.. contents::
Title 1
=======
Paragraph 1.
Title_ 2
--------
Paragraph 2.
_`Title` 3
``````````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title
2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-3">
Title
3
<list_item>
<paragraph>
<reference ids="toc-entry-4" refid="title-4">
Title 4
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title>
<reference name="Title" refname="title">
Title
2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-3">
<target ids="title" names="title">
Title
3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-4">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents:: Table of Contents
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
""",
"""\
<document source="test data">
<topic classes="contents" ids="table-of-contents" names="table\\ of\\ contents">
<title>
Table of Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
Title 2
<paragraph>
Paragraph 2.
"""],
["""\
.. contents:: There's an image in Title 2
Title 1
=======
Paragraph 1.
|Title 2|
=========
Paragraph 2.
.. |Title 2| image:: title2.png
""",
"""\
<document source="test data">
<topic classes="contents" ids="there-s-an-image-in-title-2" names="there's\\ an\\ image\\ in\\ title\\ 2">
<title>
There's an image in Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
<image alt="Title 2" uri="title2.png">
<paragraph>
Paragraph 2.
<substitution_definition names="Title\\ 2">
<image alt="Title 2" uri="title2.png">
"""], # emacs cruft: "
["""\
.. contents::
:depth: 2
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-4">
Title 4
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title>
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
Title 1
=======
.. contents::
:local:
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\\ 1">
<title>
Title 1
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-3">
Title 3
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-4">
Title 4
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-2">
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents::
:local:
Test duplicate name "Contents".
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<paragraph>
Test duplicate name "Contents".
<section ids="section" names="section">
<title refid="toc-entry-1">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: top
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<section ids="section" names="section">
<title refid="contents">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: none
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<section ids="section" names="section">
<title>
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
Degenerate case, no table of contents generated.
""",
"""\
<document source="test data">
<paragraph>
Degenerate case, no table of contents generated.
"""],
["""\
Title 1
=======
Paragraph 1.
.. sidebar:: Contents
.. contents::
:local:
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\\ 1">
<title>
Title 1
<paragraph>
Paragraph 1.
<sidebar>
<title>
Contents
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-3">
Title 3
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-2">
Title 3
<paragraph>
Paragraph 3.
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| 25.360092 | 110 | 0.450303 |
from __future__ import absolute_import
if __name__ == '__main__':
import __init__
from test_transforms import DocutilsTestSupport
from docutils.transforms.references import Substitutions
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['tables_of_contents'] = ((Substitutions,), [
["""\
.. contents::
Title 1
=======
Paragraph 1.
Title_ 2
--------
Paragraph 2.
_`Title` 3
``````````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title
2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-3">
Title
3
<list_item>
<paragraph>
<reference ids="toc-entry-4" refid="title-4">
Title 4
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title>
<reference name="Title" refname="title">
Title
2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-3">
<target ids="title" names="title">
Title
3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-4">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents:: Table of Contents
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
""",
"""\
<document source="test data">
<topic classes="contents" ids="table-of-contents" names="table\\ of\\ contents">
<title>
Table of Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
Title 2
<paragraph>
Paragraph 2.
"""],
["""\
.. contents:: There's an image in Title 2
Title 1
=======
Paragraph 1.
|Title 2|
=========
Paragraph 2.
.. |Title 2| image:: title2.png
""",
"""\
<document source="test data">
<topic classes="contents" ids="there-s-an-image-in-title-2" names="there's\\ an\\ image\\ in\\ title\\ 2">
<title>
There's an image in Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
<image alt="Title 2" uri="title2.png">
<paragraph>
Paragraph 2.
<substitution_definition names="Title\\ 2">
<image alt="Title 2" uri="title2.png">
"""], # emacs cruft: "
["""\
.. contents::
:depth: 2
Title 1
=======
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-1">
Title 1
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-2">
Title 2
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-4">
Title 4
<section ids="title-1" names="title\\ 1">
<title refid="toc-entry-1">
Title 1
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-2">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title>
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
Title 1
=======
.. contents::
:local:
Paragraph 1.
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
Title 4
-------
Paragraph 4.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\\ 1">
<title>
Title 1
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-3">
Title 3
<list_item>
<paragraph>
<reference ids="toc-entry-3" refid="title-4">
Title 4
<paragraph>
Paragraph 1.
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-2">
Title 3
<paragraph>
Paragraph 3.
<section ids="title-4" names="title\\ 4">
<title refid="toc-entry-3">
Title 4
<paragraph>
Paragraph 4.
"""],
["""\
.. contents::
:local:
Test duplicate name "Contents".
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<paragraph>
Test duplicate name "Contents".
<section ids="section" names="section">
<title refid="toc-entry-1">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: top
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<section ids="section" names="section">
<title refid="contents">
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
:backlinks: none
Section
--------
Paragraph.
""",
"""\
<document source="test data">
<topic classes="contents" ids="contents" names="contents">
<title>
Contents
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="section">
Section
<section ids="section" names="section">
<title>
Section
<paragraph>
Paragraph.
"""],
["""\
.. contents::
Degenerate case, no table of contents generated.
""",
"""\
<document source="test data">
<paragraph>
Degenerate case, no table of contents generated.
"""],
["""\
Title 1
=======
Paragraph 1.
.. sidebar:: Contents
.. contents::
:local:
Title 2
-------
Paragraph 2.
Title 3
```````
Paragraph 3.
""",
"""\
<document source="test data">
<section ids="title-1" names="title\\ 1">
<title>
Title 1
<paragraph>
Paragraph 1.
<sidebar>
<title>
Contents
<topic classes="contents local" ids="contents" names="contents">
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-1" refid="title-2">
Title 2
<bullet_list>
<list_item>
<paragraph>
<reference ids="toc-entry-2" refid="title-3">
Title 3
<section ids="title-2" names="title\\ 2">
<title refid="toc-entry-1">
Title 2
<paragraph>
Paragraph 2.
<section ids="title-3" names="title\\ 3">
<title refid="toc-entry-2">
Title 3
<paragraph>
Paragraph 3.
"""],
])
if __name__ == '__main__':
    # Run the module's test suite when executed directly.
    import unittest
    unittest.main(defaultTest='suite')
| true | true |
f73ef4f88fc8819babe560f8e63665260364827d | 1,176 | py | Python | meal_hack_flask/models/product.py | davidoort/meal_hack | db37db30f8fc34092d6c65af94186609d1e2e5f2 | [
"MIT"
] | null | null | null | meal_hack_flask/models/product.py | davidoort/meal_hack | db37db30f8fc34092d6c65af94186609d1e2e5f2 | [
"MIT"
] | null | null | null | meal_hack_flask/models/product.py | davidoort/meal_hack | db37db30f8fc34092d6c65af94186609d1e2e5f2 | [
"MIT"
] | null | null | null | from mongoengine import (
Document,
DynamicDocument,
StringField,
FloatField,
DateField,
IntField,
EmbeddedDocument,
EmbeddedDocumentField,
ListField
)
import re
class Image(EmbeddedDocument):
    """Embedded image reference for a product."""
    # Both URLs are copied straight from the Migros API image payload
    # (see productFromMigros below): `original` is the source image,
    # `stack` its "stack" variant.
    original = StringField()
    stack = StringField()
class Product(DynamicDocument):
    """MongoDB document for a product imported from the Migros API."""
    migros_id = IntField()        # `id` field of the Migros source record
    name = StringField()
    description = StringField()   # description text with HTML tags stripped
    image = EmbeddedDocumentField(Image)
    allergen_text = StringField()  # set to "No allergens" when the source omits it
    ingredient_id = IntField()    # id of the ingredient this product maps to
def productFromMigros(migrosSource, ingredient_id) -> Product:
stripped_description = ""
if "description" in migrosSource:
stripped_description = re.sub('<[^<]+>', '', migrosSource["description"]["text"])
return Product(
migros_id = migrosSource["id"],
name = migrosSource["name"],
description = stripped_description,
image = Image(
original= migrosSource["image"]["original"],
stack= migrosSource["image"]["stack"],
),
allergen_text = migrosSource.get("allergen_text", "No allergens"),
ingredient_id = ingredient_id
)
| 27.348837 | 89 | 0.629252 | from mongoengine import (
Document,
DynamicDocument,
StringField,
FloatField,
DateField,
IntField,
EmbeddedDocument,
EmbeddedDocumentField,
ListField
)
import re
class Image(EmbeddedDocument):
original = StringField()
stack = StringField()
class Product(DynamicDocument):
migros_id = IntField()
name = StringField()
description = StringField()
image = EmbeddedDocumentField(Image)
allergen_text = StringField()
ingredient_id = IntField()
def productFromMigros(migrosSource, ingredient_id) -> Product:
stripped_description = ""
if "description" in migrosSource:
stripped_description = re.sub('<[^<]+>', '', migrosSource["description"]["text"])
return Product(
migros_id = migrosSource["id"],
name = migrosSource["name"],
description = stripped_description,
image = Image(
original= migrosSource["image"]["original"],
stack= migrosSource["image"]["stack"],
),
allergen_text = migrosSource.get("allergen_text", "No allergens"),
ingredient_id = ingredient_id
)
| true | true |
f73ef504d81d130349b318dd0dea445b6221b659 | 7,444 | py | Python | mythril/laser/smt/bitvecfunc.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | null | null | null | mythril/laser/smt/bitvecfunc.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | null | null | null | mythril/laser/smt/bitvecfunc.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 1 | 2021-09-06T03:14:58.000Z | 2021-09-06T03:14:58.000Z | from typing import Optional, Union, cast, Callable
import z3
from mythril.laser.smt.bitvec import BitVec, Bool, And, Annotations
from mythril.laser.smt.bool import Or
import operator
def _arithmetic_helper(
    a: "BitVecFunc", b: Union[BitVec, int], operation: Callable
) -> "BitVecFunc":
    """Apply a binary arithmetic *operation* to a BitVecFunc and an operand.

    :param a: left operand, always a BitVecFunc.
    :param b: right operand; ints are promoted to a BitVec of matching width.
    :param operation: two-argument callable applied to the raw z3 terms.
    :return: a new BitVecFunc carrying the combined annotations.
    """
    if isinstance(b, int):
        b = BitVec(z3.BitVecVal(b, a.size()))

    combined_raw = operation(a.raw, b.raw)
    merged_annotations = a.annotations + b.annotations

    if isinstance(b, BitVecFunc):
        # Combining two function symbols: there is no single sensible
        # function name/input for the result, so both are dropped.
        # TODO: Find better value to set input and name to in this case?
        return BitVecFunc(
            raw=combined_raw,
            func_name=None,
            input_=None,
            annotations=merged_annotations)

    return BitVecFunc(
        raw=combined_raw,
        func_name=a.func_name,
        input_=a.input_,
        annotations=merged_annotations)
def _comparison_helper(
    a: "BitVecFunc",
    b: Union[BitVec, int],
    operation: Callable,
    default_value: bool,
    inputs_equal: bool,
) -> Bool:
    """Apply a comparison *operation* between a BitVecFunc and an operand.

    :param a: the BitVecFunc to compare.
    :param b: an int or BitVec to compare against; ints are promoted.
    :param operation: the comparison applied to the raw z3 terms.
    :param default_value: result used when the operands are not comparable
        function applications (different or missing function names).
    :param inputs_equal: whether the comparison additionally requires the
        function inputs to be equal (True) or unequal (False).
    :return: the resulting Bool.
    """
    # Is there some hack for gt/lt comparisons?
    if isinstance(b, int):
        b = BitVec(z3.BitVecVal(b, a.size()))
    merged_annotations = a.annotations + b.annotations

    # Both operands concrete: just evaluate the comparison in Python.
    if not a.symbolic and not b.symbolic:
        return Bool(
            z3.BoolVal(operation(a.value, b.value)),
            annotations=merged_annotations)

    # Only applications of the *same* named function can be compared
    # structurally; anything else falls back to the default value.
    if (
        not isinstance(b, BitVecFunc)
        or not a.func_name
        or not a.input_
        or not a.func_name == b.func_name
    ):
        return Bool(z3.BoolVal(default_value), annotations=merged_annotations)

    input_relation = (
        a.input_ == b.input_ if inputs_equal else a.input_ != b.input_)
    return And(
        Bool(
            cast(z3.BoolRef, operation(a.raw, b.raw)),
            annotations=merged_annotations),
        input_relation,
    )
class BitVecFunc(BitVec):
    """A bit vector function symbol. Used in place of functions like sha3."""

    def __init__(
        self,
        raw: z3.BitVecRef,
        func_name: Optional[str],
        input_: Optional[Union[int, "BitVec"]] = None,
        annotations: Optional[Annotations] = None,
    ):
        """
        :param raw: The raw bit vector symbol
        :param func_name: The function name. e.g. sha3
        :param input: The input to the functions
        :param annotations: The annotations the BitVecFunc should start with
        """
        # func_name/input_ identify this uninterpreted function application so
        # two applications can be compared structurally (see
        # _comparison_helper).
        self.func_name = func_name
        self.input_ = input_
        super().__init__(raw, annotations)

    def __add__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
        """Create an addition expression.

        :param other: The int or BitVec to add to this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.add)

    def __sub__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
        """Create a subtraction expression.

        :param other: The int or BitVec to subtract from this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.sub)

    def __mul__(self, other: "BitVec") -> "BitVecFunc":
        """Create a multiplication expression.

        :param other: The int or BitVec to multiply to this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.mul)

    def __truediv__(self, other: "BitVec") -> "BitVecFunc":
        """Create a signed division expression.

        :param other: The int or BitVec to divide this BitVecFunc by
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.truediv)

    def __and__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
        """Create an and expression.

        :param other: The int or BitVec to and with this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.and_)

    def __or__(self, other: "BitVec") -> "BitVecFunc":
        """Create an or expression.

        :param other: The int or BitVec to or with this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.or_)

    def __xor__(self, other: "BitVec") -> "BitVecFunc":
        """Create a xor expression.

        :param other: The int or BitVec to xor with this BitVecFunc
        :return: The resulting BitVecFunc
        """
        return _arithmetic_helper(self, other, operator.xor)

    def __lt__(self, other: "BitVec") -> Bool:
        """Create a signed less than expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        return _comparison_helper(
            self, other, operator.lt, default_value=False, inputs_equal=False
        )

    def __gt__(self, other: "BitVec") -> Bool:
        """Create a signed greater than expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        return _comparison_helper(
            self, other, operator.gt, default_value=False, inputs_equal=False
        )

    def __le__(self, other: "BitVec") -> Bool:
        """Create a signed less than or equal to expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        return Or(self < other, self == other)

    def __ge__(self, other: "BitVec") -> Bool:
        """Create a signed greater than or equal to expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        return Or(self > other, self == other)

    # MYPY: fix complains about overriding __eq__
    def __eq__(self, other: Union[int, "BitVec"]) -> Bool:  # type: ignore
        """Create an equality expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        # Equal only when raw values compare equal AND the function inputs
        # are equal; unrelated terms default to False.
        return _comparison_helper(
            self, other, operator.eq, default_value=False, inputs_equal=True
        )

    # MYPY: fix complains about overriding __ne__
    def __ne__(self, other: Union[int, "BitVec"]) -> Bool:  # type: ignore
        """Create an inequality expression.

        :param other: The int or BitVec to compare to this BitVecFunc
        :return: The resulting Bool
        """
        # Note: deliberately reuses operator.eq — inequality is modelled as
        # "raw values equal but inputs different", with unrelated terms
        # defaulting to True (assumed unequal).
        return _comparison_helper(
            self, other, operator.eq, default_value=True, inputs_equal=False
        )

    def __lshift__(self, other: Union[int, "BitVec"]) -> "BitVec":
        """
        Left shift operation
        :param other: The int or BitVec to shift on
        :return The resulting left shifted output
        """
        return _arithmetic_helper(self, other, operator.lshift)

    def __rshift__(self, other: Union[int, "BitVec"]) -> "BitVec":
        """
        Right shift operation
        :param other: The int or BitVec to shift on
        :return The resulting right shifted output:
        """
        return _arithmetic_helper(self, other, operator.rshift)
| 32.938053 | 82 | 0.635277 | from typing import Optional, Union, cast, Callable
import z3
from mythril.laser.smt.bitvec import BitVec, Bool, And, Annotations
from mythril.laser.smt.bool import Or
import operator
def _arithmetic_helper(
a: "BitVecFunc", b: Union[BitVec, int], operation: Callable
) -> "BitVecFunc":
if isinstance(b, int):
b = BitVec(z3.BitVecVal(b, a.size()))
raw = operation(a.raw, b.raw)
union = a.annotations + b.annotations
if isinstance(b, BitVecFunc):
return BitVecFunc(raw=raw, func_name=None, input_=None, annotations=union)
return BitVecFunc(
raw=raw, func_name=a.func_name, input_=a.input_, annotations=union
)
def _comparison_helper(
a: "BitVecFunc",
b: Union[BitVec, int],
operation: Callable,
default_value: bool,
inputs_equal: bool,
) -> Bool:
if isinstance(b, int):
b = BitVec(z3.BitVecVal(b, a.size()))
union = a.annotations + b.annotations
if not a.symbolic and not b.symbolic:
return Bool(z3.BoolVal(operation(a.value, b.value)), annotations=union)
if (
not isinstance(b, BitVecFunc)
or not a.func_name
or not a.input_
or not a.func_name == b.func_name
):
return Bool(z3.BoolVal(default_value), annotations=union)
return And(
Bool(cast(z3.BoolRef, operation(a.raw, b.raw)), annotations=union),
a.input_ == b.input_ if inputs_equal else a.input_ != b.input_,
)
class BitVecFunc(BitVec):
def __init__(
self,
raw: z3.BitVecRef,
func_name: Optional[str],
input_: Union[int, "BitVec"] = None,
annotations: Optional[Annotations] = None,
):
self.func_name = func_name
self.input_ = input_
super().__init__(raw, annotations)
def __add__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.add)
def __sub__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.sub)
def __mul__(self, other: "BitVec") -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.mul)
def __truediv__(self, other: "BitVec") -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.truediv)
def __and__(self, other: Union[int, "BitVec"]) -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.and_)
def __or__(self, other: "BitVec") -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.or_)
def __xor__(self, other: "BitVec") -> "BitVecFunc":
return _arithmetic_helper(self, other, operator.xor)
def __lt__(self, other: "BitVec") -> Bool:
return _comparison_helper(
self, other, operator.lt, default_value=False, inputs_equal=False
)
def __gt__(self, other: "BitVec") -> Bool:
return _comparison_helper(
self, other, operator.gt, default_value=False, inputs_equal=False
)
def __le__(self, other: "BitVec") -> Bool:
return Or(self < other, self == other)
def __ge__(self, other: "BitVec") -> Bool:
return Or(self > other, self == other)
def __eq__(self, other: Union[int, "BitVec"]) -> Bool:
return _comparison_helper(
self, other, operator.eq, default_value=False, inputs_equal=True
)
def __ne__(self, other: Union[int, "BitVec"]) -> Bool:
return _comparison_helper(
self, other, operator.eq, default_value=True, inputs_equal=False
)
def __lshift__(self, other: Union[int, "BitVec"]) -> "BitVec":
return _arithmetic_helper(self, other, operator.lshift)
def __rshift__(self, other: Union[int, "BitVec"]) -> "BitVec":
return _arithmetic_helper(self, other, operator.rshift)
| true | true |
f73ef6524eb7f6a2908d2e47235afca6f0976eb2 | 3,902 | py | Python | Previsao_valor_aluguel/app.py | ViniViniAntunes/Prevendo_Valor_de_Aluguel_em_SP | e37d54da0b2c8ce3c6ddb4ec45191b069834427c | [
"MIT"
] | null | null | null | Previsao_valor_aluguel/app.py | ViniViniAntunes/Prevendo_Valor_de_Aluguel_em_SP | e37d54da0b2c8ce3c6ddb4ec45191b069834427c | [
"MIT"
] | null | null | null | Previsao_valor_aluguel/app.py | ViniViniAntunes/Prevendo_Valor_de_Aluguel_em_SP | e37d54da0b2c8ce3c6ddb4ec45191b069834427c | [
"MIT"
] | 1 | 2020-05-24T03:09:26.000Z | 2020-05-24T03:09:26.000Z | # Importando as bibliotecas necessárias
import pandas as pd
import streamlit as st
import plotly.express as px
from sklearn.ensemble import RandomForestRegressor
# Helper that loads the dataset
#@st.cache  # Streamlit caching decorator (currently disabled)
def get_data():
    """Load the deployment dataset from disk as a pandas DataFrame."""
    csv_path = "model/data_deploy.csv"
    return pd.read_csv(csv_path)
# Helper that trains our model
def train_model():
    """Fit a RandomForestRegressor on the deployment data.

    Features are every column except the target (``valor``) and the
    non-numeric neighbourhood name (``bairro``); the target is the rent
    value itself.
    """
    dataset = get_data()
    features = dataset.drop(["valor", "bairro"], axis=1)
    target = dataset["valor"]
    regressor = RandomForestRegressor(n_estimators=100)
    regressor.fit(features, target)
    return regressor
def get_villages_and_id():
    """Return a mapping from neighbourhood name to its numeric id."""
    dataset = get_data()
    return {name: village_id
            for name, village_id in zip(dataset['bairro'], dataset['bairro_id'])}
def return_id_village(village):
    """Look up the numeric id for the given neighbourhood name."""
    villages = get_villages_and_id()
    return villages[village]
# Hold the DataFrame in `data`, dropping the numeric neighbourhood id column
# (only the human-readable `bairro` name is shown in the UI)
data = get_data().drop("bairro_id", axis=1)

# Train the model
model = train_model()

# Set the Data App title
st.title("Data App - Prevendo Valores de Imóveis")

# Set the data app subtitle
st.markdown("Este é um Data App utilizado para exibir a solução de Machine Learning que prevê valores de aluguel de apartamentos na capital de São Paulo.")
st.markdown('Criado por: Vini Antunes')
st.markdown('LinkedIn: https://www.linkedin.com/in/vini-antunes')

# Inspecting the dataset
st.subheader("Selecionando apenas um pequeno conjunto de atributos")

# Attributes displayed by default
default_cols = ["quartos","bairro","valor"]

# Attributes chosen through the multiselect widget
cols = st.multiselect("Atributos", data.columns.tolist(), default=default_cols)

# Show the top 10 rows of the DataFrame
st.dataframe(data[cols].head(10))

# Another subtitle
st.subheader("Distribuição de imóveis por preço do aluguel")

# Rent-value range selector
faixa_valores = st.slider("Faixa de preço", float(data['valor'].min()), float(data['valor'].max()), (1000.0, 2000.0))

# Filter the data to the selected range
filtered_data = data[data['valor'].between(left=faixa_valores[0], right=faixa_valores[1])]

# Plot the distribution of the filtered data
f = px.histogram(filtered_data, x="valor", nbins=20, title="Distribuição de Preços do Aluguel")
f.update_xaxes(title="valor")
f.update_yaxes(title="Total Imóveis")
st.plotly_chart(f)

# Sidebar subtitle
st.sidebar.subheader("Defina os atributos do imóvel para predição")

####### Mapping data #######

# Neighbourhood names (keys of the name -> id mapping)
villages = get_villages_and_id().keys()

# Neighbourhood selector
village = st.sidebar.selectbox("Em qual bairro?", sorted(list(villages)))

# Swap the neighbourhood name for its numeric id (the model feature)
id_village = return_id_village(village)

# Apartment area input
area = st.sidebar.number_input("Área (em m²)?", min_value=float(data['area'].min()), max_value=float(data['area'].max()), step=1.0, format="%.0f")

# Number of bedrooms input
rooms = st.sidebar.number_input("Quantos quartos?", min_value=float(data['quartos'].min()), max_value=float(data['quartos'].max()), step=1.0, format="%.0f")

# Number of suites input
suites = st.sidebar.number_input("Quantas suítes?", min_value=float(data['suites'].min()), max_value=float(data['suites'].max()), step=1.0, format="%.0f")

# Number of parking spaces input
parking_spaces = st.sidebar.number_input("Quantas vagas de garagem?", min_value=float(data['vagas'].min()), max_value=float(data['vagas'].max()), step=1.0, format="%.0f")

# Prediction button in the sidebar
btn_predict = st.sidebar.button("Realizar Predição")

# When the button is pressed, run the model and show the predicted rent
if btn_predict:
    # Feature order is assumed to match the training columns
    # (area, quartos, suites, vagas, bairro_id) — TODO confirm against
    # model/data_deploy.csv column order.
    result = model.predict([[area, rooms, suites, parking_spaces, id_village]])
    st.sidebar.subheader("O valor previsto para do aluguel é:")
    st.sidebar.subheader("")
    result = f"R$ {str(round(result[0], 2))}"
    st.sidebar.subheader(result)
import pandas as pd
import streamlit as st
import plotly.express as px
from sklearn.ensemble import RandomForestRegressor
d.read_csv("model/data_deploy.csv")
def train_model():
data = get_data()
X = data.drop(["valor", "bairro"], axis=1)
y = data["valor"]
rf_regressor = RandomForestRegressor(n_estimators=100)
rf_regressor.fit(X, y)
return rf_regressor
def get_villages_and_id():
data = get_data()
names_and_id = dict(zip(data['bairro'], data['bairro_id']))
return names_and_id
def return_id_village(village):
return get_villages_and_id()[village]
data = get_data().drop("bairro_id", axis=1)
model = train_model()
st.title("Data App - Prevendo Valores de Imóveis")
st.markdown("Este é um Data App utilizado para exibir a solução de Machine Learning que prevê valores de aluguel de apartamentos na capital de São Paulo.")
st.markdown('Criado por: Vini Antunes')
st.markdown('LinkedIn: https://www.linkedin.com/in/vini-antunes')
st.subheader("Selecionando apenas um pequeno conjunto de atributos")
default_cols = ["quartos","bairro","valor"]
cols = st.multiselect("Atributos", data.columns.tolist(), default=default_cols)
st.dataframe(data[cols].head(10))
st.subheader("Distribuição de imóveis por preço do aluguel")
faixa_valores = st.slider("Faixa de preço", float(data['valor'].min()), float(data['valor'].max()), (1000.0, 2000.0))
filtered_data = data[data['valor'].between(left=faixa_valores[0], right=faixa_valores[1])]
f = px.histogram(filtered_data, x="valor", nbins=20, title="Distribuição de Preços do Aluguel")
f.update_xaxes(title="valor")
f.update_yaxes(title="Total Imóveis")
st.plotly_chart(f)
st.sidebar.subheader("Defina os atributos do imóvel para predição")
partamento
area = st.sidebar.number_input("Área (em m²)?", min_value=float(data['area'].min()), max_value=float(data['area'].max()), step=1.0, format="%.0f")
# Selecionando a quantidade de quartos
rooms = st.sidebar.number_input("Quantos quartos?", min_value=float(data['quartos'].min()), max_value=float(data['quartos'].max()), step=1.0, format="%.0f")
# Selecionando a quantidade de suites
suites = st.sidebar.number_input("Quantas suítes?", min_value=float(data['suites'].min()), max_value=float(data['suites'].max()), step=1.0, format="%.0f")
# Selecionando a quantidade de vagas de garagem
parking_spaces = st.sidebar.number_input("Quantas vagas de garagem?", min_value=float(data['vagas'].min()), max_value=float(data['vagas'].max()), step=1.0, format="%.0f")
# inserindo um botão na tela
btn_predict = st.sidebar.button("Realizar Predição")
# verifica se o botão foi acionado
if btn_predict:
result = model.predict([[area, rooms, suites, parking_spaces, id_village]])
st.sidebar.subheader("O valor previsto para do aluguel é:")
st.sidebar.subheader("")
result = f"R$ {str(round(result[0], 2))}"
st.sidebar.subheader(result) | true | true |
f73ef660b4872a5b92efd54cfb0ffe1dd549454d | 50,627 | py | Python | tensorflow/python/keras/layers/core.py | LucasLorenc/tensorflow | 10a7b61cdf55d13c85c2a3cc5ca669e3d9ea8e11 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/core.py | LucasLorenc/tensorflow | 10a7b61cdf55d13c85c2a3cc5ca669e3d9ea8e11 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/core.py | LucasLorenc/tensorflow | 10a7b61cdf55d13c85c2a3cc5ca669e3d9ea8e11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
  """Masks a sequence by using a mask value to skip timesteps.

  For each timestep in the input tensor (dimension #1 in the tensor),
  if all values in the input tensor at that timestep
  are equal to `mask_value`, then the timestep will be masked (skipped)
  in all downstream layers (as long as they support masking).

  If any downstream layer does not support masking yet receives such
  an input mask, an exception will be raised.

  Example:

  Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you
  lack data for these timesteps. You can:

  - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
  - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

  ```python
  samples, timesteps, features = 32, 10, 8
  inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
  inputs[:, 3, :] = 0.
  inputs[:, 5, :] = 0.

  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Masking(mask_value=0.,
                                    input_shape=(timesteps, features)))
  model.add(tf.keras.layers.LSTM(32))
  output = model(inputs)
  # The time step 3 and 5 will be skipped from LSTM calculation.
  ```

  See [the masking and padding
  guide](https://www.tensorflow.org/guide/keras/masking_and_padding)
  for more details.
  """

  def __init__(self, mask_value=0., **kwargs):
    super(Masking, self).__init__(**kwargs)
    self.supports_masking = True
    self.mask_value = mask_value
    # Signals to the Keras engine that `call` attaches the output mask
    # itself (as `_keras_mask`) instead of relying on `compute_mask` alone.
    self._compute_output_and_mask_jointly = True

  def compute_mask(self, inputs, mask=None):
    # A timestep is kept if any of its feature values differs from
    # `mask_value`.
    return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)

  def call(self, inputs):
    boolean_mask = K.any(
        math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
    # Zero out fully-masked timesteps in the output as well.
    outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)
    # Compute the mask and outputs simultaneously.
    outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1)  # pylint: disable=protected-access
    return outputs

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {'mask_value': self.mask_value}
    base_config = super(Masking, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
  """Applies Dropout to the input.

  Dropout consists in randomly setting
  a fraction `rate` of input units to 0 at each update during training time,
  which helps prevent overfitting.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    noise_shape: 1D integer tensor representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)` and
      you want the dropout mask to be the same for all timesteps,
      you can use `noise_shape=(batch_size, 1, features)`.
    seed: A Python integer to use as random seed.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(Dropout, self).__init__(**kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
    # which will override `self.noise_shape`, and allows for custom noise
    # shapes with dynamically sized inputs.
    if self.noise_shape is None:
      return None

    concrete_inputs_shape = array_ops.shape(inputs)
    noise_shape = []
    for i, value in enumerate(self.noise_shape):
      # `None` entries are filled with the corresponding runtime input
      # dimension; concrete entries are kept as-is.
      noise_shape.append(concrete_inputs_shape[i] if value is None else value)
    return ops.convert_to_tensor(noise_shape)

  def call(self, inputs, training=None):
    # Fall back to the global learning-phase flag when the caller does not
    # specify `training`.
    if training is None:
      training = K.learning_phase()

    def dropped_inputs():
      return nn.dropout(
          inputs,
          noise_shape=self._get_noise_shape(inputs),
          seed=self.seed,
          rate=self.rate)

    # Select between the dropout branch and identity depending on `training`.
    output = tf_utils.smart_cond(training,
                                 dropped_inputs,
                                 lambda: array_ops.identity(inputs))
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed
    }
    base_config = super(Dropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
  """Spatial 1D version of Dropout.

  Instead of dropping individual elements, this layer drops entire 1D
  feature maps (channels). When adjacent frames within feature maps are
  strongly correlated (as is typical in early convolution layers), regular
  dropout does not regularize the activations effectively; dropping whole
  feature maps promotes independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.

  Call arguments:
    inputs: A 3D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    3D tensor with shape `(samples, timesteps, channels)`.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, **kwargs):
    super(SpatialDropout1D, self).__init__(rate, **kwargs)
    self.input_spec = InputSpec(ndim=3)

  def _get_noise_shape(self, inputs):
    # Broadcast the dropout mask over the timestep axis so that an entire
    # channel is either kept or dropped for the whole sequence.
    dims = array_ops.shape(inputs)
    return (dims[0], 1, dims[2])
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
  """Spatial 2D version of Dropout.

  Instead of dropping individual elements, this layer drops entire 2D
  feature maps. When adjacent pixels within feature maps are strongly
  correlated (as is typical in early convolution layers), regular dropout
  does not regularize the activations effectively; dropping whole feature
  maps promotes independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. In 'channels_first'
      mode the channels dimension (the depth) is at index 1; in
      'channels_last' mode it is at index 3. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`, or "channels_last" if never set.

  Call arguments:
    inputs: A 4D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    4D tensor with shape `(samples, channels, rows, cols)` if
    data_format='channels_first', or `(samples, rows, cols, channels)` if
    data_format='channels_last'.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout2D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in {'channels_last', 'channels_first'}:
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=4)

  def _get_noise_shape(self, inputs):
    # Broadcast the dropout mask over both spatial axes so that an entire
    # feature map is either kept or dropped.
    dims = array_ops.shape(inputs)
    if self.data_format == 'channels_first':
      return (dims[0], dims[1], 1, 1)
    elif self.data_format == 'channels_last':
      return (dims[0], 1, 1, dims[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
  """Spatial 3D version of Dropout.

  Instead of dropping individual elements, this layer drops entire 3D
  feature maps. When adjacent voxels within feature maps are strongly
  correlated (as is typical in early convolution layers), regular dropout
  does not regularize the activations effectively; dropping whole feature
  maps promotes independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. In 'channels_first'
      mode the channels dimension (the depth) is at index 1; in
      'channels_last' mode it is at index 4. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`, or "channels_last" if never set.

  Call arguments:
    inputs: A 5D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    5D tensor with shape `(samples, channels, dim1, dim2, dim3)` if
    data_format='channels_first', or `(samples, dim1, dim2, dim3, channels)`
    if data_format='channels_last'.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout3D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in {'channels_last', 'channels_first'}:
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=5)

  def _get_noise_shape(self, inputs):
    # Broadcast the dropout mask over all three spatial axes so that an
    # entire feature map is either kept or dropped.
    dims = array_ops.shape(inputs)
    if self.data_format == 'channels_first':
      return (dims[0], dims[1], 1, 1, 1)
    elif self.data_format == 'channels_last':
      return (dims[0], 1, 1, 1, dims[4])
@keras_export('keras.layers.Activation')
class Activation(Layer):
  """Applies an activation function to an output.

  Arguments:
    activation: Activation function, such as `tf.nn.relu`, or the string
      name of a built-in activation function, such as "relu".

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of
    integers, does not include the samples axis) when using this layer
    as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    # The layer is element-wise, so masks pass through untouched.
    self.supports_masking = True
    self.activation = activations.get(activation)

  def call(self, inputs):
    return self.activation(inputs)

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    base = super(Activation, self).get_config()
    base.update({'activation': activations.serialize(self.activation)})
    return base
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
  """Reshapes an output to a certain shape.

  Arguments:
    target_shape: Target shape as a tuple of integers, not including the
      samples dimension (batch size). At most one entry may be `-1`, in
      which case that dimension is inferred from the input size.

  Input shape:
    Arbitrary, although all dimensions in the input shape must be fixed.
    Use the keyword argument `input_shape` (tuple of integers, does not
    include the samples axis) when using this layer as the first layer
    in a model.

  Output shape:
    `(batch_size,) + target_shape`

  Example:
  ```python
  # as first layer in a Sequential model
  model = Sequential()
  model.add(Reshape((3, 4), input_shape=(12,)))
  # now: model.output_shape == (None, 3, 4); `None` is the batch dim

  # as intermediate layer in a Sequential model
  model.add(Reshape((6, 2)))
  # now: model.output_shape == (None, 6, 2)

  # also supports shape inference using `-1` as dimension
  model.add(Reshape((-1, 2, 2)))
  # now: model.output_shape == (None, None, 2, 2)
  ```
  """

  def __init__(self, target_shape, **kwargs):
    super(Reshape, self).__init__(**kwargs)
    self.target_shape = tuple(target_shape)

  def _fix_unknown_dimension(self, input_shape, output_shape):
    """Resolves a single `-1` entry in `output_shape` against `input_shape`.

    Near-direct port of the internal NumPy function
    `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`.

    Arguments:
      input_shape: Shape of the array being reshaped.
      output_shape: Desired shape with at most one `-1`, marking a
        dimension to be derived from the input shape.

    Returns:
      `output_shape` with any `-1` replaced by its computed value.

    Raises:
      ValueError: If the total size of `output_shape` differs from that
        of `input_shape`, or more than one unknown dimension is given.
    """
    output_shape = list(output_shape)
    msg = 'total size of new array must be unchanged'

    unknown_index = None
    known_product = 1
    for axis, size in enumerate(output_shape):
      if size < 0:
        if unknown_index is not None:
          raise ValueError('Can only specify one unknown dimension.')
        unknown_index = axis
      else:
        known_product *= size

    total = np.prod(input_shape, dtype=int)
    if unknown_index is None:
      if total != known_product:
        raise ValueError(msg)
    else:
      if known_product == 0 or total % known_product != 0:
        raise ValueError(msg)
      output_shape[unknown_index] = total // known_product
    return output_shape

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = [input_shape[0]]
    if None in input_shape[1:]:
      # Input shape (partially) unknown: report -1 markers as None.
      output_shape += tuple(
          None if dim == -1 else dim for dim in self.target_shape)
    else:
      output_shape += self._fix_unknown_dimension(input_shape[1:],
                                                  self.target_shape)
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    batch = (array_ops.shape(inputs)[0],)
    return array_ops.reshape(inputs, batch + self.target_shape)

  def get_config(self):
    base = super(Reshape, self).get_config()
    base.update({'target_shape': self.target_shape})
    return base
@keras_export('keras.layers.Permute')
class Permute(Layer):
  """Permutes the dimensions of the input according to a given pattern.

  Useful for e.g. connecting RNNs and convnets together.

  Arguments:
    dims: Tuple of integers. Permutation pattern over the non-batch
      dimensions; indexing starts at 1. For instance, `(2, 1)` permutes
      the first and second dimensions of the input.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of
    integers, does not include the samples axis) when using this layer
    as the first layer in a model.

  Output shape:
    Same as the input shape, but with the dimensions re-ordered
    according to the specified pattern.

  Example:
  ```python
  model = Sequential()
  model.add(Permute((2, 1), input_shape=(10, 64)))
  # now: model.output_shape == (None, 64, 10); `None` is the batch dim
  ```
  """

  def __init__(self, dims, **kwargs):
    super(Permute, self).__init__(**kwargs)
    self.dims = tuple(dims)
    # `dims` must be a permutation of 1..len(dims): every non-batch axis
    # appears exactly once.
    if sorted(dims) != list(range(1, len(dims) + 1)):
      raise ValueError(
          'Invalid permutation `dims` for Permute Layer: %s. '
          'The set of indices in `dims` must be consecutive and start from 1.' %
          (dims,))
    self.input_spec = InputSpec(ndim=len(self.dims) + 1)

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = copy.copy(input_shape)
    # Output position i+1 takes its size from input axis dims[i]; the
    # batch axis (position 0) is left untouched.
    for target_pos, source_axis in enumerate(self.dims, start=1):
      output_shape[target_pos] = input_shape[source_axis]
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    return array_ops.transpose(inputs, perm=(0,) + self.dims)

  def get_config(self):
    base = super(Permute, self).get_config()
    base.update({'dims': self.dims})
    return base
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
  """Flattens the input. Does not affect the batch size.

  If inputs are shaped `(batch,)` without a channel dimension, then
  flattening adds an extra channel dimension and output shapes are
  `(batch, 1)`.

  Arguments:
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Example:
  ```python
  model = Sequential()
  model.add(Convolution2D(64, 3, 3,
                          border_mode='same',
                          input_shape=(3, 32, 32)))
  # now: model.output_shape == (None, 64, 32, 32)
  model.add(Flatten())
  # now: model.output_shape == (None, 65536)
  ```
  """

  def __init__(self, data_format=None, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(min_ndim=1)

  def call(self, inputs):
    # For channels_first inputs, move the channel axis to the end first
    # so the flattened element order matches the channels_last layout.
    if (self.data_format == 'channels_first'
        and K.ndim(inputs) is not None and K.ndim(inputs) > 1):
      permutation = [0]
      permutation.extend([i for i in
                          range(2, K.ndim(inputs))])
      permutation.append(1)
      inputs = array_ops.transpose(inputs, perm=permutation)
    input_shape = inputs.shape
    if input_shape[1:].is_fully_defined():
      # All non-batch dims are static: flatten to a known inner size.
      flattened_dim = tensor_shape.dimension_value(
          np.prod(input_shape[1:], dtype=int))
      # Temporary fix for integer overflow issue: pick int64 only when
      # the size does not fit in int32.
      if flattened_dim > np.iinfo(np.int32).max:
        shape_dtype = dtypes.int64
      else:
        shape_dtype = dtypes.int32
      outputs = array_ops.reshape(
          inputs, constant_op.constant((-1, flattened_dim), dtype=shape_dtype))
    else:
      batch_size = tensor_shape.dimension_value(inputs.shape[0])
      if batch_size:
        # Temporary fix for integer overflow issue (same int32/int64
        # selection as above, applied to the batch size).
        if batch_size > np.iinfo(np.int32).max:
          shape_dtype = dtypes.int64
        else:
          shape_dtype = dtypes.int32
        outputs = array_ops.reshape(
            inputs, constant_op.constant((batch_size, -1), dtype=shape_dtype))
      else:
        # Nothing is statically known: fully dynamic reshape.
        outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
    if not context.executing_eagerly():
      # Graph mode: propagate the best statically known output shape.
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.as_shape(input_shape).as_list()
    if not input_shape:
      # Scalar input: flattening adds a channel dimension of size 1.
      output_shape = tensor_shape.TensorShape([1])
    else:
      output_shape = [input_shape[0]]
    if all(input_shape[1:]):
      output_shape += [np.prod(input_shape[1:], dtype=int)]
    else:
      # Any unknown non-batch dim makes the flattened size unknown.
      output_shape += [None]
    return tensor_shape.TensorShape(output_shape)

  def get_config(self):
    config = {'data_format': self.data_format}
    base_config = super(Flatten, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
  """Repeats the input n times.

  Tiles a batch of feature vectors `(num_samples, features)` into
  sequences `(num_samples, n, features)` by repeating each vector `n`
  times along a new axis.

  Arguments:
    n: Integer, repetition factor.

  Input shape:
    2D tensor of shape `(num_samples, features)`.

  Output shape:
    3D tensor of shape `(num_samples, n, features)`.

  Example:
  ```python
  model = Sequential()
  model.add(Dense(32, input_dim=32))
  # now: model.output_shape == (None, 32); `None` is the batch dim
  model.add(RepeatVector(3))
  # now: model.output_shape == (None, 3, 32)
  ```
  """

  def __init__(self, n, **kwargs):
    super(RepeatVector, self).__init__(**kwargs)
    self.n = n
    # Only plain 2D feature batches are accepted.
    self.input_spec = InputSpec(ndim=2)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    return tensor_shape.TensorShape([dims[0], self.n, dims[1]])

  def call(self, inputs):
    return K.repeat(inputs, self.n)

  def get_config(self):
    base = super(RepeatVector, self).get_config()
    base.update({'n': self.n})
    return base
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
  """Wraps arbitrary expressions as a `Layer` object.

  The `Lambda` layer exists so that arbitrary TensorFlow functions
  can be used when constructing `Sequential` and Functional API
  models. `Lambda` layers are best suited for simple operations or
  quick experimentation. For more advanced usecases, follow
  [this guide](https://www.tensorflow.org/alpha/guide/keras/custom_layers_and_models)
  for subclassing `tf.keras.layers.Layer`.

  The main reason to subclass `tf.keras.layers.Layer` instead of using a
  `Lambda` layer is saving and inspecting a Model. `Lambda` layers
  are saved by serializing the Python bytecode, whereas subclassed
  Layers can be saved via overriding their `get_config` method. Overriding
  `get_config` improves the portability of Models. Models that rely on
  subclassed Layers are also often easier to visualize and reason about.

  Examples:
  ```python
  # add a x -> x^2 layer
  model.add(Lambda(lambda x: x ** 2))
  ```
  ```python
  # add a layer that returns the concatenation
  # of the positive part of the input and
  # the opposite of the negative part
  def antirectifier(x):
    x -= K.mean(x, axis=1, keepdims=True)
    x = K.l2_normalize(x, axis=1)
    pos = K.relu(x)
    neg = K.relu(-x)
    return K.concatenate([pos, neg], axis=1)

  model.add(Lambda(antirectifier))
  ```

  Variables can be created within a `Lambda` layer. Like with
  other layers, these variables will be created only once and reused
  if the `Lambda` layer is called on new inputs. If creating more
  than one variable in a given `Lambda` instance, be sure to use
  a different name for each variable. Note that calling sublayers
  from within a `Lambda` is not supported.

  Example of variable creation:
  ```python
  def linear_transform(x):
    v1 = tf.Variable(1., name='multiplier')
    v2 = tf.Variable(0., name='bias')
    return x*v1 + v2

  linear_layer = Lambda(linear_transform)
  model.add(linear_layer)
  model.add(keras.layers.Dense(10, activation='relu'))
  model.add(linear_layer)  # Reuses existing Variables
  ```

  Note that creating two instances of `Lambda` using the same function
  will *not* share Variables between the two instances. Each instance of
  `Lambda` will create and manage its own weights.

  Arguments:
    function: The function to be evaluated. Takes input tensor as first
      argument.
    output_shape: Expected output shape from function. This argument can be
      inferred if not explicitly provided. Can be a tuple or function. If a
      tuple, it only specifies the first dimension onward;
      sample dimension is assumed either the same as the input: `output_shape =
      (input_shape[0], ) + output_shape` or, the input is `None` and
      the sample dimension is also `None`: `output_shape = (None, ) +
      output_shape` If a function, it specifies the entire shape as a function
      of the
      input shape: `output_shape = f(input_shape)`
    mask: Either None (indicating no masking) or a callable with the same
      signature as the `compute_mask` layer method, or a tensor that will be
      returned as output mask regardless what the input is.
    arguments: Optional dictionary of keyword arguments to be passed to the
      function.

  Input shape: Arbitrary. Use the keyword argument input_shape (tuple of
    integers, does not include the samples axis) when using this layer as the
    first layer in a model.

  Output shape: Specified by `output_shape` argument
  """

  def __init__(self, function, output_shape=None, mask=None, arguments=None,
               **kwargs):
    super(Lambda, self).__init__(**kwargs)
    self.function = function
    self.arguments = arguments if arguments else {}
    if mask is not None:
      self.supports_masking = True
    self.mask = mask
    self._supports_ragged_inputs = True
    self._output_shape = output_shape
    # Cache of variables created inside `function`, keyed by name, so
    # repeated calls reuse the same variables.
    self._variable_dict = {}
    # These attributes are inherited from `Layer`.
    self._trainable_weights = []
    self._non_trainable_weights = []

    # Inspect the wrapped function once to know which extra keyword
    # arguments (`training`, `mask`) it accepts.
    function_args = tf_inspect.getfullargspec(self.function).args
    self._fn_expects_training_arg = 'training' in function_args
    self._fn_expects_mask_arg = 'mask' in function_args

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if self._output_shape is None:
      # Make use of existing autocomputation but provide Lambda-specific
      # error message. This is always safe to run even when the outer context
      # is Graph mode because Lambda layers don't have side effects such as
      # `add_loss`.
      with context.eager_mode():
        try:
          return super(Lambda, self).compute_output_shape(input_shape)
        except NotImplementedError:
          raise NotImplementedError(
              'We could not automatically infer the shape of the Lambda\'s '
              'output. Please specify `output_shape` for this Lambda.')

    if callable(self._output_shape):
      output_shapes = self._output_shape(input_shape)
      return tf_utils.convert_shapes(output_shapes, to_tuples=False)

    # Output shapes are passed directly and don't include batch dimension.
    input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
    batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None

    def _add_batch(shape):
      return tensor_shape.TensorShape([batch_size] + shape.as_list())

    output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
    return nest.map_structure(_add_batch, output_shapes)

  def call(self, inputs, mask=None, training=None):
    # Copy so that injecting `mask`/`training` below does not mutate
    # `self.arguments`, which is user-provided state that is also
    # serialized by `get_config`.
    arguments = dict(self.arguments)
    if self._fn_expects_mask_arg:
      arguments['mask'] = mask
    if self._fn_expects_training_arg:
      arguments['training'] = training
    with variable_scope.variable_creator_scope(self._variable_creator):
      return self.function(inputs, **arguments)

  def _variable_creator(self, next_creator, **kwargs):
    """Intercepts variable creation inside `function` to reuse variables by name."""
    name = kwargs['name']
    if name in self._variable_dict:
      return self._variable_dict[name]
    var = next_creator(**kwargs)
    self._variable_dict[name] = var
    if var.trainable:
      self._trainable_weights.append(var)
    else:
      self._non_trainable_weights.append(var)
    K.track_variable(var)
    return var

  def compute_mask(self, inputs, mask=None):
    if callable(self.mask):
      return self.mask(inputs, mask)
    return self.mask

  def get_config(self):
    function_config = self._serialize_function_to_config(self.function)
    output_shape_config = self._serialize_function_to_config(self._output_shape,
                                                             allow_raw=True)
    config = {
        'function': function_config[0],
        'function_type': function_config[1],
        'module': function_config[2],
        'output_shape': output_shape_config[0],
        'output_shape_type': output_shape_config[1],
        'output_shape_module': output_shape_config[2],
    }
    if self.mask is not None:
      mask_config = self._serialize_function_to_config(self.mask)
      config.update({
          'mask': mask_config[0],
          'mask_type': mask_config[1],
          'mask_module': mask_config[2]
      })
    config['arguments'] = self.arguments

    base_config = super(Lambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def _serialize_function_to_config(self, inputs, allow_raw=False):
    """Returns (serialized, type_tag, module) for a callable or raw value."""
    if isinstance(inputs, python_types.LambdaType):
      output = generic_utils.func_dump(inputs)
      output_type = 'lambda'
      module = inputs.__module__
    elif callable(inputs):
      output = inputs.__name__
      output_type = 'function'
      module = inputs.__module__
    elif allow_raw:
      output = inputs
      output_type = 'raw'
      module = None
    else:
      raise ValueError(
          'Invalid input for serialization, type: %s ' % type(inputs))

    return output, output_type, module

  @classmethod
  def from_config(cls, config, custom_objects=None):
    config = config.copy()
    function = cls._parse_function_from_config(
        config, custom_objects, 'function', 'module', 'function_type')

    output_shape = cls._parse_function_from_config(
        config, custom_objects, 'output_shape', 'output_shape_module',
        'output_shape_type')
    if 'mask' in config:
      mask = cls._parse_function_from_config(
          config, custom_objects, 'mask', 'mask_module', 'mask_type')
    else:
      mask = None

    config['function'] = function
    config['output_shape'] = output_shape
    config['mask'] = mask

    # If arguments were numpy array, they have been saved as
    # list. We need to recover the ndarray
    if 'arguments' in config:
      for key in config['arguments']:
        if isinstance(config['arguments'][key], dict):
          arg_dict = config['arguments'][key]
          if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
            # Overwrite the argument with its numpy translation
            config['arguments'][key] = np.array(arg_dict['value'])

    return cls(**config)

  @classmethod
  def _parse_function_from_config(
      cls, config, custom_objects, func_attr_name, module_attr_name,
      func_type_attr_name):
    """Deserializes one function-valued config entry (function/lambda/raw)."""
    globs = globals()
    module = config.pop(module_attr_name, None)
    if module in sys.modules:
      globs.update(sys.modules[module].__dict__)
    elif module is not None:
      # Note: we don't know the name of the function if it's a lambda.
      warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(module)
                    , UserWarning)
    if custom_objects:
      globs.update(custom_objects)
    function_type = config.pop(func_type_attr_name)
    if function_type == 'function':
      # Simple lookup in custom objects
      function = generic_utils.deserialize_keras_object(
          config[func_attr_name],
          custom_objects=custom_objects,
          printable_module_name='function in Lambda layer')
    elif function_type == 'lambda':
      # Unsafe deserialization from bytecode
      function = generic_utils.func_load(
          config[func_attr_name], globs=globs)
    elif function_type == 'raw':
      function = config[func_attr_name]
    else:
      raise TypeError('Unknown function type:', function_type)
    return function
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function
  passed as the `activation` argument, `kernel` is a weights matrix
  created by the layer, and `bias` is a bias vector created by the layer
  (only applicable if `use_bias` is `True`).

  Note: If the input to the layer has a rank greater than 2, then
  it is flattened prior to the initial dot product with `kernel`.

  Example:
  ```python
  # as first layer in a sequential model:
  model = Sequential()
  model.add(Dense(32, input_shape=(16,)))
  # now the model will take as input arrays of shape (*, 16)
  # and output arrays of shape (*, 32)
  # after the first layer, you don't need to specify
  # the size of the input anymore:
  model.add(Dense(32))
  ```

  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation")..
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    N-D tensor with shape: `(batch_size, ..., input_dim)`.
    The most common situation would be
    a 2D input with shape `(batch_size, input_dim)`.

  Output shape:
    N-D tensor with shape: `(batch_size, ..., units)`.
    For instance, for a 2D input with shape `(batch_size, input_dim)`,
    the output would have shape `(batch_size, units)`.
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Legacy support: accept `input_dim` as an alias for a 1-D
    # `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units) if not isinstance(units, int) else units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Creates `kernel` (and optionally `bias`) once the input size is known.

    Raises:
      TypeError: If the layer dtype is not floating point or complex.
      ValueError: If the last input dimension is not statically known.
    """
    dtype = dtypes.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('Unable to build `Dense` layer with non-floating point '
                      'dtype %s' % (dtype,))
    input_shape = tensor_shape.TensorShape(input_shape)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    # Pin the last axis so later calls with a different input size fail fast.
    self.input_spec = InputSpec(min_ndim=2,
                                axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    rank = len(inputs.shape)
    if rank > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      if not context.executing_eagerly():
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      # Rank-2 fast path: cast for mixed precision, then a plain (or
      # sparse) matmul.
      inputs = math_ops.cast(inputs, self._compute_dtype)
      if K.is_sparse(inputs):
        outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)
      else:
        outputs = gen_math_ops.mat_mul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
  """Layer that applies an update to the cost function based input activity.

  Passes its input through unchanged while adding an L1/L2 activity
  penalty on that input to the model's losses.

  Arguments:
    l1: L1 regularization factor (positive float).
    l2: L2 regularization factor (positive float).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of
    integers, does not include the samples axis) when using this layer
    as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, l1=0., l2=0., **kwargs):
    regularizer = regularizers.L1L2(l1=l1, l2=l2)
    super(ActivityRegularization, self).__init__(
        activity_regularizer=regularizer, **kwargs)
    self.supports_masking = True
    # Stored only so get_config can serialize the factors.
    self.l1 = l1
    self.l2 = l2

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    base = super(ActivityRegularization, self).get_config()
    base.update({'l1': self.l1, 'l2': self.l2})
    return base
@keras_export('keras.layers.DropConnectDense')
class DropConnectDense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: If the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
kernel_dropout: Float between 0 and 1.
Fraction of the weight units to drop.
unit_dropout: Float between 0 and 1.
Fraction of the inputs to drop.
use_mc_dropout: Bool when True layer always acts like in "train mode"
so dropout can be applied also in inference mode
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
kernel_dropout=0.,
unit_dropout=0.,
use_mc_dropout=False,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(DropConnectDense, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.units = int(units) if not isinstance(units, int) else units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.kernel_dropout = min(1., max(0., kernel_dropout))
self.unit_dropout = min(1., max(0., unit_dropout))
self.use_mc_dropout = use_mc_dropout
self.supports_masking = True
self.input_spec = InputSpec(min_ndim=2)
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
if self.use_mc_dropout:
training = True
#units dropout
def drop_inputs():
return K.dropout(inputs, self.unit_dropout)
if 0. < self.unit_dropout < 1.:
inputs = K.in_train_phase(drop_inputs, inputs, training=training)
#kernel dropout
ones = array_ops.ones_like(self.kernel)
def dropped_weight_connections():
return K.dropout(ones, self.kernel_dropout) * (1 - self.kernel_dropout)
if 0. < self.kernel_dropout < 1.:
kern_dp_mask = K.in_train_phase(dropped_weight_connections, ones, training=training)
else:
kern_dp_mask = ones
rank = len(inputs.shape)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.kernel * kern_dp_mask, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
inputs = math_ops.cast(inputs, self._compute_dtype)
if K.is_sparse(inputs):
outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel * kern_dp_mask)
else:
outputs = gen_math_ops.mat_mul(inputs, self.kernel * kern_dp_mask)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
  def get_config(self):
    """Returns the layer configuration for serialization.

    The mapping mirrors the constructor arguments so the layer can be
    re-created via `from_config` / model deserialization.
    """
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
        'kernel_dropout': self.kernel_dropout,
        'unit_dropout': self.unit_dropout,
        'use_mc_dropout': self.use_mc_dropout
    }
    base_config = super(DropConnectDense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
  """Masks timesteps whose features all equal `mask_value`.

  For each timestep (features collapsed along the last axis), if every
  feature value equals `mask_value`, that timestep is masked in all
  downstream mask-consuming layers, and zeroed out in this layer's output.
  """

  def __init__(self, mask_value=0., **kwargs):
    super(Masking, self).__init__(**kwargs)
    self.supports_masking = True
    self.mask_value = mask_value
    self._compute_output_and_mask_jointly = True

  def compute_mask(self, inputs, mask=None):
    # A timestep survives iff any of its features differs from mask_value.
    return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)

  def call(self, inputs):
    keep = K.any(
        math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
    masked = inputs * math_ops.cast(keep, inputs.dtype)
    # Attach the computed mask so downstream layers can consume it directly.
    masked._keras_mask = array_ops.squeeze(keep, axis=-1)
    return masked

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    base = super(Masking, self).get_config()
    base.update({'mask_value': self.mask_value})
    return base
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
  """Applies dropout to the input.

  At training time, randomly sets input units to 0 with frequency `rate`
  (the `nn.dropout` op rescales the survivors by `1 / (1 - rate)`); at
  inference the input passes through unchanged. `noise_shape` lets the
  binary mask be broadcast; `None` entries in it are resolved from the
  input's dynamic shape at call time.
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(Dropout, self).__init__(**kwargs)
    # Fraction of units to drop, expected in [0, 1].
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Resolve `None` entries of the user-provided `noise_shape` against the
    # concrete runtime shape of `inputs`.
    if self.noise_shape is None:
      return None
    concrete_inputs_shape = array_ops.shape(inputs)
    noise_shape = []
    for i, value in enumerate(self.noise_shape):
      noise_shape.append(concrete_inputs_shape[i] if value is None else value)
    return ops.convert_to_tensor(noise_shape)

  def call(self, inputs, training=None):
    if training is None:
      training = K.learning_phase()

    def dropped_inputs():
      return nn.dropout(
          inputs,
          noise_shape=self._get_noise_shape(inputs),
          seed=self.seed,
          rate=self.rate)

    # `smart_cond` resolves statically when `training` is a Python bool and
    # builds a graph cond otherwise.
    output = tf_utils.smart_cond(training,
                                 dropped_inputs,
                                 lambda: array_ops.identity(inputs))
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed
    }
    base_config = super(Dropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
  """Spatial 1D dropout: drops entire feature channels of a 3D input.

  Behaves like `Dropout`, except the noise mask has shape
  `(batch, 1, channels)`, so a dropped channel is zeroed for every
  timestep of the sequence.
  """

  def __init__(self, rate, **kwargs):
    super(SpatialDropout1D, self).__init__(rate, **kwargs)
    self.input_spec = InputSpec(ndim=3)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the dropout mask across the timestep axis.
    return (shape[0], 1, shape[2])
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
  """Spatial 2D dropout: drops entire channels of a 4D feature map."""

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout2D, self).__init__(rate, **kwargs)
    fmt = data_format if data_format is not None else K.image_data_format()
    if fmt not in {'channels_last', 'channels_first'}:
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = fmt
    self.input_spec = InputSpec(ndim=4)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    if self.data_format == 'channels_first':
      # One mask entry per (batch, channel), broadcast over H and W.
      return (shape[0], shape[1], 1, 1)
    elif self.data_format == 'channels_last':
      # One mask entry per (batch, channel), broadcast over H and W.
      return (shape[0], 1, 1, shape[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
  """Spatial 3D dropout: drops entire channels of a 5D feature map."""

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout3D, self).__init__(rate, **kwargs)
    fmt = data_format if data_format is not None else K.image_data_format()
    if fmt not in {'channels_last', 'channels_first'}:
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = fmt
    self.input_spec = InputSpec(ndim=5)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    if self.data_format == 'channels_first':
      # One mask entry per (batch, channel), broadcast over all spatial dims.
      return (shape[0], shape[1], 1, 1, 1)
    elif self.data_format == 'channels_last':
      # One mask entry per (batch, channel), broadcast over all spatial dims.
      return (shape[0], 1, 1, 1, shape[4])
@keras_export('keras.layers.Activation')
class Activation(Layer):
  """Applies an activation function to the input, element-wise."""

  def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    self.supports_masking = True
    # Accepts a callable or a string name resolvable by `activations.get`.
    self.activation = activations.get(activation)

  def call(self, inputs):
    return self.activation(inputs)

  def compute_output_shape(self, input_shape):
    # Element-wise op: shape is unchanged.
    return input_shape

  def get_config(self):
    base = super(Activation, self).get_config()
    base.update({'activation': activations.serialize(self.activation)})
    return base
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
  """Reshapes inputs to `(batch,) + target_shape`.

  At most one entry of `target_shape` may be -1, in which case it is
  inferred from the total number of input elements.
  """

  def __init__(self, target_shape, **kwargs):
    super(Reshape, self).__init__(**kwargs)
    # Target shape excludes the batch dimension.
    self.target_shape = tuple(target_shape)

  def _fix_unknown_dimension(self, input_shape, output_shape):
    """Replaces the single negative entry of `output_shape`, if any, with
    the size inferred from `input_shape`.

    Raises:
      ValueError: if more than one entry is negative, or the element counts
        of the two shapes cannot match.
    """
    output_shape = list(output_shape)
    msg = 'total size of new array must be unchanged'
    known, unknown = 1, None
    for index, dim in enumerate(output_shape):
      if dim < 0:
        if unknown is None:
          unknown = index
        else:
          raise ValueError('Can only specify one unknown dimension.')
      else:
        known *= dim
    original = np.prod(input_shape, dtype=int)
    if unknown is not None:
      if known == 0 or original % known != 0:
        raise ValueError(msg)
      output_shape[unknown] = original // known
    elif original != known:
      raise ValueError(msg)
    return output_shape

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if None in input_shape[1:]:
      # Input shape is partially unknown: -1 entries cannot be resolved, so
      # report them as unknown (None) dimensions.
      output_shape = [input_shape[0]]
      output_shape += tuple(s if s != -1 else None for s in self.target_shape)
    else:
      output_shape = [input_shape[0]]
      output_shape += self._fix_unknown_dimension(input_shape[1:],
                                                  self.target_shape)
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    return array_ops.reshape(inputs,
                             (array_ops.shape(inputs)[0],) + self.target_shape)

  def get_config(self):
    config = {'target_shape': self.target_shape}
    base_config = super(Reshape, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Permute')
class Permute(Layer):
  """Permutes the non-batch dimensions of the input according to `dims`.

  `dims` is a 1-indexed permutation of the input axes excluding the batch
  axis, e.g. `(2, 1)` swaps the two trailing axes of a 3D input.
  """

  def __init__(self, dims, **kwargs):
    super(Permute, self).__init__(**kwargs)
    self.dims = tuple(dims)
    if sorted(dims) != list(range(1, len(dims) + 1)):
      raise ValueError(
          'Invalid permutation `dims` for Permute Layer: %s. '
          'The set of indices in `dims` must be consecutive and start from 1.' %
          (dims,))
    self.input_spec = InputSpec(ndim=len(self.dims) + 1)

  def compute_output_shape(self, input_shape):
    in_shape = tensor_shape.TensorShape(input_shape).as_list()
    out_shape = list(in_shape)
    # Position i+1 of the output takes the input axis named by dims[i].
    for target_pos, source_axis in enumerate(self.dims, start=1):
      out_shape[target_pos] = in_shape[source_axis]
    return tensor_shape.TensorShape(out_shape)

  def call(self, inputs):
    # Axis 0 (batch) always stays in place.
    return array_ops.transpose(inputs, perm=(0,) + self.dims)

  def get_config(self):
    base = super(Permute, self).get_config()
    base.update({'dims': self.dims})
    return base
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
  """Flattens the input to `(batch, -1)` without affecting the batch size.

  For `channels_first` data the channel axis is moved last before
  flattening, so flattened features are ordered consistently with
  `channels_last` inputs.
  """

  def __init__(self, data_format=None, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(min_ndim=1)

  def call(self, inputs):
    if (self.data_format == 'channels_first'
        and K.ndim(inputs) is not None and K.ndim(inputs) > 1):
      # Move the channel axis (1) to the end: (0, 2, ..., n-1, 1).
      permutation = [0]
      permutation.extend([i for i in
                          range(2, K.ndim(inputs))])
      permutation.append(1)
      inputs = array_ops.transpose(inputs, perm=permutation)

    input_shape = inputs.shape
    if input_shape[1:].is_fully_defined():
      flattened_dim = tensor_shape.dimension_value(
          np.prod(input_shape[1:], dtype=int))
      # Pick an index dtype wide enough for the flattened size.
      if flattened_dim > np.iinfo(np.int32).max:
        shape_dtype = dtypes.int64
      else:
        shape_dtype = dtypes.int32
      outputs = array_ops.reshape(
          inputs, constant_op.constant((-1, flattened_dim), dtype=shape_dtype))
    else:
      batch_size = tensor_shape.dimension_value(inputs.shape[0])
      if batch_size:
        if batch_size > np.iinfo(np.int32).max:
          shape_dtype = dtypes.int64
        else:
          shape_dtype = dtypes.int32
        outputs = array_ops.reshape(
            inputs, constant_op.constant((batch_size, -1), dtype=shape_dtype))
      else:
        # Neither features nor batch statically known: fully dynamic reshape.
        outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
    if not context.executing_eagerly():
      # Preserve as much static shape information as possible in graph mode.
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.as_shape(input_shape).as_list()
    if not input_shape:
      # NOTE(review): for a scalar (rank-0) input this yields shape [1, 1]
      # after the append below -- confirm this corner case is intended.
      output_shape = tensor_shape.TensorShape([1])
    else:
      output_shape = [input_shape[0]]
    if all(input_shape[1:]):
      output_shape += [np.prod(input_shape[1:], dtype=int)]
    else:
      # Any unknown trailing dim makes the flattened size unknown.
      output_shape += [None]
    return tensor_shape.TensorShape(output_shape)

  def get_config(self):
    config = {'data_format': self.data_format}
    base_config = super(Flatten, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
  """Repeats a 2D input `n` times, producing `(batch, n, features)`."""

  def __init__(self, n, **kwargs):
    super(RepeatVector, self).__init__(**kwargs)
    self.n = n
    self.input_spec = InputSpec(ndim=2)

  def compute_output_shape(self, input_shape):
    shape = tensor_shape.TensorShape(input_shape).as_list()
    # Insert the repetition axis between batch and features.
    return tensor_shape.TensorShape([shape[0], self.n, shape[1]])

  def call(self, inputs):
    return K.repeat(inputs, self.n)

  def get_config(self):
    base = super(RepeatVector, self).get_config()
    base.update({'n': self.n})
    return base
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
  """Wraps an arbitrary callable as a Keras `Layer`.

  The callable receives the layer inputs (plus `mask=`/`training=` if its
  signature declares them and any extra keyword `arguments`). Variables
  created inside the callable are tracked via a variable-creator scope.
  Serialization of `function`/`output_shape`/`mask` supports plain
  functions (by name), lambdas (by marshalled bytecode), and raw values.
  """

  def __init__(self, function, output_shape=None, mask=None, arguments=None,
               **kwargs):
    super(Lambda, self).__init__(**kwargs)
    self.function = function
    self.arguments = arguments if arguments else {}
    if mask is not None:
      self.supports_masking = True
    self.mask = mask
    self._supports_ragged_inputs = True
    self._output_shape = output_shape
    # Maps variable name -> variable, so re-entrant calls reuse variables
    # instead of creating duplicates.
    self._variable_dict = {}
    self._trainable_weights = []
    self._non_trainable_weights = []

    # Only forward `training`/`mask` if the wrapped callable accepts them.
    function_args = tf_inspect.getfullargspec(self.function).args
    self._fn_expects_training_arg = 'training' in function_args
    self._fn_expects_mask_arg = 'mask' in function_args

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if self._output_shape is None:
      # Leverage the base class's automatic shape inference by tracing the
      # function eagerly; safe even under a graph outer context because
      # Lambda layers have no side effects such as `add_loss`.
      with context.eager_mode():
        try:
          return super(Lambda, self).compute_output_shape(input_shape)
        except NotImplementedError:
          raise NotImplementedError(
              'We could not automatically infer the shape of the Lambda\'s '
              'output. Please specify `output_shape` for this Lambda.')

    if callable(self._output_shape):
      output_shapes = self._output_shape(input_shape)
      return tf_utils.convert_shapes(output_shapes, to_tuples=False)

    # Static `output_shape` was given without the batch dimension; prepend
    # the batch size taken from the (first) input.
    input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
    batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None

    def _add_batch(shape):
      return tensor_shape.TensorShape([batch_size] + shape.as_list())

    output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
    return nest.map_structure(_add_batch, output_shapes)

  def call(self, inputs, mask=None, training=None):
    arguments = self.arguments
    if self._fn_expects_mask_arg:
      arguments['mask'] = mask
    if self._fn_expects_training_arg:
      arguments['training'] = training
    # Track any variables created inside the wrapped function.
    with variable_scope.variable_creator_scope(self._variable_creator):
      return self.function(inputs, **arguments)

  def _variable_creator(self, next_creator, **kwargs):
    # Reuse a previously created variable with the same name, otherwise
    # create it and register it with the layer's weight lists.
    name = kwargs['name']
    if name in self._variable_dict:
      return self._variable_dict[name]
    var = next_creator(**kwargs)
    self._variable_dict[name] = var
    if var.trainable:
      self._trainable_weights.append(var)
    else:
      self._non_trainable_weights.append(var)
    K.track_variable(var)
    return var

  def compute_mask(self, inputs, mask=None):
    # `mask` may be a static value or a callable computing the output mask.
    if callable(self.mask):
      return self.mask(inputs, mask)
    return self.mask

  def get_config(self):
    function_config = self._serialize_function_to_config(self.function)
    output_shape_config = self._serialize_function_to_config(self._output_shape,
                                                             allow_raw=True)
    config = {
        'function': function_config[0],
        'function_type': function_config[1],
        'module': function_config[2],
        'output_shape': output_shape_config[0],
        'output_shape_type': output_shape_config[1],
        'output_shape_module': output_shape_config[2],
    }
    if self.mask is not None:
      mask_config = self._serialize_function_to_config(self.mask)
      config.update({
          'mask': mask_config[0],
          'mask_type': mask_config[1],
          'mask_module': mask_config[2]
      })
    config['arguments'] = self.arguments

    base_config = super(Lambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def _serialize_function_to_config(self, inputs, allow_raw=False):
    """Returns a `(serialized, type, module)` triple for a callable/value."""
    if isinstance(inputs, python_types.LambdaType):
      output = generic_utils.func_dump(inputs)
      output_type = 'lambda'
      module = inputs.__module__
    elif callable(inputs):
      output = inputs.__name__
      output_type = 'function'
      module = inputs.__module__
    elif allow_raw:
      output = inputs
      output_type = 'raw'
      module = None
    else:
      raise ValueError(
          'Invalid input for serialization, type: %s ' % type(inputs))

    return output, output_type, module

  @classmethod
  def from_config(cls, config, custom_objects=None):
    config = config.copy()
    function = cls._parse_function_from_config(
        config, custom_objects, 'function', 'module', 'function_type')

    output_shape = cls._parse_function_from_config(
        config, custom_objects, 'output_shape', 'output_shape_module',
        'output_shape_type')
    if 'mask' in config:
      mask = cls._parse_function_from_config(
          config, custom_objects, 'mask', 'mask_module', 'mask_type')
    else:
      mask = None

    config['function'] = function
    config['output_shape'] = output_shape
    config['mask'] = mask

    # If arguments were numpy array, they have been saved as
    # list. We need to recover the ndarray
    if 'arguments' in config:
      for key in config['arguments']:
        if isinstance(config['arguments'][key], dict):
          arg_dict = config['arguments'][key]
          if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
            # Overwrite the argument with its numpy translation
            config['arguments'][key] = np.array(arg_dict['value'])

    return cls(**config)

  @classmethod
  def _parse_function_from_config(
      cls, config, custom_objects, func_attr_name, module_attr_name,
      func_type_attr_name):
    """Deserializes one of `function`/`output_shape`/`mask` from a config."""
    globs = globals()
    module = config.pop(module_attr_name, None)
    if module in sys.modules:
      globs.update(sys.modules[module].__dict__)
    elif module is not None:
      # Note: we don't know the name of the function if it's a lambda.
      warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(module)
                    , UserWarning)
    if custom_objects:
      globs.update(custom_objects)
    function_type = config.pop(func_type_attr_name)
    if function_type == 'function':
      # Simple lookup in custom objects
      function = generic_utils.deserialize_keras_object(
          config[func_attr_name],
          custom_objects=custom_objects,
          printable_module_name='function in Lambda layer')
    elif function_type == 'lambda':
      # Unsafe deserialization from bytecode
      # SECURITY: `func_load` unmarshals and executes Python bytecode -- only
      # deserialize Lambda configs from trusted sources.
      function = generic_utils.func_load(
          config[func_attr_name], globs=globs)
    elif function_type == 'raw':
      function = config[func_attr_name]
    else:
      raise TypeError('Unknown function type:', function_type)
    return function
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Densely-connected layer: `activation(inputs @ kernel + bias)`.

  Accepts inputs of rank >= 2; the contraction is applied along the last
  axis. Sparse 2D inputs are supported via a sparse-dense matmul.
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Legacy support: `input_dim` is translated to `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units) if not isinstance(units, int) else units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Creates `kernel` (and optionally `bias`) from the input's last dim."""
    dtype = dtypes.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('Unable to build `Dense` layer with non-floating point '
                      'dtype %s' % (dtype,))
    input_shape = tensor_shape.TensorShape(input_shape)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    # Pin the feature dimension so later calls are shape-checked against it.
    self.input_spec = InputSpec(min_ndim=2,
                                axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    rank = len(inputs.shape)
    if rank > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      if not context.executing_eagerly():
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      inputs = math_ops.cast(inputs, self._compute_dtype)
      if K.is_sparse(inputs):
        outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)
      else:
        outputs = gen_math_ops.mat_mul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    """Returns the constructor arguments for serialization."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
  """Applies an L1/L2 penalty on the layer's input activity.

  The input passes through unchanged; the penalty is contributed to the
  model losses via the base class's `activity_regularizer` mechanism.
  """

  def __init__(self, l1=0., l2=0., **kwargs):
    super(ActivityRegularization, self).__init__(
        activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
    self.supports_masking = True
    self.l1 = l1
    self.l2 = l2

  def compute_output_shape(self, input_shape):
    # Identity transform: shape is preserved.
    return input_shape

  def get_config(self):
    base = super(ActivityRegularization, self).get_config()
    base.update({'l1': self.l1, 'l2': self.l2})
    return base
@keras_export('keras.layers.DropConnectDense')
class DropConnectDense(Layer):
  """`Dense` layer with unit dropout and DropConnect kernel dropout.

  Beyond the standard `Dense` arguments:

  Arguments:
    kernel_dropout: float in [0, 1]; fraction of individual kernel weights
      dropped at training time (DropConnect).
    unit_dropout: float in [0, 1]; fraction of input units dropped at
      training time (standard dropout on the inputs).
    use_mc_dropout: bool; if True, dropout also runs at inference time
      (Monte Carlo dropout).
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               kernel_dropout=0.,
               unit_dropout=0.,
               use_mc_dropout=False,
               **kwargs):
    # Legacy support: `input_dim` is translated to `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(DropConnectDense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units) if not isinstance(units, int) else units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp the dropout rates into [0, 1].
    self.kernel_dropout = min(1., max(0., kernel_dropout))
    self.unit_dropout = min(1., max(0., unit_dropout))
    self.use_mc_dropout = use_mc_dropout

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Creates `kernel` (and optionally `bias`) from the input's last dim."""
    dtype = dtypes.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('Unable to build `Dense` layer with non-floating point '
                      'dtype %s' % (dtype,))
    input_shape = tensor_shape.TensorShape(input_shape)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    self.input_spec = InputSpec(min_ndim=2,
                                axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs, training=None):
    """Applies unit and kernel dropout, then the dense transform."""
    if training is None:
      training = K.learning_phase()
    if self.use_mc_dropout:
      # Monte Carlo dropout: keep sampling dropout masks at inference time.
      training = True
    # Unit dropout: drop entries of the input activations.
    def drop_inputs():
      return K.dropout(inputs, self.unit_dropout)
    if 0. < self.unit_dropout < 1.:
      inputs = K.in_train_phase(drop_inputs, inputs, training=training)
    # Kernel (DropConnect) dropout: drop individual weight connections.
    ones = array_ops.ones_like(self.kernel)
    def dropped_weight_connections():
      # NOTE(review): `K.dropout` rescales kept entries by 1 / (1 - rate);
      # multiplying by (1 - rate) undoes that, leaving an unscaled 0/1 mask.
      # The train-time expected kernel is therefore (1 - rate) * kernel while
      # inference uses the full kernel -- confirm this asymmetry is intended.
      return K.dropout(ones, self.kernel_dropout) * (1 - self.kernel_dropout)
    if 0. < self.kernel_dropout < 1.:
      kern_dp_mask = K.in_train_phase(dropped_weight_connections, ones, training=training)
    else:
      kern_dp_mask = ones
    rank = len(inputs.shape)
    if rank > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel * kern_dp_mask, [[rank - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      if not context.executing_eagerly():
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      inputs = math_ops.cast(inputs, self._compute_dtype)
      if K.is_sparse(inputs):
        outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel * kern_dp_mask)
      else:
        outputs = gen_math_ops.mat_mul(inputs, self.kernel * kern_dp_mask)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    """Returns the constructor arguments for serialization."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
        'kernel_dropout': self.kernel_dropout,
        'unit_dropout': self.unit_dropout,
        'use_mc_dropout': self.use_mc_dropout
    }
    base_config = super(DropConnectDense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
f73ef6b63c8bf7a138e8dcaf8a35fc2798322aea | 7,145 | py | Python | youtube_dl/extractor/crackle.py | xiaobailong653/scripts | c68c988f59b8b9428163f687d0116429e9a0d479 | [
"Apache-2.0"
] | 2 | 2019-11-25T01:16:13.000Z | 2022-02-10T17:14:06.000Z | venv/lib/python3.7/site-packages/youtube_dl/extractor/crackle.py | edubaschool/college-de-france-dl | 1b67c489a3c6d427f2674394a28c9e37ab72a017 | [
"MIT"
] | 1 | 2019-01-24T09:33:42.000Z | 2019-01-24T09:33:42.000Z | venv/lib/python3.7/site-packages/youtube_dl/extractor/crackle.py | edubaschool/college-de-france-dl | 1b67c489a3c6d427f2674394a28c9e37ab72a017 | [
"MIT"
] | 1 | 2020-10-13T18:15:09.000Z | 2020-10-13T18:15:09.000Z | # coding: utf-8
from __future__ import unicode_literals, division
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
parse_duration,
url_or_none,
ExtractorError
)
class CrackleIE(InfoExtractor):
    """Extractor for Crackle / Sony Crackle videos and playlist entries."""
    _VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?(?:sony)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
    _TESTS = [{
        # geo restricted to CA
        'url': 'https://www.crackle.com/andromeda/2502343',
        'info_dict': {
            'id': '2502343',
            'ext': 'mp4',
            'title': 'Under The Night',
            'description': 'md5:d2b8ca816579ae8a7bf28bfff8cefc8a',
            'duration': 2583,
            'view_count': int,
            'average_rating': 0,
            'age_limit': 14,
            'genre': 'Action, Sci-Fi',
            'creator': 'Allan Kroeker',
            'artist': 'Keith Hamilton Cobb, Kevin Sorbo, Lisa Ryder, Lexa Doig, Robert Hewitt Wolfe',
            'release_year': 2000,
            'series': 'Andromeda',
            'episode': 'Under The Night',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'https://www.sonycrackle.com/andromeda/2502343',
        'only_matching': True,
    }]

    # Progressive MP4 slots exposed by the API, keyed by the 'Type' field.
    _MEDIA_FILE_SLOTS = {
        '360p.mp4': {
            'width': 640,
            'height': 360,
        },
        '480p.mp4': {
            'width': 768,
            'height': 432,
        },
        '480p_1mbps.mp4': {
            'width': 852,
            'height': 480,
        },
    }

    def _real_extract(self, url):
        """Tries each supported country until the media JSON is served,
        then builds formats, subtitles and thumbnails from it."""
        video_id = self._match_id(url)

        country_code = self._downloader.params.get('geo_bypass_country', None)
        countries = [country_code] if country_code else (
            'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI')

        last_e = None

        for country in countries:
            try:
                media = self._download_json(
                    'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s'
                    % (video_id, country), video_id,
                    'Downloading media JSON as %s' % country,
                    'Unable to download media JSON', query={
                        'disableProtocols': 'true',
                        'format': 'json'
                    })
            except ExtractorError as e:
                # 401 means geo restriction, trying next country
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    last_e = e
                    continue
                raise

            media_urls = media.get('MediaURLs')
            if not media_urls or not isinstance(media_urls, list):
                continue

            title = media['Title']

            formats = []
            # FIX: the loop variable used to be `e`, shadowing the exception
            # variable above; renamed for clarity.
            for media_url in media_urls:
                if media_url.get('UseDRM') is True:
                    continue
                format_url = url_or_none(media_url.get('Path'))
                if not format_url:
                    continue
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                elif ext == 'mpd':
                    formats.extend(self._extract_mpd_formats(
                        format_url, video_id, mpd_id='dash', fatal=False))
                elif format_url.endswith('.ism/Manifest'):
                    formats.extend(self._extract_ism_formats(
                        format_url, video_id, ism_id='mss', fatal=False))
                else:
                    mfs_path = media_url.get('Type')
                    mfs_info = self._MEDIA_FILE_SLOTS.get(mfs_path)
                    if not mfs_info:
                        continue
                    formats.append({
                        'url': format_url,
                        'format_id': 'http-' + mfs_path.split('.')[0],
                        'width': mfs_info['width'],
                        'height': mfs_info['height'],
                    })
            self._sort_formats(formats)

            description = media.get('Description')
            duration = int_or_none(media.get(
                'DurationInSeconds')) or parse_duration(media.get('Duration'))
            view_count = int_or_none(media.get('CountViews'))
            average_rating = float_or_none(media.get('UserRating'))
            age_limit = parse_age_limit(media.get('Rating'))
            genre = media.get('Genre')
            release_year = int_or_none(media.get('ReleaseYear'))
            creator = media.get('Directors')
            artist = media.get('Cast')

            if media.get('MediaTypeDisplayValue') == 'Full Episode':
                series = media.get('ShowName')
                episode = title
                season_number = int_or_none(media.get('Season'))
                episode_number = int_or_none(media.get('Episode'))
            else:
                series = episode = season_number = episode_number = None

            subtitles = {}
            cc_files = media.get('ClosedCaptionFiles')
            if isinstance(cc_files, list):
                for cc_file in cc_files:
                    if not isinstance(cc_file, dict):
                        continue
                    cc_url = url_or_none(cc_file.get('Path'))
                    if not cc_url:
                        continue
                    lang = cc_file.get('Locale') or 'en'
                    subtitles.setdefault(lang, []).append({'url': cc_url})

            thumbnails = []
            images = media.get('Images')
            # BUGFIX: `.items()` implies 'Images' is a mapping of 'Img_WxH'
            # keys to URLs, but the old guard was `isinstance(images, list)`
            # -- a list has no `.items()`, so thumbnails were either skipped
            # entirely or the extractor crashed with AttributeError. Guard on
            # dict so `.items()` is valid.
            if isinstance(images, dict):
                for image_key, image_url in images.items():
                    mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
                    if not mobj:
                        continue
                    thumbnails.append({
                        'url': image_url,
                        'width': int(mobj.group(1)),
                        'height': int(mobj.group(2)),
                    })

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'duration': duration,
                'view_count': view_count,
                'average_rating': average_rating,
                'age_limit': age_limit,
                'genre': genre,
                'creator': creator,
                'artist': artist,
                'release_year': release_year,
                'series': series,
                'episode': episode,
                'season_number': season_number,
                'episode_number': episode_number,
                'thumbnails': thumbnails,
                'subtitles': subtitles,
                'formats': formats,
            }

        if last_e:
            raise last_e
        # BUGFIX: previously this path executed `raise last_e` even when no
        # geo error had been recorded (every country simply lacked
        # 'MediaURLs'), which raised `None` -> TypeError. Raise a proper
        # extractor error instead.
        raise ExtractorError('Unable to extract media info for %s' % video_id)
| 36.641026 | 120 | 0.478097 |
from __future__ import unicode_literals, division
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
parse_duration,
url_or_none,
ExtractorError
)
class CrackleIE(InfoExtractor):
    """Extractor for Crackle / Sony Crackle videos.

    Fetches metadata and media URLs from the Crackle "details" API,
    retrying across a list of country codes to work around geo blocking
    (the API answers HTTP 401 for blocked countries).
    """
    _VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?(?:sony)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.crackle.com/andromeda/2502343',
        'info_dict': {
            'id': '2502343',
            'ext': 'mp4',
            'title': 'Under The Night',
            'description': 'md5:d2b8ca816579ae8a7bf28bfff8cefc8a',
            'duration': 2583,
            'view_count': int,
            'average_rating': 0,
            'age_limit': 14,
            'genre': 'Action, Sci-Fi',
            'creator': 'Allan Kroeker',
            'artist': 'Keith Hamilton Cobb, Kevin Sorbo, Lisa Ryder, Lexa Doig, Robert Hewitt Wolfe',
            'release_year': 2000,
            'series': 'Andromeda',
            'episode': 'Under The Night',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'https://www.sonycrackle.com/andromeda/2502343',
        'only_matching': True,
    }]
    # Known progressive (plain HTTP) media "slots" and their video dimensions,
    # keyed by the API's MediaURLs[].Type value.
    _MEDIA_FILE_SLOTS = {
        '360p.mp4': {
            'width': 640,
            'height': 360,
        },
        '480p.mp4': {
            'width': 768,
            'height': 432,
        },
        '480p_1mbps.mp4': {
            'width': 852,
            'height': 480,
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Honour an explicit geo-bypass country, else probe a fixed list of
        # US territories until one is not geo-blocked.
        country_code = self._downloader.params.get('geo_bypass_country', None)
        countries = [country_code] if country_code else (
            'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI')
        last_e = None
        for country in countries:
            try:
                media = self._download_json(
                    'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s'
                    % (video_id, country), video_id,
                    'Downloading media JSON as %s' % country,
                    'Unable to download media JSON', query={
                        'disableProtocols': 'true',
                        'format': 'json'
                    })
            except ExtractorError as e:
                # 401 means geo-blocked for this country; remember the error
                # and try the next one. Anything else is fatal.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    last_e = e
                    continue
                raise
            media_urls = media.get('MediaURLs')
            if not media_urls or not isinstance(media_urls, list):
                continue
            title = media['Title']
            formats = []
            for e in media['MediaURLs']:
                if e.get('UseDRM') is True:
                    continue
                format_url = url_or_none(e.get('Path'))
                if not format_url:
                    continue
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                elif ext == 'mpd':
                    formats.extend(self._extract_mpd_formats(
                        format_url, video_id, mpd_id='dash', fatal=False))
                elif format_url.endswith('.ism/Manifest'):
                    formats.extend(self._extract_ism_formats(
                        format_url, video_id, ism_id='mss', fatal=False))
                else:
                    # Progressive download: only accept slots we know the
                    # dimensions of.
                    mfs_path = e.get('Type')
                    mfs_info = self._MEDIA_FILE_SLOTS.get(mfs_path)
                    if not mfs_info:
                        continue
                    formats.append({
                        'url': format_url,
                        'format_id': 'http-' + mfs_path.split('.')[0],
                        'width': mfs_info['width'],
                        'height': mfs_info['height'],
                    })
            self._sort_formats(formats)
            description = media.get('Description')
            duration = int_or_none(media.get(
                'DurationInSeconds')) or parse_duration(media.get('Duration'))
            view_count = int_or_none(media.get('CountViews'))
            average_rating = float_or_none(media.get('UserRating'))
            age_limit = parse_age_limit(media.get('Rating'))
            genre = media.get('Genre')
            release_year = int_or_none(media.get('ReleaseYear'))
            creator = media.get('Directors')
            artist = media.get('Cast')
            if media.get('MediaTypeDisplayValue') == 'Full Episode':
                series = media.get('ShowName')
                episode = title
                season_number = int_or_none(media.get('Season'))
                episode_number = int_or_none(media.get('Episode'))
            else:
                series = episode = season_number = episode_number = None
            subtitles = {}
            cc_files = media.get('ClosedCaptionFiles')
            if isinstance(cc_files, list):
                for cc_file in cc_files:
                    if not isinstance(cc_file, dict):
                        continue
                    cc_url = url_or_none(cc_file.get('Path'))
                    if not cc_url:
                        continue
                    lang = cc_file.get('Locale') or 'en'
                    subtitles.setdefault(lang, []).append({'url': cc_url})
            thumbnails = []
            images = media.get('Images')
            # BUGFIX: this previously checked `isinstance(images, list)` and
            # then called `images.items()`, which raises AttributeError for a
            # list -- the branch could never succeed. `.items()` and the
            # 'Img_<W>x<H>' keys imply a dict payload, so guard on dict; any
            # other payload shape is skipped instead of crashing extraction.
            if isinstance(images, dict):
                for image_key, image_url in images.items():
                    mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
                    if not mobj:
                        continue
                    thumbnails.append({
                        'url': image_url,
                        'width': int(mobj.group(1)),
                        'height': int(mobj.group(2)),
                    })
            return {
                'id': video_id,
                'title': title,
                'description': description,
                'duration': duration,
                'view_count': view_count,
                'average_rating': average_rating,
                'age_limit': age_limit,
                'genre': genre,
                'creator': creator,
                'artist': artist,
                'release_year': release_year,
                'series': series,
                'episode': episode,
                'season_number': season_number,
                'episode_number': episode_number,
                'thumbnails': thumbnails,
                'subtitles': subtitles,
                'formats': formats,
            }
        # Every country was geo-blocked; surface the last 401 error.
        raise last_e
| true | true |
f73ef6fa474f506787e08bba7e9f951f15ccd11b | 1,264 | py | Python | gg_custom/doc_events/payment_entry.py | libermatic/gg_custom | 3aa14165dc19cccedb651adb196f06882fc123e4 | [
"MIT"
] | 1 | 2021-07-07T12:43:15.000Z | 2021-07-07T12:43:15.000Z | gg_custom/doc_events/payment_entry.py | libermatic/gg_custom | 3aa14165dc19cccedb651adb196f06882fc123e4 | [
"MIT"
] | null | null | null | gg_custom/doc_events/payment_entry.py | libermatic/gg_custom | 3aa14165dc19cccedb651adb196f06882fc123e4 | [
"MIT"
] | 1 | 2021-07-07T12:43:15.000Z | 2021-07-07T12:43:15.000Z | from __future__ import unicode_literals
import frappe
from toolz.curried import compose, unique, map, filter
def on_submit(doc, method):
    """Payment Entry ``on_submit`` hook: refresh Booking Order payment
    statuses for every referenced Sales Invoice."""
    sales_invoice_refs = [
        ref for ref in doc.references
        if ref.reference_doctype == "Sales Invoice"
    ]
    _update_booking_orders(sales_invoice_refs)
def on_cancel(doc, method):
    """Payment Entry ``on_cancel`` hook: refresh Booking Order payment
    statuses for every referenced Sales Invoice."""
    sales_invoice_refs = [
        ref for ref in doc.references
        if ref.reference_doctype == "Sales Invoice"
    ]
    _update_booking_orders(sales_invoice_refs)
def _update_booking_orders(references):
    """Recompute ``payment_status`` for each Booking Order linked to the
    given Sales Invoice references.

    Status rules, computed over all submitted invoices of the order:
      * total billed < order total          -> "Unbilled"
      * fully billed and nothing outstanding -> "Paid"
      * otherwise                            -> "Unpaid"

    :param references: Payment Entry reference rows whose
        ``reference_name`` points at a Sales Invoice.
    """
    # reference rows -> linked Booking Order names -> de-duplicated,
    # non-empty -> loaded Booking Order documents.
    get_booking_orders = compose(
        map(lambda x: frappe.get_doc("Booking Order", x)),
        filter(None),  # drop invoices with no linked Booking Order
        unique,  # update each Booking Order only once
        map(
            lambda x: frappe.get_cached_value(
                "Sales Invoice", x.reference_name, "gg_booking_order"
            )
        ),
    )
    for bo in get_booking_orders(references):
        invoices = frappe.get_all(
            "Sales Invoice",
            filters={"docstatus": 1, "gg_booking_order": bo.name},
            fields=["total", "outstanding_amount"],
        )
        # Generator expressions: no need to materialize throwaway lists.
        if sum(x.get("total") for x in invoices) < bo.total_amount:
            bo.payment_status = "Unbilled"
        elif sum(x.get("outstanding_amount") for x in invoices) == 0:
            bo.payment_status = "Paid"
        else:
            bo.payment_status = "Unpaid"
        bo.save()
| 29.395349 | 77 | 0.606804 | from __future__ import unicode_literals
import frappe
from toolz.curried import compose, unique, map, filter
def on_submit(doc, method):
    """Payment Entry ``on_submit`` hook: refresh Booking Order payment
    statuses for the referenced Sales Invoices."""
    _update_booking_orders(
        [x for x in doc.references if x.reference_doctype == "Sales Invoice"]
    )
def on_cancel(doc, method):
    """Payment Entry ``on_cancel`` hook: refresh Booking Order payment
    statuses for the referenced Sales Invoices."""
    _update_booking_orders(
        [x for x in doc.references if x.reference_doctype == "Sales Invoice"]
    )
def _update_booking_orders(references):
    """Recompute ``payment_status`` for each Booking Order linked to the
    given Sales Invoice references.

    Status rules over all submitted invoices of the order:
    billed < order total -> "Unbilled"; no outstanding amount -> "Paid";
    otherwise -> "Unpaid".
    """
    # reference rows -> linked Booking Order names -> de-duplicated,
    # non-empty -> loaded Booking Order documents.
    get_booking_orders = compose(
        map(lambda x: frappe.get_doc("Booking Order", x)),
        filter(None),  # drop invoices with no linked Booking Order
        unique,  # update each Booking Order only once
        map(
            lambda x: frappe.get_cached_value(
                "Sales Invoice", x.reference_name, "gg_booking_order"
            )
        ),
    )
    for bo in get_booking_orders(references):
        invoices = frappe.get_all(
            "Sales Invoice",
            filters={"docstatus": 1, "gg_booking_order": bo.name},
            fields=["total", "outstanding_amount"],
        )
        if sum([x.get("total") for x in invoices]) < bo.total_amount:
            bo.payment_status = "Unbilled"
        elif sum([x.get("outstanding_amount") for x in invoices]) == 0:
            bo.payment_status = "Paid"
        else:
            bo.payment_status = "Unpaid"
        bo.save()
| true | true |
f73ef70cbe103a2be2e2f515a8652492733571ee | 29 | py | Python | src/python-import-from-url/tests/fixture/agent/bad_execution_code.py | ivangeorgiev/gems | 823076051695029b4d699744dc76c959a8476230 | [
"CC0-1.0"
] | 10 | 2020-11-12T23:45:31.000Z | 2022-03-25T07:29:42.000Z | src/python-import-from-url/tests/fixture/agent/bad_execution_code.py | ivangeorgiev/gems | 823076051695029b4d699744dc76c959a8476230 | [
"CC0-1.0"
] | null | null | null | src/python-import-from-url/tests/fixture/agent/bad_execution_code.py | ivangeorgiev/gems | 823076051695029b4d699744dc76c959a8476230 | [
"CC0-1.0"
] | 7 | 2020-12-15T20:40:00.000Z | 2022-03-18T01:41:48.000Z |
# NOTE(review): deliberately calls an undefined name. Presumably a test
# fixture exercising error handling of dynamically executed code -- confirm
# before "fixing" this; do not define the function.
do_you_know_this_function()
| 9.666667 | 27 | 0.862069 |
do_you_know_this_function()
| true | true |
f73ef74024fcd0c7728702b93dde518f191d6d9a | 3,815 | py | Python | torch_audiomentations/augmentations/impulse_response.py | KentoNishi/torch-audiomentations | d7d3ae5787a354f720da7e9abfea8ba91900cab3 | [
"MIT"
] | 430 | 2020-06-22T16:30:11.000Z | 2022-03-29T10:14:56.000Z | torch_audiomentations/augmentations/impulse_response.py | keunwoochoi/torch-audiomentations | e8547601f3336fea478bce97a0fc6b1e06744a8c | [
"MIT"
] | 106 | 2020-09-23T10:21:21.000Z | 2022-03-31T13:59:27.000Z | torch_audiomentations/augmentations/impulse_response.py | keunwoochoi/torch-audiomentations | e8547601f3336fea478bce97a0fc6b1e06744a8c | [
"MIT"
] | 48 | 2020-09-23T12:34:07.000Z | 2022-03-28T09:53:56.000Z | import random
from pathlib import Path
from typing import Union, List
import torch
from torch.nn.utils.rnn import pad_sequence
from ..core.transforms_interface import BaseWaveformTransform, EmptyPathException
from ..utils.convolution import convolve
from ..utils.file import find_audio_files
from ..utils.io import Audio
class ApplyImpulseResponse(BaseWaveformTransform):
    """
    Convolve the given audio with impulse responses.
    """
    # Note: This transform has only partial support for multichannel audio. IRs that are not
    # mono get mixed down to mono before they are convolved with all channels in the input.
    supports_multichannel = True
    requires_sample_rate = True
    def __init__(
        self,
        ir_paths: Union[List[Path], List[str], Path, str],
        convolve_mode: str = "full",
        compensate_for_propagation_delay: bool = False,
        mode: str = "per_example",
        p: float = 0.5,
        p_mode: str = None,
        sample_rate: int = None,
    ):
        """
        :param ir_paths: Either a path to a folder with audio files or a list of paths to audio files.
        :param convolve_mode: Passed as ``mode`` to the convolution helper
            (default "full").
        :param compensate_for_propagation_delay: Convolving audio with a RIR normally
            introduces a bit of delay, especially when the peak absolute amplitude in the
            RIR is not in the very beginning. When compensate_for_propagation_delay is
            set to True, the returned slices of audio will be offset to compensate for
            this delay.
        :param mode: Forwarded to BaseWaveformTransform.
        :param p: Forwarded to BaseWaveformTransform.
        :param p_mode: Forwarded to BaseWaveformTransform.
        :param sample_rate: If given, IRs are loaded/resampled at this rate up
            front; otherwise the rate passed per call is used.
        """
        super().__init__(mode, p, p_mode, sample_rate)
        if isinstance(ir_paths, (list, tuple, set)):
            # TODO: check that one can read audio files
            self.ir_paths = list(ir_paths)
        else:
            self.ir_paths = find_audio_files(ir_paths)
        if sample_rate is not None:
            self.audio = Audio(sample_rate=sample_rate, mono=True)
        if len(self.ir_paths) == 0:
            raise EmptyPathException("There are no supported audio files found.")
        self.convolve_mode = convolve_mode
        self.compensate_for_propagation_delay = compensate_for_propagation_delay
    def randomize_parameters(self, selected_samples, sample_rate: int = None):
        """Pick one random IR per example (with replacement) and stash the
        zero-padded batch in ``transform_parameters["ir"]``."""
        batch_size, _, _ = selected_samples.shape
        audio = self.audio if hasattr(self, "audio") else Audio(sample_rate, mono=True)
        self.transform_parameters["ir"] = pad_sequence(
            [
                audio(ir_path).transpose(0, 1)
                for ir_path in random.choices(self.ir_paths, k=batch_size)
            ],
            batch_first=True,
            padding_value=0.0,
        ).transpose(1, 2)
    def apply_transform(self, selected_samples, sample_rate: int = None):
        """Convolve each example with its sampled IR and trim the result back
        to the input length (optionally offset by the IR's peak delay)."""
        batch_size, num_channels, num_samples = selected_samples.shape
        # (batch_size, 1, max_ir_length)
        ir = self.transform_parameters["ir"].to(selected_samples.device)
        # The mono IR is expanded (broadcast, no copy) across all channels.
        convolved_samples = convolve(
            selected_samples, ir.expand(-1, num_channels, -1), mode=self.convolve_mode
        )
        if self.compensate_for_propagation_delay:
            # Delay estimate = index of the IR's peak absolute amplitude.
            propagation_delays = ir.abs().argmax(dim=2, keepdim=False)[:, 0]
            convolved_samples = torch.stack(
                [
                    convolved_sample[
                        :, propagation_delay : propagation_delay + num_samples
                    ]
                    for convolved_sample, propagation_delay in zip(
                        convolved_samples, propagation_delays
                    )
                ],
                dim=0,
            )
            return convolved_samples
        else:
            return convolved_samples[..., :num_samples]
| 35 | 102 | 0.62962 | import random
from pathlib import Path
from typing import Union, List
import torch
from torch.nn.utils.rnn import pad_sequence
from ..core.transforms_interface import BaseWaveformTransform, EmptyPathException
from ..utils.convolution import convolve
from ..utils.file import find_audio_files
from ..utils.io import Audio
class ApplyImpulseResponse(BaseWaveformTransform):
    """Convolve the given audio with randomly chosen impulse responses."""
    # NOTE(review): multichannel support appears partial -- IRs are loaded
    # mono (Audio(..., mono=True)) and broadcast to all input channels.
    supports_multichannel = True
    requires_sample_rate = True
    def __init__(
        self,
        ir_paths: Union[List[Path], List[str], Path, str],
        convolve_mode: str = "full",
        compensate_for_propagation_delay: bool = False,
        mode: str = "per_example",
        p: float = 0.5,
        p_mode: str = None,
        sample_rate: int = None,
    ):
        """
        :param ir_paths: A folder of audio files or a list of audio file paths.
        :param convolve_mode: Passed as ``mode`` to the convolution helper.
        :param compensate_for_propagation_delay: When True, offset the output
            slice by the index of the IR's peak absolute amplitude.
        :param mode: Forwarded to BaseWaveformTransform.
        :param p: Forwarded to BaseWaveformTransform.
        :param p_mode: Forwarded to BaseWaveformTransform.
        :param sample_rate: If given, IRs are loaded at this rate up front.
        """
        super().__init__(mode, p, p_mode, sample_rate)
        if isinstance(ir_paths, (list, tuple, set)):
            self.ir_paths = list(ir_paths)
        else:
            self.ir_paths = find_audio_files(ir_paths)
        if sample_rate is not None:
            self.audio = Audio(sample_rate=sample_rate, mono=True)
        if len(self.ir_paths) == 0:
            raise EmptyPathException("There are no supported audio files found.")
        self.convolve_mode = convolve_mode
        self.compensate_for_propagation_delay = compensate_for_propagation_delay
    def randomize_parameters(self, selected_samples, sample_rate: int = None):
        """Pick one random IR per example (with replacement) and stash the
        zero-padded batch in ``transform_parameters["ir"]``."""
        batch_size, _, _ = selected_samples.shape
        audio = self.audio if hasattr(self, "audio") else Audio(sample_rate, mono=True)
        self.transform_parameters["ir"] = pad_sequence(
            [
                audio(ir_path).transpose(0, 1)
                for ir_path in random.choices(self.ir_paths, k=batch_size)
            ],
            batch_first=True,
            padding_value=0.0,
        ).transpose(1, 2)
    def apply_transform(self, selected_samples, sample_rate: int = None):
        """Convolve each example with its sampled IR and trim the result back
        to the input length (optionally offset by the IR's peak delay)."""
        batch_size, num_channels, num_samples = selected_samples.shape
        # ir shape: (batch_size, 1, max_ir_length); expand broadcasts it
        # across all channels without copying.
        ir = self.transform_parameters["ir"].to(selected_samples.device)
        convolved_samples = convolve(
            selected_samples, ir.expand(-1, num_channels, -1), mode=self.convolve_mode
        )
        if self.compensate_for_propagation_delay:
            # Delay estimate = index of the IR's peak absolute amplitude.
            propagation_delays = ir.abs().argmax(dim=2, keepdim=False)[:, 0]
            convolved_samples = torch.stack(
                [
                    convolved_sample[
                        :, propagation_delay : propagation_delay + num_samples
                    ]
                    for convolved_sample, propagation_delay in zip(
                        convolved_samples, propagation_delays
                    )
                ],
                dim=0,
            )
            return convolved_samples
        else:
            return convolved_samples[..., :num_samples]
| true | true |
f73ef7df12e6fe398210177da34bb6af7697172c | 117 | py | Python | python/demo_queries/publication_details/taxa_by_authority.py | jocelynpender/fna-query | e538563f63eaea7b4bc84b7446e7ed7b53001774 | [
"MIT"
] | null | null | null | python/demo_queries/publication_details/taxa_by_authority.py | jocelynpender/fna-query | e538563f63eaea7b4bc84b7446e7ed7b53001774 | [
"MIT"
] | 6 | 2020-01-30T16:52:47.000Z | 2021-06-02T00:59:48.000Z | python/demo_queries/publication_details/taxa_by_authority.py | jocelynpender/fna-query | e538563f63eaea7b4bc84b7446e7ed7b53001774 | [
"MIT"
] | null | null | null | from src.query import *
if __name__ == '__main__':
    # Demo query: select all taxa whose Authority property is "Linnaeus"
    # and write the results to taxa_by_linnaeus.csv.
    ask_query("[[Authority::Linnaeus]]", "taxa_by_linnaeus.csv")
| 23.4 | 64 | 0.692308 | from src.query import *
if __name__ == '__main__':
    # Demo query: select all taxa whose Authority property is "Linnaeus"
    # and write the results to taxa_by_linnaeus.csv.
    ask_query("[[Authority::Linnaeus]]", "taxa_by_linnaeus.csv")
| true | true |
f73ef8407e5a79d4fe07a150e0bf9f61ccbcabb9 | 2,925 | py | Python | meeting/hooks.py | Zlash65/meeting | 20f54ceb184b66d7814dd0e1fa686e6627d0a408 | [
"MIT"
] | null | null | null | meeting/hooks.py | Zlash65/meeting | 20f54ceb184b66d7814dd0e1fa686e6627d0a408 | [
"MIT"
] | null | null | null | meeting/hooks.py | Zlash65/meeting | 20f54ceb184b66d7814dd0e1fa686e6627d0a408 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "meeting"
app_title = "Meeting"
app_publisher = "Zlash65"
app_description = "Set up Meetings and stuff"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "zarrar65@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/meeting/css/meeting.css"
# app_include_js = "/assets/meeting/js/meeting.js"
# include js, css files in header of web template
# web_include_css = "/assets/meeting/css/meeting.css"
# web_include_js = "/assets/meeting/js/meeting.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "meeting.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "meeting.install.before_install"
# after_install = "meeting.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "meeting.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
doc_events = {
	# New Users automatically get an orientation meeting.
	"User": {
		"after_insert": "meeting.api.make_orientation_meeting"
	},
	# Keep meeting minute status in sync with its linked ToDo.
	"ToDo": {
		"on_update": "meeting.api.update_minute_status"
	}
}
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "meeting.tasks.all"
# ],
# "daily": [
# "meeting.tasks.daily"
# ],
# "hourly": [
# "meeting.tasks.hourly"
# ],
# "weekly": [
# "meeting.tasks.weekly"
# ]
# "monthly": [
# "meeting.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "meeting.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "meeting.event.get_events"
# }
| 22.159091 | 78 | 0.664274 |
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "meeting"
app_title = "Meeting"
app_publisher = "Zlash65"
app_description = "Set up Meetings and stuff"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "zarrar65@gmail.com"
app_license = "MIT"
# Frappe document-event hooks: run app callbacks when core doctypes change.
doc_events = {
	# New Users automatically get an orientation meeting.
	"User": {
		"after_insert": "meeting.api.make_orientation_meeting"
	},
	# Keep meeting minute status in sync with its linked ToDo.
	"ToDo": {
		"on_update": "meeting.api.update_minute_status"
	}
}
| true | true |
f73ef9358094273864da2ab9a75a2dcad7f01d96 | 10,289 | py | Python | mcmaps/mc/biomes/zoom.py | LanetheGreat/mcmaps | 823c4d267f8838140723cb3802c8e846e8060403 | [
"Apache-2.0"
] | null | null | null | mcmaps/mc/biomes/zoom.py | LanetheGreat/mcmaps | 823c4d267f8838140723cb3802c8e846e8060403 | [
"Apache-2.0"
] | null | null | null | mcmaps/mc/biomes/zoom.py | LanetheGreat/mcmaps | 823c4d267f8838140723cb3802c8e846e8060403 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Lane Shaw
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Layers used for zooming during biome generation. '''
from abc import abstractmethod
from ._abc import BaseLayer
from mcmaps.mc.constants import BIOME_ID
__all__ = [
'FuzzyZoomLayer', 'VoronoiZoomLayer', 'ZoomLayer',
]
class _BaseZoomLayer(BaseLayer):
    """Shared 2x zoom machinery: doubles the resolution of the child layer's
    biome grid, filling the in-between cells from neighbouring corner values.
    Subclasses define ``diagonal_func`` to pick the bottom-right cell.
    """
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return an ``x_width`` x ``z_depth`` grid at 2x the child layer's
        resolution, seeded per child cell for determinism."""
        # Child coordinates cover half the area, plus a border for sampling.
        child_x_pos = x_pos >> 1
        child_z_pos = z_pos >> 1
        child_x_width = (x_width >> 1) + 3
        child_z_depth = (z_depth >> 1) + 3
        zoom_z_depth = child_z_depth << 1
        biome_values = [[BIOME_ID.OCEAN] * z_depth for _ in range(x_width)]
        zoom_values = [[BIOME_ID.OCEAN] * zoom_z_depth for _ in range(child_x_width << 1)]
        child_values = self.child_layer.get_area(
            child_x_pos, child_z_pos, child_x_width, child_z_depth,
        )
        for z in range(child_z_depth - 1):
            # T=Top, B=Bottom
            # L=Left, R=Right
            #
            # [TL | TR]
            # [---+---]
            # [BL | BR]
            # Take each row from the child and place it every second row in the zoomed values.
            z2 = z << 1
            # Values are accumulated left to right along the X axis.
            top_accl = child_values[0][z + 0]  # Child TL
            bot_accl = child_values[0][z + 1]  # Child BL
            for x in range(child_x_width - 1):
                # Note: + binds tighter than <<, so this seeds at the doubled
                # world coordinate (child_x_pos + x) * 2.
                self.init_chunk_seed(child_x_pos + x << 1, child_z_pos + z << 1)
                top_next = child_values[x + 1][z + 0]  # Child TR
                bot_next = child_values[x + 1][z + 1]  # Child BR
                # Take each column from the child and place it every second column in the zoomed values.
                x2 = x << 1
                zoom_values[x2 + 0][z2 + 0] = top_accl  # Zoom TL
                zoom_values[x2 + 0][z2 + 1] = self.choose(top_accl, bot_accl)  # Zoom BL
                zoom_values[x2 + 1][z2 + 0] = self.choose(top_accl, top_next)  # Zoom TR
                zoom_values[x2 + 1][z2 + 1] = self.diagonal_func(  # Zoom BR
                    top_accl, top_next, bot_accl, bot_next,
                )
                top_accl = top_next
                bot_accl = bot_next
        # Extract the inner square, a subset of the zoomed values.
        x_offset = x_pos & 1
        z_offset = z_pos & 1
        for z in range(z_depth):
            for x in range(x_width):
                biome_values[x][z] = zoom_values[x + x_offset][z + z_offset]
        return biome_values
    def choose(self, *args):
        """Return one of ``args`` uniformly at random (seeded RNG)."""
        return args[self.nextInt(len(args))]
    @abstractmethod
    def diagonal_func(self, value_tl, value_tr, value_bl, value_br):
        ''' Calculates the result of using all 4 corner values for BR's values. '''
class FuzzyZoomLayer(_BaseZoomLayer):
    """2x zoom whose diagonal (bottom-right) cell is picked uniformly at
    random from all four surrounding corner values."""
    # The BR cell is simply a random choice among the four corners.
    diagonal_func = _BaseZoomLayer.choose
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return the zoomed grid; optionally emits debug output."""
        biome_values = super().get_area(x_pos, z_pos, x_width, z_depth)
        if self._debug:
            self._output_debug_data('FuzzyZoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
class ZoomLayer(_BaseZoomLayer):
    """2x zoom whose diagonal cell prefers the majority corner value,
    falling back to a random corner on ties."""
    @classmethod
    def zoom(cls, layer_seed, child, zoom_count, _debug=None):
        """Stack ``zoom_count`` ZoomLayers on top of ``child``, bumping the
        layer seed by one per level, and return the topmost layer."""
        layer = child
        for zoom in range(zoom_count):
            layer = cls(layer_seed + zoom, layer, _debug=_debug)
        return layer
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return the zoomed grid; optionally emits debug output."""
        biome_values = super().get_area(x_pos, z_pos, x_width, z_depth)
        if self._debug:
            self._output_debug_data('Zoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
    def diagonal_func(self, value_tl, value_tr, value_bl, value_br):
        """Pick the BR cell: a value shared by three corners wins, then a
        value shared by two (others differing), else a random corner.

        NOTE(review): several later branches look unreachable (e.g. the
        ``value_tr == value_tl`` checks duplicate earlier ``value_tl ==
        value_tr`` ones) and the ``value_br == ...`` branches return
        ``value_bl`` -- presumably a faithful port of reference world-gen
        code; verify against the original before simplifying.
        """
        if value_tr == value_bl and value_bl == value_br:
            return value_tr
        if value_tl == value_tr and value_tl == value_bl:
            return value_tl
        if value_tl == value_tr and value_tl == value_br:
            return value_tl
        if value_tl == value_bl and value_tl == value_br:
            return value_tl
        if value_tl == value_tr and value_bl != value_br:
            return value_tl
        if value_tl == value_bl and value_tr != value_br:
            return value_tl
        if value_tl == value_br and value_tr != value_bl:
            return value_tl
        if value_tr == value_tl and value_bl != value_br:
            return value_tr
        if value_tr == value_bl and value_tl != value_br:
            return value_tr
        if value_tr == value_br and value_tl != value_bl:
            return value_tr
        if value_bl == value_tl and value_tr != value_br:
            return value_bl
        if value_bl == value_tr and value_tl != value_br:
            return value_bl
        if value_bl == value_br and value_tl != value_tr:
            return value_bl
        if value_br == value_tl and value_tr != value_bl:
            return value_bl
        if value_br == value_tr and value_tl != value_bl:
            return value_bl
        if value_br == value_bl and value_tl != value_tr:
            return value_bl
        return self.choose(value_tl, value_tr, value_bl, value_br)
class VoronoiZoomLayer(BaseLayer):
    """4x zoom that stretches each child cell over a 4x4 block, assigning
    each pixel the value of the nearest of four jittered corner points
    (a Voronoi-like partition)."""
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return an ``x_width`` x ``z_depth`` grid at 4x the child layer's
        resolution."""
        x_pos -= 2
        z_pos -= 2
        child_x_pos = x_pos >> 2
        child_z_pos = z_pos >> 2
        child_x_width = (x_width >> 2) + 3
        child_z_depth = (z_depth >> 2) + 3
        zoom_z_depth = child_z_depth << 2
        biome_values = [[BIOME_ID.OCEAN] * z_depth for _ in range(x_width)]
        zoom_values = [[BIOME_ID.OCEAN] * zoom_z_depth for _ in range(child_x_width << 2)]
        child_values = self.child_layer.get_area(
            child_x_pos, child_z_pos, child_x_width, child_z_depth,
        )
        for z in range(child_z_depth - 1):
            # T=Top, B=Bottom
            # L=Left, R=Right
            #
            # [TL | TR]
            # [---+---]
            # [BL | BR]
            # Take each pixel in a row from the child and stretch it across a 4x4 cell.
            z2 = z << 2
            # Values are accumulated left to right along the X axis.
            top_accl = child_values[0][z + 0]  # Child TL
            bot_accl = child_values[0][z + 1]  # Child BL
            for x in range(child_x_width - 1):
                # Each corner point is jittered by up to 3.6 units, seeded by
                # its world coordinate so results are reproducible.
                # Random TL Corner X/Z
                self.init_chunk_seed(child_x_pos + x + 0 << 2, child_z_pos + z + 0 << 2)
                corner_tl_x = self.nextDouble(1024) * 3.6
                corner_tl_z = self.nextDouble(1024) * 3.6
                # Random TR Corner X/Z
                self.init_chunk_seed(child_x_pos + x + 1 << 2, child_z_pos + z + 0 << 2)
                corner_tr_x = self.nextDouble(1024) * 3.6 + 4.0
                corner_tr_z = self.nextDouble(1024) * 3.6
                # Random BL Corner X/Z
                self.init_chunk_seed(child_x_pos + x + 0 << 2, child_z_pos + z + 1 << 2)
                corner_bl_x = self.nextDouble(1024) * 3.6
                corner_bl_z = self.nextDouble(1024) * 3.6 + 4.0
                # Random BR Corner X/Z
                self.init_chunk_seed(child_x_pos + x + 1 << 2, child_z_pos + z + 1 << 2)
                corner_br_x = self.nextDouble(1024) * 3.6 + 4.0
                corner_br_z = self.nextDouble(1024) * 3.6 + 4.0
                top_next = child_values[x + 1][z + 0]  # Child TR
                bot_next = child_values[x + 1][z + 1]  # Child BR
                # Take each pixel in a column from the child and stretch it across a 4x4 cell.
                x2 = x << 2
                for cell_z in range(4):
                    for cell_x in range(4):
                        # Calculate pseudo-distances from each generated corner.
                        dist_tl = (cell_z-corner_tl_z) * (cell_z-corner_tl_z) + (cell_x-corner_tl_x) * (cell_x-corner_tl_x)
                        dist_tr = (cell_z-corner_tr_z) * (cell_z-corner_tr_z) + (cell_x-corner_tr_x) * (cell_x-corner_tr_x)
                        dist_bl = (cell_z-corner_bl_z) * (cell_z-corner_bl_z) + (cell_x-corner_bl_x) * (cell_x-corner_bl_x)
                        dist_br = (cell_z-corner_br_z) * (cell_z-corner_br_z) + (cell_x-corner_br_x) * (cell_x-corner_br_x)
                        if all(dist_tl < dist for dist in (dist_tr, dist_bl, dist_br)):
                            # Use the TL corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = top_accl
                        elif all(dist_tr < dist for dist in (dist_tl, dist_bl, dist_br)):
                            # Use the TR corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = top_next
                        elif all(dist_bl < dist for dist in (dist_tl, dist_tr, dist_br)):
                            # Use the BL corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = bot_accl
                        else:
                            # Use the BR corner if all others fail.
                            zoom_values[x2 + cell_x][z2 + cell_z] = bot_next
                top_accl = top_next
                bot_accl = bot_next
        # Extract the inner square, a subset of the zoomed values.
        x_offset = x_pos & 3
        z_offset = z_pos & 3
        for z in range(z_depth):
            for x in range(x_width):
                biome_values[x][z] = zoom_values[x + x_offset][z + z_offset]
        if self._debug:
            self._output_debug_data('VoronoiZoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
    def nextDouble(self, precision):
        """Return a pseudo-random float in [-0.5, 0.5) with the given
        granularity, driven by the seeded integer RNG."""
        return self.nextInt(precision) / precision - 0.5
| 39.725869 | 123 | 0.570318 |
hod
from ._abc import BaseLayer
from mcmaps.mc.constants import BIOME_ID
__all__ = [
'FuzzyZoomLayer', 'VoronoiZoomLayer', 'ZoomLayer',
]
class _BaseZoomLayer(BaseLayer):
    """Shared 2x zoom machinery: doubles the resolution of the child layer's
    biome grid, filling in-between cells from neighbouring corner values.
    Subclasses define ``diagonal_func`` to pick the bottom-right cell.
    """
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return an ``x_width`` x ``z_depth`` grid at 2x the child layer's
        resolution, seeded per child cell for determinism."""
        child_x_pos = x_pos >> 1
        child_z_pos = z_pos >> 1
        child_x_width = (x_width >> 1) + 3
        child_z_depth = (z_depth >> 1) + 3
        zoom_z_depth = child_z_depth << 1
        biome_values = [[BIOME_ID.OCEAN] * z_depth for _ in range(x_width)]
        zoom_values = [[BIOME_ID.OCEAN] * zoom_z_depth for _ in range(child_x_width << 1)]
        child_values = self.child_layer.get_area(
            child_x_pos, child_z_pos, child_x_width, child_z_depth,
        )
        for z in range(child_z_depth - 1):
            # Child rows land on every second zoomed row.
            z2 = z << 1
            top_accl = child_values[0][z + 0]
            bot_accl = child_values[0][z + 1]
            for x in range(child_x_width - 1):
                # + binds tighter than <<: seeds at the doubled world coord.
                self.init_chunk_seed(child_x_pos + x << 1, child_z_pos + z << 1)
                top_next = child_values[x + 1][z + 0]
                bot_next = child_values[x + 1][z + 1]
                x2 = x << 1
                # TL copies the child; BL/TR pick randomly between the two
                # adjacent corners; BR is delegated to the subclass.
                zoom_values[x2 + 0][z2 + 0] = top_accl
                zoom_values[x2 + 0][z2 + 1] = self.choose(top_accl, bot_accl)
                zoom_values[x2 + 1][z2 + 0] = self.choose(top_accl, top_next)
                zoom_values[x2 + 1][z2 + 1] = self.diagonal_func(
                    top_accl, top_next, bot_accl, bot_next,
                )
                top_accl = top_next
                bot_accl = bot_next
        # Extract the requested inner window from the zoomed grid.
        x_offset = x_pos & 1
        z_offset = z_pos & 1
        for z in range(z_depth):
            for x in range(x_width):
                biome_values[x][z] = zoom_values[x + x_offset][z + z_offset]
        return biome_values
    def choose(self, *args):
        """Return one of ``args`` uniformly at random (seeded RNG)."""
        return args[self.nextInt(len(args))]
    @abstractmethod
    def diagonal_func(self, value_tl, value_tr, value_bl, value_br):
        """Calculate the bottom-right cell from all four corner values."""
class FuzzyZoomLayer(_BaseZoomLayer):
    """2x zoom whose diagonal (bottom-right) cell is picked uniformly at
    random from all four surrounding corner values."""
    # The BR cell is simply a random choice among the four corners.
    diagonal_func = _BaseZoomLayer.choose
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return the zoomed grid; optionally emits debug output."""
        biome_values = super().get_area(x_pos, z_pos, x_width, z_depth)
        if self._debug:
            self._output_debug_data('FuzzyZoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
class ZoomLayer(_BaseZoomLayer):
    """2x zoom whose diagonal cell prefers the majority corner value,
    falling back to a random corner on ties."""
    @classmethod
    def zoom(cls, layer_seed, child, zoom_count, _debug=None):
        """Stack ``zoom_count`` ZoomLayers on top of ``child``, bumping the
        layer seed by one per level, and return the topmost layer."""
        layer = child
        for zoom in range(zoom_count):
            layer = cls(layer_seed + zoom, layer, _debug=_debug)
        return layer
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return the zoomed grid; optionally emits debug output."""
        biome_values = super().get_area(x_pos, z_pos, x_width, z_depth)
        if self._debug:
            self._output_debug_data('Zoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
    def diagonal_func(self, value_tl, value_tr, value_bl, value_br):
        """Pick the BR cell: a value shared by three corners wins, then a
        value shared by two (others differing), else a random corner.

        NOTE(review): several later branches look unreachable/duplicated --
        presumably a faithful port of reference world-gen code; verify
        against the original before simplifying.
        """
        if value_tr == value_bl and value_bl == value_br:
            return value_tr
        if value_tl == value_tr and value_tl == value_bl:
            return value_tl
        if value_tl == value_tr and value_tl == value_br:
            return value_tl
        if value_tl == value_bl and value_tl == value_br:
            return value_tl
        if value_tl == value_tr and value_bl != value_br:
            return value_tl
        if value_tl == value_bl and value_tr != value_br:
            return value_tl
        if value_tl == value_br and value_tr != value_bl:
            return value_tl
        if value_tr == value_tl and value_bl != value_br:
            return value_tr
        if value_tr == value_bl and value_tl != value_br:
            return value_tr
        if value_tr == value_br and value_tl != value_bl:
            return value_tr
        if value_bl == value_tl and value_tr != value_br:
            return value_bl
        if value_bl == value_tr and value_tl != value_br:
            return value_bl
        if value_bl == value_br and value_tl != value_tr:
            return value_bl
        if value_br == value_tl and value_tr != value_bl:
            return value_bl
        if value_br == value_tr and value_tl != value_bl:
            return value_bl
        if value_br == value_bl and value_tl != value_tr:
            return value_bl
        return self.choose(value_tl, value_tr, value_bl, value_br)
class VoronoiZoomLayer(BaseLayer):
    """4x zoom that stretches each child cell over a 4x4 block, assigning
    each pixel the value of the nearest of four jittered corner points
    (a Voronoi-like partition)."""
    def get_area(self, x_pos, z_pos, x_width, z_depth):
        """Return an ``x_width`` x ``z_depth`` grid at 4x the child layer's
        resolution."""
        x_pos -= 2
        z_pos -= 2
        child_x_pos = x_pos >> 2
        child_z_pos = z_pos >> 2
        child_x_width = (x_width >> 2) + 3
        child_z_depth = (z_depth >> 2) + 3
        zoom_z_depth = child_z_depth << 2
        biome_values = [[BIOME_ID.OCEAN] * z_depth for _ in range(x_width)]
        zoom_values = [[BIOME_ID.OCEAN] * zoom_z_depth for _ in range(child_x_width << 2)]
        child_values = self.child_layer.get_area(
            child_x_pos, child_z_pos, child_x_width, child_z_depth,
        )
        for z in range(child_z_depth - 1):
            z2 = z << 2
            top_accl = child_values[0][z + 0]
            bot_accl = child_values[0][z + 1]
            for x in range(child_x_width - 1):
                # Each corner point is jittered by up to 3.6 units, seeded by
                # its world coordinate so results are reproducible.
                self.init_chunk_seed(child_x_pos + x + 0 << 2, child_z_pos + z + 0 << 2)
                corner_tl_x = self.nextDouble(1024) * 3.6
                corner_tl_z = self.nextDouble(1024) * 3.6
                self.init_chunk_seed(child_x_pos + x + 1 << 2, child_z_pos + z + 0 << 2)
                corner_tr_x = self.nextDouble(1024) * 3.6 + 4.0
                corner_tr_z = self.nextDouble(1024) * 3.6
                self.init_chunk_seed(child_x_pos + x + 0 << 2, child_z_pos + z + 1 << 2)
                corner_bl_x = self.nextDouble(1024) * 3.6
                corner_bl_z = self.nextDouble(1024) * 3.6 + 4.0
                self.init_chunk_seed(child_x_pos + x + 1 << 2, child_z_pos + z + 1 << 2)
                corner_br_x = self.nextDouble(1024) * 3.6 + 4.0
                corner_br_z = self.nextDouble(1024) * 3.6 + 4.0
                top_next = child_values[x + 1][z + 0]
                bot_next = child_values[x + 1][z + 1]
                x2 = x << 2
                for cell_z in range(4):
                    for cell_x in range(4):
                        # Squared distances from the pixel to each corner.
                        dist_tl = (cell_z-corner_tl_z) * (cell_z-corner_tl_z) + (cell_x-corner_tl_x) * (cell_x-corner_tl_x)
                        dist_tr = (cell_z-corner_tr_z) * (cell_z-corner_tr_z) + (cell_x-corner_tr_x) * (cell_x-corner_tr_x)
                        dist_bl = (cell_z-corner_bl_z) * (cell_z-corner_bl_z) + (cell_x-corner_bl_x) * (cell_x-corner_bl_x)
                        dist_br = (cell_z-corner_br_z) * (cell_z-corner_br_z) + (cell_x-corner_br_x) * (cell_x-corner_br_x)
                        if all(dist_tl < dist for dist in (dist_tr, dist_bl, dist_br)):
                            # Use the TL corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = top_accl
                        elif all(dist_tr < dist for dist in (dist_tl, dist_bl, dist_br)):
                            # Use the TR corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = top_next
                        elif all(dist_bl < dist for dist in (dist_tl, dist_tr, dist_br)):
                            # Use the BL corner if it's closest.
                            zoom_values[x2 + cell_x][z2 + cell_z] = bot_accl
                        else:
                            # Use the BR corner if all others fail.
                            zoom_values[x2 + cell_x][z2 + cell_z] = bot_next
                top_accl = top_next
                bot_accl = bot_next
        # Extract the inner square, a subset of the zoomed values.
        x_offset = x_pos & 3
        z_offset = z_pos & 3
        for z in range(z_depth):
            for x in range(x_width):
                biome_values[x][z] = zoom_values[x + x_offset][z + z_offset]
        if self._debug:
            self._output_debug_data('VoronoiZoom', x_pos, z_pos, x_width, z_depth, biome_values)
        return biome_values
    def nextDouble(self, precision):
        """Return a pseudo-random float in [-0.5, 0.5) with the given
        granularity, driven by the seeded integer RNG."""
        return self.nextInt(precision) / precision - 0.5
| true | true |
f73ef93efcbdb5fa4801e735cf6f19b216c824a0 | 9,922 | py | Python | robel/dclaw/turn.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 109 | 2019-08-29T22:55:41.000Z | 2022-03-19T18:26:37.000Z | robel/dclaw/turn.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 12 | 2019-11-14T05:16:00.000Z | 2021-02-21T07:49:32.000Z | robel/dclaw/turn.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 40 | 2019-09-29T06:50:44.000Z | 2022-03-19T18:34:20.000Z | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Turn tasks with DClaw robots.
This is a single rotation of an object from an initial angle to a target angle.
"""
import abc
import collections
from typing import Dict, Optional, Sequence
import numpy as np
from transforms3d.euler import euler2quat
from robel.components.robot.dynamixel_robot import DynamixelRobotState
from robel.dclaw.base_env import BaseDClawObjectEnv
from robel.simulation.randomize import SimRandomizer
from robel.utils.configurable import configurable
from robel.utils.resources import get_asset_path
# The observation keys that are concatenated as the environment observation.
DEFAULT_OBSERVATION_KEYS = (
    'claw_qpos',
    'object_x',
    'object_y',
    'last_action',
    'target_error',
)
# Reset pose for the claw joints; the 3-element pose is repeated once per
# finger of the 3-fingered claw.
RESET_POSE = [0, -np.pi / 3, np.pi / 3] * 3
# Default MuJoCo model for the 3-finger DClaw with a 3-prong valve.
DCLAW3_ASSET_PATH = 'robel/dclaw/assets/dclaw3xh_valve3_v0.xml'
class BaseDClawTurn(BaseDClawObjectEnv, metaclass=abc.ABCMeta):
    """Shared logic for DClaw turn tasks."""
    def __init__(self,
                 asset_path: str = DCLAW3_ASSET_PATH,
                 observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
                 frame_skip: int = 40,
                 interactive: bool = False,
                 success_threshold: float = 0.1,
                 **kwargs):
        """Initializes the environment.
        Args:
            asset_path: The XML model file to load.
            observation_keys: The keys in `get_obs_dict` to concatenate as the
                observations returned by `step` and `reset`.
            frame_skip: The number of simulation steps per environment step.
            interactive: If True, allows the hardware guide motor to freely
                rotate and its current angle is used as the goal.
            success_threshold: The difference threshold (in radians) of the
                object position and the goal position within which we consider
                as a success.
        """
        super().__init__(
            sim_model=get_asset_path(asset_path),
            observation_keys=observation_keys,
            frame_skip=frame_skip,
            **kwargs)
        self._interactive = interactive
        self._success_threshold = success_threshold
        self._desired_claw_pos = RESET_POSE
        self._target_bid = self.model.body_name2id('target')
        # The following are modified (possibly every reset) by subclasses.
        self._initial_object_pos = 0
        self._initial_object_vel = 0
        self._set_target_object_pos(0)
    def _reset(self):
        """Resets the environment."""
        self._reset_dclaw_and_object(
            claw_pos=RESET_POSE,
            object_pos=self._initial_object_pos,
            object_vel=self._initial_object_vel,
            guide_pos=self._target_object_pos)
        # Disengage the motor.
        if self._interactive and self.robot.is_hardware:
            self.robot.set_motors_engaged('guide', False)
    def _step(self, action: np.ndarray):
        """Applies an action to the robot."""
        self.robot.step({
            'dclaw': action,
            'guide': np.atleast_1d(self._target_object_pos),
        })
    def get_obs_dict(self) -> Dict[str, np.ndarray]:
        """Returns the current observation of the environment.
        Returns:
            A dictionary of observation values. This should be an ordered
            dictionary if `observation_keys` isn't set.
        """
        claw_state, object_state, guide_state = self.robot.get_state(
            ['dclaw', 'object', 'guide'])
        # If in interactive mode, use the guide motor position as the goal.
        if self._interactive:
            self._set_target_object_pos(guide_state.qpos)
        # Calculate the signed angle difference to the target in [-pi, pi].
        target_error = self._target_object_pos - object_state.qpos
        target_error = np.mod(target_error + np.pi, 2 * np.pi) - np.pi
        obs_dict = collections.OrderedDict((
            ('claw_qpos', claw_state.qpos),
            ('claw_qvel', claw_state.qvel),
            ('object_x', np.cos(object_state.qpos)),
            ('object_y', np.sin(object_state.qpos)),
            ('object_qvel', object_state.qvel),
            ('last_action', self._get_last_action()),
            ('target_error', target_error),
        ))
        # Add hardware-specific state if present.
        if isinstance(claw_state, DynamixelRobotState):
            obs_dict['claw_current'] = claw_state.current
        return obs_dict
    def get_reward_dict(
            self,
            action: np.ndarray,
            obs_dict: Dict[str, np.ndarray],
    ) -> Dict[str, np.ndarray]:
        """Returns the reward for the given action and observation."""
        target_dist = np.abs(obs_dict['target_error'])
        claw_vel = obs_dict['claw_qvel']
        reward_dict = collections.OrderedDict((
            # Penalty for distance away from goal.
            ('target_dist_cost', -5 * target_dist),
            # Penalty for difference with nominal pose.
            ('pose_diff_cost',
             -1 * np.linalg.norm(obs_dict['claw_qpos'] - self._desired_claw_pos)
             ),
            # Penalty for high velocities (only joints moving >= 0.5 rad/s).
            ('joint_vel_cost',
             -1 * np.linalg.norm(claw_vel[np.abs(claw_vel) >= 0.5])),
            # Reward for close proximity with goal.
            ('bonus_small', 10 * (target_dist < 0.25)),
            ('bonus_big', 50 * (target_dist < 0.10)),
        ))
        return reward_dict
    def get_score_dict(
            self,
            obs_dict: Dict[str, np.ndarray],
            reward_dict: Dict[str, np.ndarray],
    ) -> Dict[str, np.ndarray]:
        """Returns a standardized measure of success for the environment."""
        target_dist = np.abs(obs_dict['target_error'])
        score_dict = collections.OrderedDict((
            ('points', 1.0 - target_dist / np.pi),
            ('success', target_dist < self._success_threshold),
        ))
        score_dict.update(
            self._get_safety_scores(
                pos=obs_dict['claw_qpos'],
                vel=obs_dict['claw_qvel'],
                current=obs_dict.get('claw_current'),
            ))
        return score_dict
    def _set_target_object_pos(self, target_pos: float,
                               unbounded: bool = False):
        """Sets the goal angle to the given position (radians)."""
        # Modulo to [-pi, pi].
        if not unbounded:
            target_pos = np.mod(target_pos + np.pi, 2 * np.pi) - np.pi
        self._target_object_pos = np.asarray(target_pos, dtype=np.float32)
        # Mark the target position in sim.
        # WARNING: euler2quat will mutate a passed numpy array.
        self.model.body_quat[self._target_bid] = euler2quat(
            0, 0, float(target_pos))
@configurable(pickleable=True)
class DClawTurnFixed(BaseDClawTurn):
    """Turn task with a deterministic setup.

    The object always starts at 0 radians and the goal is always a half
    rotation away (pi radians).
    """
    def _reset(self):
        # Fixed episode: start angle 0 degrees, goal angle 180 degrees.
        self._set_target_object_pos(np.pi)
        self._initial_object_pos = 0
        super()._reset()
@configurable(pickleable=True)
class DClawTurnRandom(BaseDClawTurn):
    """Turn task with randomized start and goal angles.

    Both the initial object angle and the goal angle are jittered by up to
    +/- 60 degrees every episode.
    """
    def _reset(self):
        jitter = np.pi / 3  # 60 degrees.
        # Start somewhere within +/- 60 degrees of zero.
        self._initial_object_pos = self.np_random.uniform(
            low=-jitter, high=jitter)
        # Goal is a half rotation away, also jittered by +/- 60 degrees.
        goal_offset = self.np_random.uniform(low=-jitter, high=jitter)
        self._set_target_object_pos(np.pi + goal_offset)
        super()._reset()
@configurable(pickleable=True)
class DClawTurnRandomDynamics(DClawTurnRandom):
    """Turn task with randomized angles and randomized simulation dynamics.

    In addition to the random start/goal angles of `DClawTurnRandom`, joint,
    actuator, geom, and body properties of the simulation are re-randomized
    at the start of every episode.
    """
    def __init__(self,
                 *args,
                 sim_observation_noise: Optional[float] = 0.05,
                 **kwargs):
        super().__init__(
            *args, sim_observation_noise=sim_observation_noise, **kwargs)
        self._randomizer = SimRandomizer(self)
        # DOFs covered by the per-episode randomization: every claw joint
        # plus the manipulated object.
        self._dof_indices = (
            self.robot.get_config('dclaw').qvel_indices.tolist() +
            self.robot.get_config('object').qvel_indices.tolist())
    def _reset(self):
        self._randomize_sim()
        super()._reset()
    def _randomize_sim(self):
        """Perturbs simulation parameters for the upcoming episode."""
        # Joint damping/friction and actuator gains.
        self._randomizer.randomize_dofs(
            self._dof_indices,
            damping_range=(0.005, 0.1),
            friction_loss_range=(0.001, 0.005),
        )
        self._randomizer.randomize_actuators(
            all_same=True,
            kp_range=(1, 3),
        )
        # Contact friction on all geoms in the scene.
        self._randomizer.randomize_geoms(
            all_same=True,
            friction_slide_range=(0.8, 1.2),
            friction_spin_range=(0.003, 0.007),
            friction_roll_range=(0.00005, 0.00015),
        )
        # Small positional perturbation of the mount.
        self._randomizer.randomize_bodies(
            ['mount'],
            position_perturb_range=(-0.01, 0.01),
        )
        # Visual (color) variation for the mount and the valve.
        self._randomizer.randomize_geoms(
            ['mount'],
            color_range=(0.2, 0.9),
        )
        self._randomizer.randomize_geoms(
            parent_body_names=['valve'],
            color_range=(0.2, 0.9),
        )
| 36.344322 | 80 | 0.621447 |
import abc
import collections
from typing import Dict, Optional, Sequence
import numpy as np
from transforms3d.euler import euler2quat
from robel.components.robot.dynamixel_robot import DynamixelRobotState
from robel.dclaw.base_env import BaseDClawObjectEnv
from robel.simulation.randomize import SimRandomizer
from robel.utils.configurable import configurable
from robel.utils.resources import get_asset_path
DEFAULT_OBSERVATION_KEYS = (
'claw_qpos',
'object_x',
'object_y',
'last_action',
'target_error',
)
RESET_POSE = [0, -np.pi / 3, np.pi / 3] * 3
DCLAW3_ASSET_PATH = 'robel/dclaw/assets/dclaw3xh_valve3_v0.xml'
class BaseDClawTurn(BaseDClawObjectEnv, metaclass=abc.ABCMeta):
def __init__(self,
asset_path: str = DCLAW3_ASSET_PATH,
observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
frame_skip: int = 40,
interactive: bool = False,
success_threshold: float = 0.1,
**kwargs):
super().__init__(
sim_model=get_asset_path(asset_path),
observation_keys=observation_keys,
frame_skip=frame_skip,
**kwargs)
self._interactive = interactive
self._success_threshold = success_threshold
self._desired_claw_pos = RESET_POSE
self._target_bid = self.model.body_name2id('target')
self._initial_object_pos = 0
self._initial_object_vel = 0
self._set_target_object_pos(0)
def _reset(self):
self._reset_dclaw_and_object(
claw_pos=RESET_POSE,
object_pos=self._initial_object_pos,
object_vel=self._initial_object_vel,
guide_pos=self._target_object_pos)
if self._interactive and self.robot.is_hardware:
self.robot.set_motors_engaged('guide', False)
def _step(self, action: np.ndarray):
self.robot.step({
'dclaw': action,
'guide': np.atleast_1d(self._target_object_pos),
})
def get_obs_dict(self) -> Dict[str, np.ndarray]:
claw_state, object_state, guide_state = self.robot.get_state(
['dclaw', 'object', 'guide'])
if self._interactive:
self._set_target_object_pos(guide_state.qpos)
target_error = self._target_object_pos - object_state.qpos
target_error = np.mod(target_error + np.pi, 2 * np.pi) - np.pi
obs_dict = collections.OrderedDict((
('claw_qpos', claw_state.qpos),
('claw_qvel', claw_state.qvel),
('object_x', np.cos(object_state.qpos)),
('object_y', np.sin(object_state.qpos)),
('object_qvel', object_state.qvel),
('last_action', self._get_last_action()),
('target_error', target_error),
))
if isinstance(claw_state, DynamixelRobotState):
obs_dict['claw_current'] = claw_state.current
return obs_dict
def get_reward_dict(
self,
action: np.ndarray,
obs_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
target_dist = np.abs(obs_dict['target_error'])
claw_vel = obs_dict['claw_qvel']
reward_dict = collections.OrderedDict((
('target_dist_cost', -5 * target_dist),
('pose_diff_cost',
-1 * np.linalg.norm(obs_dict['claw_qpos'] - self._desired_claw_pos)
),
('joint_vel_cost',
-1 * np.linalg.norm(claw_vel[np.abs(claw_vel) >= 0.5])),
('bonus_small', 10 * (target_dist < 0.25)),
('bonus_big', 50 * (target_dist < 0.10)),
))
return reward_dict
def get_score_dict(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
target_dist = np.abs(obs_dict['target_error'])
score_dict = collections.OrderedDict((
('points', 1.0 - target_dist / np.pi),
('success', target_dist < self._success_threshold),
))
score_dict.update(
self._get_safety_scores(
pos=obs_dict['claw_qpos'],
vel=obs_dict['claw_qvel'],
current=obs_dict.get('claw_current'),
))
return score_dict
def _set_target_object_pos(self, target_pos: float,
unbounded: bool = False):
if not unbounded:
target_pos = np.mod(target_pos + np.pi, 2 * np.pi) - np.pi
self._target_object_pos = np.asarray(target_pos, dtype=np.float32)
self.model.body_quat[self._target_bid] = euler2quat(
0, 0, float(target_pos))
@configurable(pickleable=True)
class DClawTurnFixed(BaseDClawTurn):
def _reset(self):
self._initial_object_pos = 0
self._set_target_object_pos(np.pi)
super()._reset()
@configurable(pickleable=True)
class DClawTurnRandom(BaseDClawTurn):
def _reset(self):
self._initial_object_pos = self.np_random.uniform(
low=-np.pi / 3, high=np.pi / 3)
self._set_target_object_pos(
np.pi + self.np_random.uniform(low=-np.pi / 3, high=np.pi / 3))
super()._reset()
@configurable(pickleable=True)
class DClawTurnRandomDynamics(DClawTurnRandom):
def __init__(self,
*args,
sim_observation_noise: Optional[float] = 0.05,
**kwargs):
super().__init__(
*args, sim_observation_noise=sim_observation_noise, **kwargs)
self._randomizer = SimRandomizer(self)
self._dof_indices = (
self.robot.get_config('dclaw').qvel_indices.tolist() +
self.robot.get_config('object').qvel_indices.tolist())
def _reset(self):
self._randomizer.randomize_dofs(
self._dof_indices,
damping_range=(0.005, 0.1),
friction_loss_range=(0.001, 0.005),
)
self._randomizer.randomize_actuators(
all_same=True,
kp_range=(1, 3),
)
self._randomizer.randomize_geoms(
all_same=True,
friction_slide_range=(0.8, 1.2),
friction_spin_range=(0.003, 0.007),
friction_roll_range=(0.00005, 0.00015),
)
self._randomizer.randomize_bodies(
['mount'],
position_perturb_range=(-0.01, 0.01),
)
self._randomizer.randomize_geoms(
['mount'],
color_range=(0.2, 0.9),
)
self._randomizer.randomize_geoms(
parent_body_names=['valve'],
color_range=(0.2, 0.9),
)
super()._reset()
| true | true |
f73efa7be5b2e2b12fb7644ab7921f9729c9b268 | 19,270 | py | Python | ophyd/ophydobj.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 16 | 2015-05-20T20:48:25.000Z | 2019-04-24T21:12:59.000Z | ophyd/ophydobj.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 594 | 2015-01-05T21:55:21.000Z | 2019-05-10T02:05:24.000Z | ophyd/ophydobj.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 34 | 2015-01-23T19:50:58.000Z | 2019-05-07T05:38:57.000Z | import functools
import time
import weakref
from enum import IntFlag
from itertools import count
from logging import LoggerAdapter, getLogger
from typing import ClassVar, FrozenSet
from .log import control_layer_logger
def select_version(cls, version):
    """Return the closest compatible registered version of *cls*.

    "Compatible" means a registered class whose version identifier compares
    ``<=`` to the requested ``version`` (using whatever type the class family
    uses for its version identifiers).

    Parameters
    ----------
    cls : type
        The base class to find a version of.
    version : any
        Must be the same type as used to define the class versions.
    """
    versions = cls._class_info_['versions']
    compatible = [candidate for candidate in versions if candidate <= version]
    return versions[max(compatible)]
# `enum.KEEP` (added in Python 3.11) makes IntFlag preserve out-of-range
# bits instead of rejecting them, matching the pre-3.11 behavior that
# Kind values rely on.
try:
    from enum import KEEP
    class IFBase(IntFlag, boundary=KEEP):
        ...
except ImportError:
    # Older Python: plain IntFlag already keeps extra bits.
    IFBase = IntFlag
class Kind(IFBase):
    """
    This is used in the .kind attribute of all OphydObj (Signals, Devices).

    A Device examines its components' .kind attribute to decide whether to
    traverse it in read(), read_configuration(), or neither. Additionally, it
    decides whether to include its name in `hints['fields']`.
    """
    omitted = 0b000
    normal = 0b001
    config = 0b010
    hinted = 0b101  # Notice that bool(hinted & normal) is True.
class UnknownSubscription(KeyError):
    """Raised when an event type is not among an object's subscriptions.

    Subclasses ``KeyError`` so existing ``except KeyError`` handlers
    continue to catch it.
    """
def register_instances_keyed_on_name(fail_if_late=False):
    """Track every OphydObj instance in a WeakValueDictionary keyed on name.

    Be advised that ophyd does not require 'name' to be unique and it is
    configurable by the user at run-time, so entries may be silently
    overwritten unless name uniqueness is enforced by other means.

    Parameters
    ----------
    fail_if_late : boolean
        If True, verify that OphydObj has not yet been instantiated and raise
        ``RuntimeError`` if it has, as a way of verifying that no instances
        will be "missed" by this registry. False by default.

    Returns
    -------
    WeakValueDictionary
    """
    registry = weakref.WeakValueDictionary()
    def _track(obj):
        registry[obj.name] = obj
    OphydObject.add_instantiation_callback(_track, fail_if_late)
    return registry
def register_instances_in_weakset(fail_if_late=False):
    """Track every OphydObj instance in a WeakSet.

    Be advised that OphydObj may not always be hashable.

    Parameters
    ----------
    fail_if_late : boolean
        If True, verify that OphydObj has not yet been instantiated and raise
        ``RuntimeError`` if it has, as a way of verifying that no instances
        will be "missed" by this registry. False by default.

    Returns
    -------
    WeakSet
    """
    registry = weakref.WeakSet()
    def _track(obj):
        registry.add(obj)
    OphydObject.add_instantiation_callback(_track, fail_if_late)
    return registry
class OphydObject:
    '''The base class for all objects in Ophyd
    Handles:
    * Subscription/callback mechanism
    Parameters
    ----------
    name : str, optional
        The name of the object.
    attr_name : str, optional
        The attr name on its parent (if it has one)
        ex ``getattr(self.parent, self.attr_name) is self``
    parent : parent, optional
        The object's parent, if it exists in a hierarchy
    kind : a member of the :class:`~ophydobj.Kind` :class:`~enum.IntEnum`
        (or equivalent integer), optional
        Default is ``Kind.normal``. See :class:`~ophydobj.Kind` for options.
    Attributes
    ----------
    name
    '''
    # Any callables appended to this mutable class variable will be notified
    # one time when a new instance of OphydObj is instantiated. See
    # OphydObject.add_instantiation_callback().
    __instantiation_callbacks = []
    _default_sub = None
    # This is set to True when the first OphydObj is instantiated. This may be
    # of interest to code that adds something to instantiation_callbacks, which
    # may want to know whether it has already "missed" any instances.
    __any_instantiated = False
    subscriptions: ClassVar[FrozenSet[str]] = frozenset()
    def __init__(self, *, name=None, attr_name='', parent=None, labels=None,
                 kind=None):
        if labels is None:
            labels = set()
        self._ophyd_labels_ = set(labels)
        if kind is None:
            kind = Kind.normal
        self.kind = kind
        super().__init__()
        # base name and ref to parent, these go with properties
        if name is None:
            name = ''
        self._attr_name = attr_name
        if not isinstance(name, str):
            raise ValueError("name must be a string.")
        self._name = name
        self._parent = parent
        # dictionary of wrapped callbacks
        self._callbacks = {k: {} for k in self.subscriptions}
        # this is to maintain api on clear_sub
        self._unwrapped_callbacks = {k: {} for k in self.subscriptions}
        # map cid -> back to which event it is in
        self._cid_to_event_mapping = dict()
        # cache of last inputs to _run_subs, the semi-private way
        # to trigger the callbacks for a given subscription to be run
        self._args_cache = {k: None for k in self.subscriptions}
        # count of subscriptions we have handed out, used to give unique ids
        self._cb_count = count()
        self.log = LoggerAdapter(getLogger('ophyd.objects'), {'ophyd_object_name': name})
        self.control_layer_log = LoggerAdapter(control_layer_logger, {'ophyd_object_name': name})
        if not self.__any_instantiated:
            self.log.debug("first instance of OphydObject: id=%s", id(self))
            OphydObject._mark_as_instantiated()
        self.__register_instance(self)
    @classmethod
    def _mark_as_instantiated(cls):
        # Record that at least one OphydObject now exists (see
        # add_instantiation_callback's fail_if_late option).
        cls.__any_instantiated = True
    @classmethod
    def add_instantiation_callback(cls, callback, fail_if_late=False):
        """
        Register a callback which will receive each OphydObject instance.
        Parameters
        ----------
        callback : callable
            Expected signature: ``f(ophydobj_instance)``
        fail_if_late : boolean
            If True, verify that OphydObj has not yet been instantiated and
            raise ``RuntimeError`` if it has, as a way of verifying that no
            instances will be "missed" by this registry. False by default.
        """
        if fail_if_late and OphydObject.__any_instantiated:
            # BUGFIX: the advice previously named a nonexistent parameter
            # ("fail_if_false"); the actual parameter is fail_if_late.
            raise RuntimeError(
                "OphydObject has already been instantiated at least once, and "
                "this callback will not be notified of those instances that "
                "have already been created. If that is acceptable for this "
                "application, set fail_if_late=False.")
        # This is a class variable.
        cls.__instantiation_callbacks.append(callback)
    @classmethod
    def __register_instance(cls, instance):
        """
        Notify the callbacks in OphydObject.instantiation_callbacks of an instance.
        """
        for callback in cls.__instantiation_callbacks:
            callback(instance)
    def __init_subclass__(cls, version=None, version_of=None,
                          version_type=None, **kwargs):
        'This is called automatically in Python for all subclasses of OphydObject'
        super().__init_subclass__(**kwargs)
        # Collect all SUB_* / _SUB_* class attributes as the set of valid
        # event types for this class.
        cls.subscriptions = frozenset(
            {
                getattr(cls, key)
                for key in dir(cls)
                if key.startswith('SUB') or key.startswith('_SUB')
            }
        )
        if version is None:
            if version_of is not None:
                raise RuntimeError('Must specify a version if `version_of` '
                                   'is specified')
            if version_type is None:
                return
            # Allow specification of version_type without specifying a version,
            # for use in a base class
            cls._class_info_ = dict(
                versions={},
                version=None,
                version_type=version_type,
                version_of=version_of
            )
            return
        if version_of is None:
            versions = {}
            version_of = cls
        else:
            versions = version_of._class_info_['versions']
            if version_type is None:
                version_type = version_of._class_info_['version_type']
            elif version_type != version_of._class_info_['version_type']:
                raise RuntimeError(
                    "version_type with in a family must be consistent, "
                    f"you passed in {version_type}, to {cls.__name__} "
                    f"but {version_of.__name__} has version_type "
                    f"{version_of._class_info_['version_type']}")
        if not issubclass(cls, version_of):
            raise RuntimeError(
                f'Versions are only valid for classes in the same '
                f'hierarchy. {cls.__name__} is not a subclass of '
                f'{version_of.__name__}.'
            )
        if versions is not None and version in versions:
            getLogger('ophyd.object').warning(
                'Redefining %r version %s: old=%r new=%r',
                version_of, version, versions[version], cls
            )
        versions[version] = cls
        cls._class_info_ = dict(
            versions=versions,
            version=version,
            version_type=version_type,
            version_of=version_of
        )
    def _validate_kind(self, val):
        # Accept a Kind member, a compatible integer, or a (case-insensitive)
        # member name such as "hinted".
        if isinstance(val, str):
            return Kind[val.lower()]
        return Kind(val)
    @property
    def kind(self):
        return self._kind
    @kind.setter
    def kind(self, val):
        self._kind = self._validate_kind(val)
    @property
    def dotted_name(self) -> str:
        """Return the dotted name
        """
        names = []
        obj = self
        while obj.parent is not None:
            names.append(obj.attr_name)
            obj = obj.parent
        return '.'.join(names[::-1])
    @property
    def name(self):
        '''name of the device'''
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def attr_name(self):
        return self._attr_name
    @property
    def connected(self):
        '''If the device is connected.
        Subclasses should override this'''
        return True
    def destroy(self):
        '''Disconnect the object from the underlying control layer'''
        self.unsubscribe_all()
    @property
    def parent(self):
        '''The parent of the ophyd object.
        If at the top of its hierarchy, `parent` will be None
        '''
        return self._parent
    @property
    def root(self):
        "Walk parents to find ultimate ancestor (parent's parent...)."
        root = self
        while True:
            if root.parent is None:
                return root
            root = root.parent
    @property
    def report(self):
        '''A report on the object.'''
        return {}
    @property
    def event_types(self):
        '''Events that can be subscribed to via `obj.subscribe`
        '''
        return tuple(self.subscriptions)
    def _run_subs(self, *args, sub_type, **kwargs):
        '''Run a set of subscription callbacks
        Only the kwarg ``sub_type`` is required, indicating
        the type of callback to perform. All other positional arguments
        and kwargs are passed directly to the callback function.
        The host object will be injected into kwargs as 'obj' unless that key
        already exists.
        If the `timestamp` is None, then it will be replaced by the current
        time.
        No exceptions are raised if the callback functions fail.
        '''
        if sub_type not in self.subscriptions:
            raise UnknownSubscription(
                "Unknown subscription {!r}, must be one of {!r}"
                .format(sub_type, self.subscriptions))
        kwargs['sub_type'] = sub_type
        # Guarantee that the object will be in the kwargs
        kwargs.setdefault('obj', self)
        # And if a timestamp key exists, but isn't filled -- supply it with
        # a new timestamp
        if 'timestamp' in kwargs and kwargs['timestamp'] is None:
            kwargs['timestamp'] = time.time()
        # Shallow-copy the callback arguments for replaying the
        # callback at a later time (e.g., when a new subscription is made)
        self._args_cache[sub_type] = (tuple(args), dict(kwargs))
        # Iterate over a snapshot so callbacks may (un)subscribe safely.
        for cb in list(self._callbacks[sub_type].values()):
            cb(*args, **kwargs)
    def subscribe(self, callback, event_type=None, run=True):
        '''Subscribe to events this event_type generates.
        The callback will be called as ``cb(*args, **kwargs)`` with
        the values passed to `_run_subs` with the following additional keys:
        sub_type : the string value of the event_type
        obj : the host object, added if 'obj' not already in kwargs
        if the key 'timestamp' is in kwargs _and_ is None, then it will
        be replaced with the current time before running the callback.
        The ``*args``, ``**kwargs`` passed to _run_subs will be cached as
        shallow copies, be aware of passing in mutable data.
        .. warning::
           If the callback raises any exceptions when run they will be
           silently ignored.
        Parameters
        ----------
        callback : callable
            A callable function (that takes kwargs) to be run when the event is
            generated. The expected signature is ::
                def cb(*args, obj: OphydObject, sub_type: str, **kwargs) -> None:
            The exact args/kwargs passed are whatever are passed to
            ``_run_subs``
        event_type : str, optional
            The name of the event to subscribe to (if None, defaults to
            the default sub for the instance - obj._default_sub)
            This maps to the ``sub_type`` kwargs in `_run_subs`
        run : bool, optional
            Run the callback now
        See Also
        --------
        clear_sub, _run_subs
        Returns
        -------
        cid : int
            id of callback, can be passed to `unsubscribe` to remove the
            callback
        '''
        if not callable(callback):
            raise ValueError("callback must be callable")
        # do default event type
        if event_type is None:
            # warnings.warn("Please specify which call back you wish to "
            #               "attach to defaulting to {}"
            #               .format(self._default_sub), stacklevel=2)
            event_type = self._default_sub
        if event_type is None:
            raise ValueError('Subscription type not set and object {} of class'
                             ' {} has no default subscription set'
                             ''.format(self.name, self.__class__.__name__))
        # check that this is a valid event type
        if event_type not in self.subscriptions:
            raise UnknownSubscription(
                "Unknown subscription {!r}, must be one of {!r}"
                .format(event_type, self.subscriptions))
        # wrapper for callback to snarf exceptions
        def wrap_cb(cb):
            @functools.wraps(cb)
            def inner(*args, **kwargs):
                try:
                    cb(*args, **kwargs)
                except Exception:
                    sub_type = kwargs['sub_type']
                    self.log.exception(
                        'Subscription %s callback exception (%s)',
                        sub_type, self)
            return inner
        # get next cid
        cid = next(self._cb_count)
        wrapped = wrap_cb(callback)
        self._unwrapped_callbacks[event_type][cid] = callback
        self._callbacks[event_type][cid] = wrapped
        self._cid_to_event_mapping[cid] = event_type
        if run:
            # Replay the most recent event of this type (if any) so new
            # subscribers immediately see the current state.
            cached = self._args_cache[event_type]
            if cached is not None:
                args, kwargs = cached
                wrapped(*args, **kwargs)
        return cid
    def _reset_sub(self, event_type):
        '''Remove all subscriptions in an event type'''
        self._callbacks[event_type].clear()
        self._unwrapped_callbacks[event_type].clear()
    def clear_sub(self, cb, event_type=None):
        '''Remove a subscription, given the original callback function
        See also :meth:`subscribe`, :meth:`unsubscribe`
        Parameters
        ----------
        cb : callable
            The callback
        event_type : str, optional
            The event to unsubscribe from (if None, removes it from all event
            types)
        '''
        if event_type is None:
            event_types = self.event_types
        else:
            event_types = [event_type]
        cid_list = []
        for et in event_types:
            for cid, target in self._unwrapped_callbacks[et].items():
                if cb == target:
                    cid_list.append(cid)
        for cid in cid_list:
            self.unsubscribe(cid)
    def unsubscribe(self, cid):
        """Remove a subscription
        See also :meth:`subscribe`, :meth:`clear_sub`
        Parameters
        ----------
        cid : int
            token return by :meth:`subscribe`
        """
        ev_type = self._cid_to_event_mapping.pop(cid, None)
        if ev_type is None:
            # Unknown or already-removed cid: a no-op by design.
            return
        del self._unwrapped_callbacks[ev_type][cid]
        del self._callbacks[ev_type][cid]
    def unsubscribe_all(self):
        for ev_type in self._callbacks:
            self._reset_sub(ev_type)
    def check_value(self, value, **kwargs):
        '''Check if the value is valid for this object
        This function does no normalization, but may raise if the
        value is invalid.
        Raises
        ------
        ValueError
        '''
        pass
    def __repr__(self):
        info = self._repr_info()
        info = ', '.join('{}={!r}'.format(key, value) for key, value in info)
        return '{}({})'.format(self.__class__.__name__, info)
    def _repr_info(self):
        'Yields pairs of (key, value) to generate the object repr'
        if self.name is not None:
            yield ('name', self.name)
        if self._parent is not None:
            yield ('parent', self.parent.name)
    def __copy__(self):
        '''Copy the ophyd object
        Shallow copying ophyd objects uses the repr information from the
        _repr_info method to create a new object.
        '''
        kwargs = dict(self._repr_info())
        return self.__class__(**kwargs)
    def __getnewargs_ex__(self):
        '''Used by pickle to serialize an ophyd object
        Returns
        -------
        (args, kwargs)
            Arguments to be passed to __init__, necessary to recreate this
            object
        '''
        kwargs = dict(self._repr_info())
        return ((), kwargs)
| 32.441077 | 97 | 0.59891 | import functools
import time
import weakref
from enum import IntFlag
from itertools import count
from logging import LoggerAdapter, getLogger
from typing import ClassVar, FrozenSet
from .log import control_layer_logger
def select_version(cls, version):
    """Return the closest registered class with version <= *version*.

    ``version`` must be the same type as used to register the class
    versions, so that ``<=`` comparisons are meaningful.
    """
    all_versions = cls._class_info_['versions']
    matched_version = max(ver for ver in all_versions if ver <= version)
    return all_versions[matched_version]
# `enum.KEEP` (added in Python 3.11) makes IntFlag preserve out-of-range
# bits instead of rejecting them; opt into it when available.
try:
    from enum import KEEP
    class IFBase(IntFlag, boundary=KEEP):
        ...
except ImportError:
    # Older Python: plain IntFlag already keeps extra bits.
    IFBase = IntFlag
class Kind(IFBase):
    """Bit flags stored in the .kind attribute of OphydObj instances.

    Controls whether an object is included in read(),
    read_configuration(), and/or `hints['fields']`.
    """
    omitted = 0b000
    normal = 0b001
    config = 0b010
    hinted = 0b101  # Note: bool(hinted & normal) is True.
class UnknownSubscription(KeyError):
    """Subclass of KeyError. Raised for an unknown event type."""
    ...
def register_instances_keyed_on_name(fail_if_late=False):
    """Register OphydObj instances in a WeakValueDictionary keyed on name.

    Names are not guaranteed unique, so entries may be overwritten.
    If ``fail_if_late`` is True, raises ``RuntimeError`` when any OphydObj
    has already been instantiated (those instances would be missed).
    Returns the WeakValueDictionary.
    """
    weak_dict = weakref.WeakValueDictionary()
    def register(instance):
        weak_dict[instance.name] = instance
    OphydObject.add_instantiation_callback(register, fail_if_late)
    return weak_dict
def register_instances_in_weakset(fail_if_late=False):
    """Register OphydObj instances in a WeakSet.

    Be advised that OphydObj may not always be hashable.
    If ``fail_if_late`` is True, raises ``RuntimeError`` when any OphydObj
    has already been instantiated (those instances would be missed).
    Returns the WeakSet.
    """
    weak_set = weakref.WeakSet()
    def register(instance):
        weak_set.add(instance)
    OphydObject.add_instantiation_callback(register, fail_if_late)
    return weak_set
class OphydObject:
__instantiation_callbacks = []
_default_sub = None
__any_instantiated = False
subscriptions: ClassVar[FrozenSet[str]] = frozenset()
    def __init__(self, *, name=None, attr_name='', parent=None, labels=None,
                 kind=None):
        """Initialize name/parent bookkeeping and the subscription tables."""
        if labels is None:
            labels = set()
        self._ophyd_labels_ = set(labels)
        if kind is None:
            kind = Kind.normal
        self.kind = kind
        super().__init__()
        if name is None:
            name = ''
        self._attr_name = attr_name
        if not isinstance(name, str):
            raise ValueError("name must be a string.")
        self._name = name
        self._parent = parent
        # Wrapped (exception-swallowing) callbacks, per event type.
        self._callbacks = {k: {} for k in self.subscriptions}
        # Original callbacks, kept so clear_sub() can match by identity.
        self._unwrapped_callbacks = {k: {} for k in self.subscriptions}
        # Maps a subscription id (cid) back to its event type.
        self._cid_to_event_mapping = dict()
        # Cache of the last _run_subs arguments per event type, replayed
        # for new subscribers that pass run=True.
        self._args_cache = {k: None for k in self.subscriptions}
        # Monotonic counter used to hand out unique subscription ids.
        self._cb_count = count()
        self.log = LoggerAdapter(getLogger('ophyd.objects'), {'ophyd_object_name': name})
        self.control_layer_log = LoggerAdapter(control_layer_logger, {'ophyd_object_name': name})
        if not self.__any_instantiated:
            self.log.debug("first instance of OphydObject: id=%s", id(self))
            OphydObject._mark_as_instantiated()
        self.__register_instance(self)
    @classmethod
    def _mark_as_instantiated(cls):
        """Record that at least one OphydObject now exists."""
        cls.__any_instantiated = True
@classmethod
def add_instantiation_callback(cls, callback, fail_if_late=False):
if fail_if_late and OphydObject.__any_instantiated:
raise RuntimeError(
"OphydObject has already been instantiated at least once, and "
"this callback will not be notified of those instances that "
"have already been created. If that is acceptable for this "
"application, set fail_if_false=False.")
cls.__instantiation_callbacks.append(callback)
@classmethod
def __register_instance(cls, instance):
for callback in cls.__instantiation_callbacks:
callback(instance)
def __init_subclass__(cls, version=None, version_of=None,
version_type=None, **kwargs):
super().__init_subclass__(**kwargs)
cls.subscriptions = frozenset(
{
getattr(cls, key)
for key in dir(cls)
if key.startswith('SUB') or key.startswith('_SUB')
}
)
if version is None:
if version_of is not None:
raise RuntimeError('Must specify a version if `version_of` '
'is specified')
if version_type is None:
return
cls._class_info_ = dict(
versions={},
version=None,
version_type=version_type,
version_of=version_of
)
return
if version_of is None:
versions = {}
version_of = cls
else:
versions = version_of._class_info_['versions']
if version_type is None:
version_type = version_of._class_info_['version_type']
elif version_type != version_of._class_info_['version_type']:
raise RuntimeError(
"version_type with in a family must be consistent, "
f"you passed in {version_type}, to {cls.__name__} "
f"but {version_of.__name__} has version_type "
f"{version_of._class_info_['version_type']}")
if not issubclass(cls, version_of):
raise RuntimeError(
f'Versions are only valid for classes in the same '
f'hierarchy. {cls.__name__} is not a subclass of '
f'{version_of.__name__}.'
)
if versions is not None and version in versions:
getLogger('ophyd.object').warning(
'Redefining %r version %s: old=%r new=%r',
version_of, version, versions[version], cls
)
versions[version] = cls
cls._class_info_ = dict(
versions=versions,
version=version,
version_type=version_type,
version_of=version_of
)
def _validate_kind(self, val):
if isinstance(val, str):
return Kind[val.lower()]
return Kind(val)
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, val):
self._kind = self._validate_kind(val)
@property
def dotted_name(self) -> str:
names = []
obj = self
while obj.parent is not None:
names.append(obj.attr_name)
obj = obj.parent
return '.'.join(names[::-1])
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attr_name(self):
return self._attr_name
@property
def connected(self):
return True
def destroy(self):
self.unsubscribe_all()
@property
def parent(self):
return self._parent
@property
def root(self):
root = self
while True:
if root.parent is None:
return root
root = root.parent
@property
def report(self):
return {}
@property
def event_types(self):
return tuple(self.subscriptions)
def _run_subs(self, *args, sub_type, **kwargs):
if sub_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(sub_type, self.subscriptions))
kwargs['sub_type'] = sub_type
kwargs.setdefault('obj', self)
# a new timestamp
if 'timestamp' in kwargs and kwargs['timestamp'] is None:
kwargs['timestamp'] = time.time()
# Shallow-copy the callback arguments for replaying the
# callback at a later time (e.g., when a new subscription is made)
self._args_cache[sub_type] = (tuple(args), dict(kwargs))
for cb in list(self._callbacks[sub_type].values()):
cb(*args, **kwargs)
def subscribe(self, callback, event_type=None, run=True):
if not callable(callback):
raise ValueError("callback must be callable")
# do default event type
if event_type is None:
# warnings.warn("Please specify which call back you wish to "
# "attach to defaulting to {}"
# .format(self._default_sub), stacklevel=2)
event_type = self._default_sub
if event_type is None:
raise ValueError('Subscription type not set and object {} of class'
' {} has no default subscription set'
''.format(self.name, self.__class__.__name__))
# check that this is a valid event type
if event_type not in self.subscriptions:
raise UnknownSubscription(
"Unknown subscription {!r}, must be one of {!r}"
.format(event_type, self.subscriptions))
# wrapper for callback to snarf exceptions
def wrap_cb(cb):
@functools.wraps(cb)
def inner(*args, **kwargs):
try:
cb(*args, **kwargs)
except Exception:
sub_type = kwargs['sub_type']
self.log.exception(
'Subscription %s callback exception (%s)',
sub_type, self)
return inner
# get next cid
cid = next(self._cb_count)
wrapped = wrap_cb(callback)
self._unwrapped_callbacks[event_type][cid] = callback
self._callbacks[event_type][cid] = wrapped
self._cid_to_event_mapping[cid] = event_type
if run:
cached = self._args_cache[event_type]
if cached is not None:
args, kwargs = cached
wrapped(*args, **kwargs)
return cid
def _reset_sub(self, event_type):
self._callbacks[event_type].clear()
self._unwrapped_callbacks[event_type].clear()
def clear_sub(self, cb, event_type=None):
if event_type is None:
event_types = self.event_types
else:
event_types = [event_type]
cid_list = []
for et in event_types:
for cid, target in self._unwrapped_callbacks[et].items():
if cb == target:
cid_list.append(cid)
for cid in cid_list:
self.unsubscribe(cid)
def unsubscribe(self, cid):
ev_type = self._cid_to_event_mapping.pop(cid, None)
if ev_type is None:
return
del self._unwrapped_callbacks[ev_type][cid]
del self._callbacks[ev_type][cid]
def unsubscribe_all(self):
for ev_type in self._callbacks:
self._reset_sub(ev_type)
def check_value(self, value, **kwargs):
pass
def __repr__(self):
info = self._repr_info()
info = ', '.join('{}={!r}'.format(key, value) for key, value in info)
return '{}({})'.format(self.__class__.__name__, info)
def _repr_info(self):
if self.name is not None:
yield ('name', self.name)
if self._parent is not None:
yield ('parent', self.parent.name)
def __copy__(self):
kwargs = dict(self._repr_info())
return self.__class__(**kwargs)
def __getnewargs_ex__(self):
kwargs = dict(self._repr_info())
return ((), kwargs)
| true | true |
f73efa905368690c9968c1900733ad9c5b029954 | 4,885 | py | Python | scripts/create_headers.py | Nikita240/LaTeX2AI | f76e9f1709ad93774358bcc13e6bdc4e7fcc8f40 | [
"MIT"
] | null | null | null | scripts/create_headers.py | Nikita240/LaTeX2AI | f76e9f1709ad93774358bcc13e6bdc4e7fcc8f40 | [
"MIT"
] | null | null | null | scripts/create_headers.py | Nikita240/LaTeX2AI | f76e9f1709ad93774358bcc13e6bdc4e7fcc8f40 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2020 Ivo Steinbrecher
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""
Create the header containing version information of the plugin.
"""
# Import python modules.
import sys
import subprocess
import os
from check_license import (get_license_text, license_to_source,
get_repository_dir)
def get_git_sha():
    """Return the git SHA of the repository HEAD as a string."""
    # Capture stdout of `git rev-parse HEAD`; the return code is not
    # checked, matching a best-effort query.
    completed = subprocess.run(['git', 'rev-parse', 'HEAD'],
                               stdout=subprocess.PIPE)
    return completed.stdout.decode('UTF-8').strip()
def create_cpp_headers():
    """
    Create the auto-generated C++ headers ``version.h`` (git SHA macro)
    and ``license.h`` (license text as a C string constant), each prefixed
    with the license as a C comment block.
    """
    # Render the license text as C- and TeX-style comment blocks.
    license_text = get_license_text()
    license_c = license_to_source(license_text, 'c')
    license_tex = license_to_source(license_text, 'tex')
    # version.h: include guard plus the current git SHA as a macro.
    version_lines = [
        '\n',
        '// Automatic generated header with version information.',
        '#ifndef VERSION_H_',
        '#define VERSION_H_',
        '#define L2A_VERSION_GIT_SHA_HEAD_ "{}"'.format(get_git_sha()),
        '#endif',
        ''
        ]
    # license.h: TeX license text in namespace L2A::LICENSE.
    license_lines = [
        '\n',
        '// Automatic generated header with license information.',
        '#ifndef LICENSE_H_',
        '#define LICENSE_H_',
        'namespace L2A',
        '{',
        '    namespace LICENSE',
        '    {',
        '        static const char* tex_license_ ='
        ]
    # Each license line becomes an adjacent C string literal with the
    # double quotes escaped; the last one is terminated with ';'.
    for line in license_tex.split('\n'):
        license_lines.append('            "{}\\n"'.format(line.replace('"', '\\"')))
    license_lines[-1] = license_lines[-1] + ';'
    license_lines.extend([
        '    }',
        '}',
        '#endif',
        ''])
    # If it does not exist, create the directory for the header.
    dir_path = os.path.join(get_repository_dir(), 'l2a/src/auto_generated')
    os.makedirs(dir_path, exist_ok=True)
    # The script is called from the base repository directory.
    with open(os.path.join(dir_path, 'version.h'), 'w') as version_header:
        version_header.write(license_c + '\n'.join(version_lines))
    with open(os.path.join(dir_path, 'license.h'), 'w') as license_header:
        license_header.write(license_c + '\n'.join(license_lines))
def create_cs_headers():
    """
    Create the auto-generated C# header ``version.cs`` carrying the
    current git SHA, prefixed with the license as a C comment block.
    """
    # Render the license text as a C-style comment block.
    license_text = get_license_text()
    license_c = license_to_source(license_text, 'c')
    # version.cs: string constant with the git SHA in L2A.Constants.
    version_lines = [
        '\n',
        '// Automatic generated header with version information.',
        '// This header will be overwritten at each build!',
        'namespace L2A',
        '{',
        '    class Constants',
        '    {',
        '        public const string l2a_version_git_sha_head_ = "{}";'.format(
            get_git_sha()),
        '    }',
        '}',
        ''
        ]
    # If it does not exist, create the directory for the header.
    dir_path = os.path.join(get_repository_dir(),
                            'forms/src/auto_generated')
    os.makedirs(dir_path, exist_ok=True)
    # The script is called from the base repository directory.
    with open(os.path.join(dir_path, 'version.cs'), 'w') as version_header:
        version_header.write(license_c + '\n'.join(version_lines))
if __name__ == '__main__':
    # Script entry point: exactly one argument selects which header
    # flavor to generate ('cpp' or 'cs').
    if len(sys.argv) != 2:
        raise ValueError('Wrong number of system arguments.')
    # Change working directory to the script directory so relative paths
    # used by the helpers resolve consistently.
    os.chdir(os.path.dirname(__file__))
    if sys.argv[1] == 'cpp':
        create_cpp_headers()
    elif sys.argv[1] == 'cs':
        create_cs_headers()
    else:
        # Fixed typo in the user-facing message ('comandline').
        raise ValueError('Got unexpected command line argument.')
| 32.785235 | 80 | 0.624565 |
import sys
import subprocess
import os
from check_license import (get_license_text, license_to_source,
get_repository_dir)
def get_git_sha():
    """Return the git SHA of the repository HEAD as a string."""
    # Capture stdout of `git rev-parse HEAD`; the return code is ignored.
    process = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                               stdout=subprocess.PIPE)
    out, _err = process.communicate()
    return out.decode('UTF-8').strip()
def create_cpp_headers():
    """
    Create the auto-generated C++ headers ``version.h`` (git SHA macro)
    and ``license.h`` (license text as a C string constant), each prefixed
    with the license as a C comment block.
    """
    # Render the license text as C- and TeX-style comment blocks.
    license_text = get_license_text()
    license_c = license_to_source(license_text, 'c')
    license_tex = license_to_source(license_text, 'tex')
    # version.h: include guard plus the current git SHA as a macro.
    version_lines = [
        '\n',
        '// Automatic generated header with version information.',
        '#ifndef VERSION_H_',
        '#define VERSION_H_',
        '#define L2A_VERSION_GIT_SHA_HEAD_ "{}"'.format(get_git_sha()),
        '#endif',
        ''
    ]
    # license.h: TeX license text in namespace L2A::LICENSE.
    license_lines = [
        '\n',
        '// Automatic generated header with license information.',
        '#ifndef LICENSE_H_',
        '#define LICENSE_H_',
        'namespace L2A',
        '{',
        '    namespace LICENSE',
        '    {',
        '        static const char* tex_license_ ='
    ]
    # Each license line becomes an adjacent C string literal with the
    # double quotes escaped; the last one is terminated with ';'.
    for line in license_tex.split('\n'):
        license_lines.append('            "{}\\n"'.format(line.replace('"', '\\"')))
    license_lines[-1] = license_lines[-1] + ';'
    license_lines.extend([
        '    }',
        '}',
        '#endif',
        ''])
    # Create the output directory if it does not exist yet.
    dir_path = os.path.join(get_repository_dir(), 'l2a/src/auto_generated')
    os.makedirs(dir_path, exist_ok=True)
    # Prefix each generated header with the license comment block.
    with open(os.path.join(dir_path, 'version.h'), 'w') as version_header:
        version_header.write(license_c + '\n'.join(version_lines))
    with open(os.path.join(dir_path, 'license.h'), 'w') as license_header:
        license_header.write(license_c + '\n'.join(license_lines))
def create_cs_headers():
    """
    Create the auto-generated C# header ``version.cs`` carrying the
    current git SHA, prefixed with the license as a C comment block.
    """
    # Render the license text as a C-style comment block.
    license_text = get_license_text()
    license_c = license_to_source(license_text, 'c')
    # version.cs: string constant with the git SHA in L2A.Constants.
    version_lines = [
        '\n',
        '// Automatic generated header with version information.',
        '// This header will be overwritten at each build!',
        'namespace L2A',
        '{',
        '    class Constants',
        '    {',
        '        public const string l2a_version_git_sha_head_ = "{}";'.format(
            get_git_sha()),
        '    }',
        '}',
        ''
    ]
    # Create the output directory if it does not exist yet.
    dir_path = os.path.join(get_repository_dir(),
                            'forms/src/auto_generated')
    os.makedirs(dir_path, exist_ok=True)
    # Prefix the generated header with the license comment block.
    with open(os.path.join(dir_path, 'version.cs'), 'w') as version_header:
        version_header.write(license_c + '\n'.join(version_lines))
if __name__ == '__main__':
    # Script entry point: exactly one argument selects which header
    # flavor to generate ('cpp' or 'cs').
    if len(sys.argv) != 2:
        raise ValueError('Wrong number of system arguments.')
    # Change working directory to the script directory so relative paths
    # used by the helpers resolve consistently.
    os.chdir(os.path.dirname(__file__))
    if sys.argv[1] == 'cpp':
        create_cpp_headers()
    elif sys.argv[1] == 'cs':
        create_cs_headers()
    else:
        # Fixed typo in the user-facing message ('comandline').
        raise ValueError('Got unexpected command line argument.')
| true | true |
f73efab3d92188e4f727f0e4a1f216c9e517ec78 | 16,438 | py | Python | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_params.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | 2 | 2020-07-22T18:53:05.000Z | 2021-09-11T05:52:33.000Z | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_params.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_params.py | 0cool321/azure-cli | fd8e6d46d5cee682aff51e262c06bc40c01636ba | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
import getpass
from argcomplete.completers import FilesCompleter
from azure.mgmt.compute.models import (VirtualHardDisk,
CachingTypes,
ContainerServiceOchestratorTypes,
UpgradeMode)
from azure.mgmt.storage.models import SkuName
from azure.cli.core.commands import register_cli_argument, CliArgumentType, register_extra_cli_argument
from azure.cli.core.commands.arm import is_valid_resource_id
from azure.cli.core.commands.template_create import register_folded_cli_argument
from azure.cli.core.commands.parameters import \
(location_type, get_location_completion_list, get_one_of_subscription_locations,
get_resource_name_completion_list, tags_type, enum_choice_list, ignore_type)
from azure.cli.command_modules.vm._actions import \
(VMImageFieldAction, VMSSHFieldAction, VMDNSNameAction, load_images_from_aliases_doc,
get_vm_sizes, PrivateIpAction, _resource_not_exists)
from azure.cli.command_modules.vm._validators import \
(validate_nsg_name, validate_vm_nics, validate_vm_nic, validate_vm_create_nics,
validate_default_os_disk, validate_default_vnet, validate_default_storage_account,
validate_storage_suffix)
def get_urn_aliases_completion_list(prefix, **kwargs):  # pylint: disable=unused-argument
    """Shell completer returning the URN aliases of all known VM images."""
    return [image['urnAlias'] for image in load_images_from_aliases_doc()]
def get_vm_size_completion_list(prefix, action, parsed_args, **kwargs):  # pylint: disable=unused-argument
    """Shell completer returning VM size names for the parsed --location.

    If the parsed arguments carry no ``location`` attribute at all, an
    arbitrary location of the current subscription is used instead.
    """
    _missing = object()
    location = getattr(parsed_args, 'location', _missing)
    if location is _missing:
        location = get_one_of_subscription_locations()
    return [size.name for size in get_vm_sizes(location)]
# REUSABLE ARGUMENT DEFINITIONS
# Shared CliArgumentType instances referenced by many registrations below.
name_arg_type = CliArgumentType(options_list=('--name', '-n'), metavar='NAME')
multi_ids_type = CliArgumentType(nargs='+')
# Defaults to the OS user running the CLI.
admin_username_type = CliArgumentType(options_list=('--admin-username',), default=getpass.getuser(), required=False)
existing_vm_name = CliArgumentType(overrides=name_arg_type, help='The name of the virtual machine', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
vmss_name_type = CliArgumentType(name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help='Scale set name.', id_part='name')
# ARGUMENT REGISTRATION
# Arguments shared by the `vm` command group.
register_cli_argument('vm', 'vm_name', existing_vm_name)
register_cli_argument('vm', 'size', completer=get_vm_size_completion_list)
register_cli_argument('vm', 'tags', tags_type)
register_cli_argument('vm', 'name', arg_type=name_arg_type)
# `vm disk` arguments.
register_cli_argument('vm disk', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',))
register_cli_argument('vm disk', 'disk_name', options_list=('--name', '-n'), help='The data disk name. If missing, will retrieve from vhd uri')
register_cli_argument('vm disk', 'disk_size', help='Size of disk (GiB)', default=1023, type=int)
# Fixed user-facing help typo: 'Virutal' -> 'Virtual'.
register_cli_argument('vm disk', 'lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
register_cli_argument('vm disk', 'vhd', type=VirtualHardDisk, help='virtual hard disk\'s uri. For example:https://mystorage.blob.core.windows.net/vhds/d1.vhd')
register_cli_argument('vm disk', 'caching', help='Host caching policy', default=CachingTypes.none.value, **enum_choice_list(CachingTypes))
# Re-register --vm-name per disk subcommand with id_part disabled.
for item in ['attach-existing', 'attach-new', 'detach']:
    register_cli_argument('vm disk {}'.format(item), 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',), id_part=None)
register_cli_argument('vm availability-set', 'availability_set_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
register_cli_argument('vm access', 'username', options_list=('--username', '-u'), help='The user name')
register_cli_argument('vm access', 'password', options_list=('--password', '-p'), help='The user password')
# `acs` (Azure Container Service) arguments.
register_cli_argument('acs', 'name', arg_type=name_arg_type)
register_cli_argument('acs', 'orchestrator_type', **enum_choice_list(ContainerServiceOchestratorTypes))
# Some admin names (e.g. root, admin) are prohibited in ACS. Because we
# have no control over the orchestrators, default to a safe name.
register_cli_argument('acs', 'admin_username', options_list=('--admin-username',), default='azureuser', required=False)
register_cli_argument('acs', 'dns_name_prefix', options_list=('--dns-prefix', '-d'))
register_extra_cli_argument('acs create', 'generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing')
register_cli_argument('acs', 'container_service_name', options_list=('--name', '-n'), help='The name of the container service', completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
register_cli_argument('acs create', 'agent_vm_size', completer=get_vm_size_completion_list)
register_cli_argument('acs scale', 'new_agent_count', type=int, help='The number of agents for the cluster')
register_cli_argument('acs create', 'service_principal', help='Service principal for making calls into Azure APIs')
register_cli_argument('acs create', 'client_secret', help='Client secret to use with the service principal for making calls to Azure APIs')
register_cli_argument('vm capture', 'overwrite', action='store_true')
# `vm diagnostics` / `vm extension` arguments.
register_cli_argument('vm diagnostics', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',))
register_cli_argument('vm diagnostics set', 'storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
register_cli_argument('vm extension', 'vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), id_part='child_name')
register_cli_argument('vm extension', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',), id_part='name')
register_cli_argument('vm extension image', 'image_location', options_list=('--location', '-l'))
register_cli_argument('vm extension image', 'publisher_name', options_list=('--publisher',))
register_cli_argument('vm extension image', 'type', options_list=('--name', '-n'))
register_cli_argument('vm extension image', 'latest', action='store_true')
# The scale-set name argument appears under several parameter names.
for dest in ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']:
    register_cli_argument('vmss', dest, vmss_name_type)
    register_cli_argument('vmss deallocate', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
    register_cli_argument('vmss delete-instances', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
    register_cli_argument('vmss restart', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
    register_cli_argument('vmss start', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
    register_cli_argument('vmss stop', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
    register_cli_argument('vmss update-instances', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss', 'instance_id', id_part='child_name')
register_cli_argument('vmss', 'instance_ids', multi_ids_type, help='Space separated list of IDs (ex: 1 2 3 ...) or * for all instances')
register_cli_argument('vmss', 'tags', tags_type)
register_cli_argument('vmss extension', 'extension_name', name_arg_type, help='Name of the extension.')
register_cli_argument('vmss extension', 'vmss_name', id_part=None)
register_cli_argument('vmss diagnostics', 'vmss_name', id_part=None, help='Scale set name')
register_cli_argument('vmss extension image', 'publisher_name', options_list=('--publisher',), help='Image publisher name')
register_cli_argument('vmss extension image', 'type', options_list=('--name', '-n'), help='Extension name')
register_cli_argument('vmss extension image', 'latest', action='store_true')
register_cli_argument('vmss extension image', 'image_name', help='Image name')
register_cli_argument('vmss extension image', 'orderby', help='The sort to apply on the operation')
register_cli_argument('vmss extension image', 'top', help='Return top number of records')
register_cli_argument('vmss extension image', 'version', help='Extension version')
# Arguments common to `vm diagnostics` and `vmss diagnostics`.
# Fixed user-facing help typo: 'specfied' -> 'specified'.
for scope in ['vm diagnostics', 'vmss diagnostics']:
    register_cli_argument(scope, 'version', help='version of the diagnostics extension. Will use the latest if not specified')
    register_cli_argument(scope, 'settings', help='json string or a file path, which defines data to be collected.')
    register_cli_argument(scope, 'protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.')
# Extension auto-upgrade flag shared by `vm` and `vmss`.
for scope in ['vm', 'vmss']:
    register_cli_argument(scope, 'no_auto_upgrade', action='store_true', help='by doing this, extension system will not pick the highest minor version for the specified version number, and will not auto update to the latest build/revision number on any scale set updates in future.')
register_cli_argument('vm image list', 'image_location', location_type)
# `vm open-port` arguments.
register_cli_argument('vm open-port', 'vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
register_cli_argument('vm open-port', 'network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
register_cli_argument('vm open-port', 'apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
# NIC arguments for `vm nic` and `vmss nic`.
register_cli_argument('vm nic', 'vm_name', existing_vm_name, options_list=('--vm-name',), id_part=None)
register_cli_argument('vm nic', 'nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
register_cli_argument('vm nic show', 'nic', help='NIC name or ID.', validator=validate_vm_nic)
register_cli_argument('vmss nic', 'virtual_machine_scale_set_name', options_list=('--vmss-name',), help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
register_cli_argument('vmss nic', 'virtualmachine_index', options_list=('--instance-id',), id_part='child_name')
register_cli_argument('vmss nic', 'network_interface_name', options_list=('--name', '-n'), metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='grandchild_name')
register_cli_argument('network nic scale-set list', 'virtual_machine_scale_set_name', options_list=('--vmss-name',), completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')

# VM CREATE PARAMETER CONFIGURATION
# Default of None lets downstream logic pick per-OS defaults.
authentication_type = CliArgumentType(
    default=None,
    help='Password or SSH public key authentication. Defaults to password for Windows and SSH public key for Linux.',
    **enum_choice_list(['ssh', 'password'])
)
nsg_rule_type = CliArgumentType(
    default=None,
    help='Network security group rule to create. Defaults open ports for allowing RDP on Windows and allowing SSH on Linux.',
    **enum_choice_list(['RDP', 'SSH'])
)
register_cli_argument('vm create', 'network_interface_type', help=argparse.SUPPRESS)
# The type callable lets bare names (no '/') through and requires anything
# containing '/' to be a valid resource ID; otherwise coerces to ''.
# Fixed non-idiomatic membership test (PEP 8 / E713): `not '/' in val`.
register_cli_argument('vm create', 'network_interface_ids', options_list=('--nics',), nargs='+', help='Names or IDs of existing NICs to reference. The first NIC will be the primary NIC.', type=lambda val: val if ('/' not in val or is_valid_resource_id(val, ValueError)) else '', validator=validate_vm_create_nics)
# Name arguments for the create commands; the VM name must not already
# exist in the target resource group.
register_cli_argument('vm create', 'name', name_arg_type, validator=_resource_not_exists('Microsoft.Compute/virtualMachines'))
register_cli_argument('vmss create', 'name', name_arg_type)
register_cli_argument('vmss create', 'nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
# Arguments shared by `vm create` and `vmss create`. Several internal
# parameters are hidden from help output via argparse.SUPPRESS.
for scope in ['vm create', 'vmss create']:
    register_cli_argument(scope, 'location', completer=get_location_completion_list, help='Location in which to create the VM and related resources. If not specified, defaults to the resource group\'s location.')
    register_cli_argument(scope, 'custom_os_disk_uri', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_disk_type', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_disk_name', validator=validate_default_os_disk)
    register_cli_argument(scope, 'overprovision', action='store_false', default=None, options_list=('--disable-overprovision',))
    register_cli_argument(scope, 'upgrade_policy_mode', help=None, **enum_choice_list(UpgradeMode))
    register_cli_argument(scope, 'os_disk_uri', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_offer', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_publisher', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_sku', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_type', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'os_version', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'dns_name_type', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'admin_username', admin_username_type)
    register_cli_argument(scope, 'storage_type', help='The VM storage type.', **enum_choice_list(SkuName))
    register_cli_argument(scope, 'storage_suffix', ignore_type, validator=validate_storage_suffix)
    register_cli_argument(scope, 'subnet_name', help='The subnet name. Creates if creating a new VNet, references if referencing an existing VNet.')
    register_cli_argument(scope, 'admin_password', help='Password for the Virtual Machine if Authentication Type is Password.')
    register_cli_argument(scope, 'ssh_key_value', action=VMSSHFieldAction)
    register_cli_argument(scope, 'ssh_dest_key_path', completer=FilesCompleter())
    register_cli_argument(scope, 'dns_name_for_public_ip', action=VMDNSNameAction, options_list=('--public-ip-address-dns-name',), help='Globally unique DNS Name for the Public IP.')
    register_cli_argument(scope, 'authentication_type', authentication_type)
    # Folded arguments accept 'none'/'new'/an existing resource reference.
    register_folded_cli_argument(scope, 'availability_set', 'Microsoft.Compute/availabilitySets', new_flag_value=None, default_value_flag='none')
    register_cli_argument(scope, 'private_ip_address_allocation', help=argparse.SUPPRESS)
    register_cli_argument(scope, 'virtual_network_ip_address_prefix', options_list=('--vnet-ip-address-prefix',))
    register_cli_argument(scope, 'subnet_ip_address_prefix', options_list=('--subnet-ip-address-prefix',))
    register_cli_argument(scope, 'private_ip_address', help='Static private IP address (e.g. 10.0.0.5).', options_list=('--private-ip-address',), action=PrivateIpAction)
    # NOTE(review): help='' suppresses the help text here -- confirm intended.
    register_cli_argument(scope, 'public_ip_address_allocation', help='', default='dynamic', **enum_choice_list(['dynamic', 'static']))
    register_folded_cli_argument(scope, 'public_ip_address', 'Microsoft.Network/publicIPAddresses')
    register_folded_cli_argument(scope, 'storage_account', 'Microsoft.Storage/storageAccounts', validator=validate_default_storage_account, none_flag_value=None, default_value_flag='existingId')
    register_folded_cli_argument(scope, 'virtual_network', 'Microsoft.Network/virtualNetworks', options_list=('--vnet',), validator=validate_default_vnet, none_flag_value=None, default_value_flag='existingId')
    register_folded_cli_argument(scope, 'network_security_group', 'Microsoft.Network/networkSecurityGroups', options_list=('--nsg',))
    register_folded_cli_argument(scope, 'load_balancer', 'Microsoft.Network/loadBalancers')
    register_cli_argument(scope, 'network_security_group_rule', nsg_rule_type, options_list=('--nsg-rule',))
    register_extra_cli_argument(scope, 'image', options_list=('--image',), action=VMImageFieldAction, completer=get_urn_aliases_completion_list, required=True)
| 80.185366 | 314 | 0.773817 |
import argparse
import getpass
from argcomplete.completers import FilesCompleter
from azure.mgmt.compute.models import (VirtualHardDisk,
CachingTypes,
ContainerServiceOchestratorTypes,
UpgradeMode)
from azure.mgmt.storage.models import SkuName
from azure.cli.core.commands import register_cli_argument, CliArgumentType, register_extra_cli_argument
from azure.cli.core.commands.arm import is_valid_resource_id
from azure.cli.core.commands.template_create import register_folded_cli_argument
from azure.cli.core.commands.parameters import \
(location_type, get_location_completion_list, get_one_of_subscription_locations,
get_resource_name_completion_list, tags_type, enum_choice_list, ignore_type)
from azure.cli.command_modules.vm._actions import \
(VMImageFieldAction, VMSSHFieldAction, VMDNSNameAction, load_images_from_aliases_doc,
get_vm_sizes, PrivateIpAction, _resource_not_exists)
from azure.cli.command_modules.vm._validators import \
(validate_nsg_name, validate_vm_nics, validate_vm_nic, validate_vm_create_nics,
validate_default_os_disk, validate_default_vnet, validate_default_storage_account,
validate_storage_suffix)
def get_urn_aliases_completion_list(prefix, **kwargs):
    """Shell completer returning the URN aliases of all known VM images."""
    # prefix/**kwargs are required by the argcomplete completer protocol.
    images = load_images_from_aliases_doc()
    return [i['urnAlias'] for i in images]
def get_vm_size_completion_list(prefix, action, parsed_args, **kwargs):
    """Shell-completion helper: return VM size names for the parsed location.

    Falls back to an arbitrary location from the subscription when the
    partially-parsed arguments do not carry a --location yet.
    """
    if hasattr(parsed_args, 'location'):
        location = parsed_args.location
    else:
        location = get_one_of_subscription_locations()
    return [size.name for size in get_vm_sizes(location)]
name_arg_type = CliArgumentType(options_list=('--name', '-n'), metavar='NAME')
multi_ids_type = CliArgumentType(nargs='+')
admin_username_type = CliArgumentType(options_list=('--admin-username',), default=getpass.getuser(), required=False)
existing_vm_name = CliArgumentType(overrides=name_arg_type, help='The name of the virtual machine', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
vmss_name_type = CliArgumentType(name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help='Scale set name.', id_part='name')
register_cli_argument('vm', 'vm_name', existing_vm_name)
register_cli_argument('vm', 'size', completer=get_vm_size_completion_list)
register_cli_argument('vm', 'tags', tags_type)
register_cli_argument('vm', 'name', arg_type=name_arg_type)
register_cli_argument('vm disk', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',))
register_cli_argument('vm disk', 'disk_name', options_list=('--name', '-n'), help='The data disk name. If missing, will retrieve from vhd uri')
register_cli_argument('vm disk', 'disk_size', help='Size of disk (GiB)', default=1023, type=int)
register_cli_argument('vm disk', 'lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virutal Machine size.')
register_cli_argument('vm disk', 'vhd', type=VirtualHardDisk, help='virtual hard disk\'s uri. For example:https://mystorage.blob.core.windows.net/vhds/d1.vhd')
register_cli_argument('vm disk', 'caching', help='Host caching policy', default=CachingTypes.none.value, **enum_choice_list(CachingTypes))
for item in ['attach-existing', 'attach-new', 'detach']:
register_cli_argument('vm disk {}'.format(item), 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',), id_part=None)
register_cli_argument('vm availability-set', 'availability_set_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
register_cli_argument('vm access', 'username', options_list=('--username', '-u'), help='The user name')
register_cli_argument('vm access', 'password', options_list=('--password', '-p'), help='The user password')
register_cli_argument('acs', 'name', arg_type=name_arg_type)
register_cli_argument('acs', 'orchestrator_type', **enum_choice_list(ContainerServiceOchestratorTypes))
#some admin names are prohibited in acs, such as root, admin, etc. Because we have no control on the orchestrators, so default to a safe name.
register_cli_argument('acs', 'admin_username', options_list=('--admin-username',), default='azureuser', required=False)
register_cli_argument('acs', 'dns_name_prefix', options_list=('--dns-prefix', '-d'))
register_extra_cli_argument('acs create', 'generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing')
register_cli_argument('acs', 'container_service_name', options_list=('--name', '-n'), help='The name of the container service', completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
register_cli_argument('acs create', 'agent_vm_size', completer=get_vm_size_completion_list)
register_cli_argument('acs scale', 'new_agent_count', type=int, help='The number of agents for the cluster')
register_cli_argument('acs create', 'service_principal', help='Service principal for making calls into Azure APIs')
register_cli_argument('acs create', 'client_secret', help='Client secret to use with the service principal for making calls to Azure APIs')
register_cli_argument('vm capture', 'overwrite', action='store_true')
register_cli_argument('vm diagnostics', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',))
register_cli_argument('vm diagnostics set', 'storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
register_cli_argument('vm extension', 'vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), id_part='child_name')
register_cli_argument('vm extension', 'vm_name', arg_type=existing_vm_name, options_list=('--vm-name',), id_part='name')
register_cli_argument('vm extension image', 'image_location', options_list=('--location', '-l'))
register_cli_argument('vm extension image', 'publisher_name', options_list=('--publisher',))
register_cli_argument('vm extension image', 'type', options_list=('--name', '-n'))
register_cli_argument('vm extension image', 'latest', action='store_true')
for dest in ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']:
register_cli_argument('vmss', dest, vmss_name_type)
register_cli_argument('vmss deallocate', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss delete-instances', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss restart', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss start', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss stop', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss update-instances', dest, vmss_name_type, id_part=None) # due to instance-ids parameter
register_cli_argument('vmss', 'instance_id', id_part='child_name')
register_cli_argument('vmss', 'instance_ids', multi_ids_type, help='Space separated list of IDs (ex: 1 2 3 ...) or * for all instances')
register_cli_argument('vmss', 'tags', tags_type)
register_cli_argument('vmss extension', 'extension_name', name_arg_type, help='Name of the extension.')
register_cli_argument('vmss extension', 'vmss_name', id_part=None)
register_cli_argument('vmss diagnostics', 'vmss_name', id_part=None, help='Scale set name')
register_cli_argument('vmss extension image', 'publisher_name', options_list=('--publisher',), help='Image publisher name')
register_cli_argument('vmss extension image', 'type', options_list=('--name', '-n'), help='Extension name')
register_cli_argument('vmss extension image', 'latest', action='store_true')
register_cli_argument('vmss extension image', 'image_name', help='Image name')
register_cli_argument('vmss extension image', 'orderby', help='The sort to apply on the operation')
register_cli_argument('vmss extension image', 'top', help='Return top number of records')
register_cli_argument('vmss extension image', 'version', help='Extension version')
for scope in ['vm diagnostics', 'vmss diagnostics']:
register_cli_argument(scope, 'version', help='version of the diagnostics extension. Will use the latest if not specfied')
register_cli_argument(scope, 'settings', help='json string or a file path, which defines data to be collected.')
register_cli_argument(scope, 'protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.')
for scope in ['vm', 'vmss']:
register_cli_argument(scope, 'no_auto_upgrade', action='store_true', help='by doing this, extension system will not pick the highest minor version for the specified version number, and will not auto update to the latest build/revision number on any scale set updates in future.')
register_cli_argument('vm image list', 'image_location', location_type)
register_cli_argument('vm open-port', 'vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
register_cli_argument('vm open-port', 'network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
register_cli_argument('vm open-port', 'apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
register_cli_argument('vm nic', 'vm_name', existing_vm_name, options_list=('--vm-name',), id_part=None)
register_cli_argument('vm nic', 'nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
register_cli_argument('vm nic show', 'nic', help='NIC name or ID.', validator=validate_vm_nic)
register_cli_argument('vmss nic', 'virtual_machine_scale_set_name', options_list=('--vmss-name',), help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
register_cli_argument('vmss nic', 'virtualmachine_index', options_list=('--instance-id',), id_part='child_name')
register_cli_argument('vmss nic', 'network_interface_name', options_list=('--name', '-n'), metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='grandchild_name')
register_cli_argument('network nic scale-set list', 'virtual_machine_scale_set_name', options_list=('--vmss-name',), completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
# VM CREATE PARAMETER CONFIGURATION
# Reusable argument definitions shared by the `vm create` / `vmss create`
# registrations below.  default=None lets the command pick the OS-appropriate
# value at run time (password/RDP on Windows, SSH on Linux).
authentication_type = CliArgumentType(
    default=None,
    help='Password or SSH public key authentication. Defaults to password for Windows and SSH public key for Linux.',
    **enum_choice_list(['ssh', 'password'])
)
nsg_rule_type = CliArgumentType(
    default=None,
    help='Network security group rule to create. Defaults open ports for allowing RDP on Windows and allowing SSH on Linux.',
    **enum_choice_list(['RDP', 'SSH'])
)
register_cli_argument('vm create', 'network_interface_type', help=argparse.SUPPRESS)
register_cli_argument('vm create', 'network_interface_ids', options_list=('--nics',), nargs='+', help='Names or IDs of existing NICs to reference. The first NIC will be the primary NIC.', type=lambda val: val if (not '/' in val or is_valid_resource_id(val, ValueError)) else '', validator=validate_vm_create_nics)
register_cli_argument('vm create', 'name', name_arg_type, validator=_resource_not_exists('Microsoft.Compute/virtualMachines'))
register_cli_argument('vmss create', 'name', name_arg_type)
register_cli_argument('vmss create', 'nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
for scope in ['vm create', 'vmss create']:
register_cli_argument(scope, 'location', completer=get_location_completion_list, help='Location in which to create the VM and related resources. If not specified, defaults to the resource group\'s location.')
register_cli_argument(scope, 'custom_os_disk_uri', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_disk_type', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_disk_name', validator=validate_default_os_disk)
register_cli_argument(scope, 'overprovision', action='store_false', default=None, options_list=('--disable-overprovision',))
register_cli_argument(scope, 'upgrade_policy_mode', help=None, **enum_choice_list(UpgradeMode))
register_cli_argument(scope, 'os_disk_uri', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_offer', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_publisher', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_sku', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_type', help=argparse.SUPPRESS)
register_cli_argument(scope, 'os_version', help=argparse.SUPPRESS)
register_cli_argument(scope, 'dns_name_type', help=argparse.SUPPRESS)
register_cli_argument(scope, 'admin_username', admin_username_type)
register_cli_argument(scope, 'storage_type', help='The VM storage type.', **enum_choice_list(SkuName))
register_cli_argument(scope, 'storage_suffix', ignore_type, validator=validate_storage_suffix)
register_cli_argument(scope, 'subnet_name', help='The subnet name. Creates if creating a new VNet, references if referencing an existing VNet.')
register_cli_argument(scope, 'admin_password', help='Password for the Virtual Machine if Authentication Type is Password.')
register_cli_argument(scope, 'ssh_key_value', action=VMSSHFieldAction)
register_cli_argument(scope, 'ssh_dest_key_path', completer=FilesCompleter())
register_cli_argument(scope, 'dns_name_for_public_ip', action=VMDNSNameAction, options_list=('--public-ip-address-dns-name',), help='Globally unique DNS Name for the Public IP.')
register_cli_argument(scope, 'authentication_type', authentication_type)
register_folded_cli_argument(scope, 'availability_set', 'Microsoft.Compute/availabilitySets', new_flag_value=None, default_value_flag='none')
register_cli_argument(scope, 'private_ip_address_allocation', help=argparse.SUPPRESS)
register_cli_argument(scope, 'virtual_network_ip_address_prefix', options_list=('--vnet-ip-address-prefix',))
register_cli_argument(scope, 'subnet_ip_address_prefix', options_list=('--subnet-ip-address-prefix',))
register_cli_argument(scope, 'private_ip_address', help='Static private IP address (e.g. 10.0.0.5).', options_list=('--private-ip-address',), action=PrivateIpAction)
register_cli_argument(scope, 'public_ip_address_allocation', help='', default='dynamic', **enum_choice_list(['dynamic', 'static']))
register_folded_cli_argument(scope, 'public_ip_address', 'Microsoft.Network/publicIPAddresses')
register_folded_cli_argument(scope, 'storage_account', 'Microsoft.Storage/storageAccounts', validator=validate_default_storage_account, none_flag_value=None, default_value_flag='existingId')
register_folded_cli_argument(scope, 'virtual_network', 'Microsoft.Network/virtualNetworks', options_list=('--vnet',), validator=validate_default_vnet, none_flag_value=None, default_value_flag='existingId')
register_folded_cli_argument(scope, 'network_security_group', 'Microsoft.Network/networkSecurityGroups', options_list=('--nsg',))
register_folded_cli_argument(scope, 'load_balancer', 'Microsoft.Network/loadBalancers')
register_cli_argument(scope, 'network_security_group_rule', nsg_rule_type, options_list=('--nsg-rule',))
register_extra_cli_argument(scope, 'image', options_list=('--image',), action=VMImageFieldAction, completer=get_urn_aliases_completion_list, required=True)
| true | true |
f73efad578328f021962b73bff802e4dbb8abc0d | 1,323 | py | Python | python/calculator/__init__.py | kmhsonnenkind/thrift-example | 8abfee8d390ee24c131428f38d7c32034f462945 | [
"MIT"
] | null | null | null | python/calculator/__init__.py | kmhsonnenkind/thrift-example | 8abfee8d390ee24c131428f38d7c32034f462945 | [
"MIT"
] | null | null | null | python/calculator/__init__.py | kmhsonnenkind/thrift-example | 8abfee8d390ee24c131428f38d7c32034f462945 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 Martin Kloesch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Example thrift service and client.
'''
__all__ = ['Client', 'Server', 'DivideByZeroException']
from .gen.calculator.ttypes import DivideByZeroException
from .client import Client
from .server import Server
| 40.090909 | 79 | 0.77551 |
__all__ = ['Client', 'Server', 'DivideByZeroException']
from .gen.calculator.ttypes import DivideByZeroException
from .client import Client
from .server import Server
| true | true |
f73efafeeba2c32476651fcf1c16adf137ba29d8 | 417 | py | Python | PythonRearrangement/setup.py | QuantumQuadrate/Rearrangement | 5f8d64bd18a471a488747ed8d17b00304b4ab293 | [
"MIT"
] | null | null | null | PythonRearrangement/setup.py | QuantumQuadrate/Rearrangement | 5f8d64bd18a471a488747ed8d17b00304b4ab293 | [
"MIT"
] | 1 | 2019-06-18T23:13:45.000Z | 2019-06-18T23:13:45.000Z | PythonRearrangement/setup.py | QuantumQuadrate/Rearrangement | 5f8d64bd18a471a488747ed8d17b00304b4ab293 | [
"MIT"
] | 2 | 2019-05-23T15:52:20.000Z | 2021-07-03T15:25:19.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 13:22:01 2018
@author: Cody
"""
from setuptools import setup
from setuptools import Extension
from Cython.Distutils import build_ext
import numpy as np
# Build the "Rearranger" Cython extension in place: the .pyx wrapper is
# compiled together with the C++ core (../CPPrearrangement/Rearrangement.cpp),
# with NumPy headers on the include path for the typed buffers.
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("Rearranger", sources= ["pyRearranger.pyx","../CPPrearrangement/Rearrangement.cpp"],language='c++',include_dirs=[np.get_include()])])
| 24.529412 | 162 | 0.717026 |
from setuptools import setup
from setuptools import Extension
from Cython.Distutils import build_ext
import numpy as np
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("Rearranger", sources= ["pyRearranger.pyx","../CPPrearrangement/Rearrangement.cpp"],language='c++',include_dirs=[np.get_include()])])
| true | true |
f73efbddcc46b0af04ccaa3518cffb2c82d731c7 | 41 | py | Python | code/abc082_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc082_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc082_a_02.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | print(-~sum(map(int,input().split()))//2) | 41 | 41 | 0.609756 | print(-~sum(map(int,input().split()))//2) | true | true |
f73efd40fffd193c0a2234d000f69837b308ca6b | 301 | py | Python | src/OFS/tests/testEtagSupport.py | CMYanko/Zope | bbd2f4ce565740bfc9d9cae00c147f963ba49085 | [
"ZPL-2.1"
] | 289 | 2015-01-05T12:38:21.000Z | 2022-03-05T21:20:39.000Z | src/OFS/tests/testEtagSupport.py | CMYanko/Zope | bbd2f4ce565740bfc9d9cae00c147f963ba49085 | [
"ZPL-2.1"
] | 732 | 2015-02-09T23:35:57.000Z | 2022-03-31T09:10:13.000Z | src/OFS/tests/testEtagSupport.py | CMYanko/Zope | bbd2f4ce565740bfc9d9cae00c147f963ba49085 | [
"ZPL-2.1"
] | 102 | 2015-01-12T14:03:35.000Z | 2022-03-30T11:02:44.000Z | import unittest
class TestEtagSupport(unittest.TestCase):
def test_interfaces(self):
from OFS.EtagSupport import EtagBaseInterface
from OFS.EtagSupport import EtagSupport
from zope.interface.verify import verifyClass
verifyClass(EtagBaseInterface, EtagSupport)
| 25.083333 | 53 | 0.754153 | import unittest
class TestEtagSupport(unittest.TestCase):
def test_interfaces(self):
from OFS.EtagSupport import EtagBaseInterface
from OFS.EtagSupport import EtagSupport
from zope.interface.verify import verifyClass
verifyClass(EtagBaseInterface, EtagSupport)
| true | true |
f73efdeb30de1d72a08bebac223e212a621df029 | 96 | py | Python | py_function/fn_.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | py_function/fn_.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | py_function/fn_.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | import dat_NABS
import importlib
importlib.reload(dat_NABS)
dat_NABS.printme(str="al10.dat")
| 12 | 32 | 0.802083 | import dat_NABS
import importlib
importlib.reload(dat_NABS)
dat_NABS.printme(str="al10.dat")
| true | true |
f73efe1c11faebf1039b899ddb6e160016089932 | 210 | py | Python | westworld/objects/__init__.py | TheoLvs/westworld | 7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b | [
"MIT"
] | null | null | null | westworld/objects/__init__.py | TheoLvs/westworld | 7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b | [
"MIT"
] | 3 | 2021-09-06T23:12:23.000Z | 2021-09-17T01:04:34.000Z | westworld/objects/__init__.py | TheoLvs/westworld | 7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b | [
"MIT"
] | null | null | null | from .base_object import BaseObject
from .rectangle import BaseRectangle
from .collectible import BaseCollectible
from .trigger import BaseTrigger
from .obstacle import BaseObstacle
from .layer import BaseLayer | 35 | 40 | 0.861905 | from .base_object import BaseObject
from .rectangle import BaseRectangle
from .collectible import BaseCollectible
from .trigger import BaseTrigger
from .obstacle import BaseObstacle
from .layer import BaseLayer | true | true |
f73efe8cda7157490bc554bfb0da84e8ef3c6d77 | 11,719 | py | Python | passes.py | singlautsav/two-pass-assembler | 0c4eab227da9b1ea0810eda2914af88c008ad297 | [
"MIT"
] | 1 | 2020-02-27T16:32:47.000Z | 2020-02-27T16:32:47.000Z | passes.py | singlautsav/two-pass-assembler | 0c4eab227da9b1ea0810eda2914af88c008ad297 | [
"MIT"
] | null | null | null | passes.py | singlautsav/two-pass-assembler | 0c4eab227da9b1ea0810eda2914af88c008ad297 | [
"MIT"
] | null | null | null | programCounterX = 0
# ---- Pass-one shared state (module globals) ----
lines = []  # tokenised source lines produced by passOne, consumed by passTwo
symbol_Table=[]  # entries: {'name', 'isUsed', 'isFound', 'variableAddress'}
ErrorFlag = False  # error latch for pass one (NOTE(review): passOne assigns this without a `global` declaration, so it may never actually change -- confirm)
ErrorList = []  # human-readable pass-one error messages
opCode_Table = {'CLA': 0, 'LAC': 1, 'SAC': 2, 'ADD': 3, 'SUB': 4, 'BRZ': 5, 'BRN': 6, 'BRP': 7, 'INP': 8, 'DSP': 9, 'MUL': 10, 'DIV': 11, 'STP': 12, 'DW':13}  # mnemonic -> 4-bit opcode; DW is the data-word pseudo-op
def lineCheck(line):
    """Classify the first token of a tokenised line.

    Returns 1 when it is a label definition (ends with ':'),
    otherwise 2 (a plain mnemonic or symbol name).
    """
    return 1 if line[0][-1] == ':' else 2
def passOne(text):
    """First assembler pass: tokenise the source and build the symbol table.

    text : list of raw source lines.  Blank lines and full-line '//'
           comments are stripped from it in place.

    Side effects (module globals): appends token lists to `lines`, fills
    `symbol_Table`, advances `programCounterX`, and records problems via
    `ErrorFlag`/`ErrorList`.

    Returns 1 if an STP instruction was seen anywhere, else 0.
    """
    global programCounterX
    global ErrorFlag  # BUG FIX: without this the assignments below made a dead local

    def define_label(label_name, add_if_missing):
        """Mark label_name as defined at the current address.

        A forward-referenced entry is resolved in place; a redefinition is
        reported as an error (and, fixing an old bug, no duplicate entry is
        appended).  DW lines pass add_if_missing=False and silently ignore
        unknown names, matching the original behaviour.
        """
        global ErrorFlag
        for sym in symbol_Table:
            if sym['name'] == label_name:
                if not sym['isFound']:
                    sym['isFound'] = True
                    sym['variableAddress'] = programCounterX
                else:
                    ErrorFlag = True
                    ErrorList.append('Label Cannot Be declared Again in Line: ' + str(programCounterX))
                return
        if add_if_missing:
            symbol_Table.append({'name': label_name, 'isUsed': False, 'isFound': True, 'variableAddress': programCounterX})

    def use_symbol(name):
        """Register name as a referenced-but-not-yet-defined symbol (once)."""
        if not any(sym['name'] == name for sym in symbol_Table):
            symbol_Table.append({'name': name, 'isUsed': True, 'isFound': False, 'variableAddress': -1})

    STP_found = 0
    # Drop blank lines and full-line comments.  BUG FIX: the old code removed
    # items from `text` while iterating over it, which skips the element that
    # follows each removal.
    text[:] = [raw for raw in text if raw != '' and not raw.startswith('//')]
    for raw in text:
        # Tokenise: drop empty tokens from repeated spaces and cut the line at
        # an inline '//' comment.  BUG FIX: the old code called
        # `line.remove(i)` with an *index*, which raises ValueError as soon as
        # the line contains a double space.
        tokens = []
        for tok in raw.split(' '):
            if tok.startswith('//'):
                break
            if tok != '':
                tokens.append(tok)
        if len(tokens) != 0:
            lines.append(tokens)
            if len(tokens) == 2:
                if lineCheck(tokens) == 1:
                    # "LABEL: MNEMONIC" -- the label is defined here.
                    define_label(tokens[0][:-1], True)
                    if tokens[1] == 'STP':
                        STP_found = 1
                else:
                    # "MNEMONIC OPERAND" -- remember the operand symbol.
                    use_symbol(tokens[1])
            elif len(tokens) == 3:
                if tokens[0][-1] == ':':
                    # "LABEL: MNEMONIC OPERAND"
                    define_label(tokens[0][:-1], True)
                    if tokens[1] == 'STP':
                        STP_found = 1
                    else:
                        # BUG FIX: the operand of a labelled instruction was
                        # never registered, so pass two could not resolve it.
                        use_symbol(tokens[2])
                elif tokens[1] == 'DW':
                    # "NAME DW VALUE" -- the variable gets this address.
                    define_label(tokens[0], False)
            elif len(tokens) == 1:
                if tokens[0] == 'CLA':
                    pass
                elif tokens[0] == 'STP':
                    STP_found = 1
                else:
                    ErrorFlag = True
                    ErrorList.append("Invalid Command in Line:" + str(programCounterX))
            else:
                ErrorFlag = True
                ErrorList.append("Extra/ Invalid Arguements at " + str(programCounterX))
        programCounterX += 1
    # Any symbol still unresolved is a variable: place it after the code.
    for sym in symbol_Table:
        if not sym['isFound']:
            sym['isFound'] = True
            sym['variableAddress'] = programCounterX
            programCounterX += 1
    # Purely numeric "symbols" are literal addresses: use the number itself.
    for sym in symbol_Table:
        try:
            sym['variableAddress'] = int(sym['name'])
        except ValueError:
            pass
    return STP_found
def RepresentsInt(s):
    """Return True when *s* parses as a base-10 integer, else False."""
    try:
        int(s)
    except ValueError:
        return False
    return True
# ---- Pass-two shared state (module globals) ----
programCounterP2 = 0  # address of the line currently being assembled in pass two
ErrorFlagPass2 = False  # latched True on the first pass-two error
ErrorListPass2 = []  # human-readable pass-two error messages
finalOutput = [] # list to display the final output
def checkSTP_CLA(line, lineY):
    """Try to assemble a zero-operand instruction (CLA or STP).

    Returns (error, lineY).  When line[0] is CLA or STP, error is False and
    lineY has "<8-bit address> <4-bit opcode>" appended; otherwise returns
    (True, lineY) unchanged so the caller can report the problem.
    """
    mnemonic = line[0]
    if mnemonic not in ('CLA', 'STP'):
        return True, lineY
    lineY += convertbin(str(bin(programCounterP2)[2:]), 1) + " "
    lineY += convertbin(str(bin(opCode_Table[mnemonic])[2:]), 2)
    return False, lineY
def convertbin(line, value):
    """Left-pad the binary digit string *line* with zeros.

    value == 1 -> pad to 8 digits (address/operand field);
    value == 2 -> pad to 4 digits (opcode field).
    Strings already at least that wide are returned unchanged; any other
    *value* yields None (mirroring the original fall-through).
    """
    widths = {1: 8, 2: 4}
    if value not in widths:
        return None
    # "0" * negative is "", so over-long inputs pass through untouched.
    return "0" * (widths[value] - len(line)) + line
def checkTwo(line, lineX):
    """Assemble a two-token instruction ("MNEMONIC OPERAND") into machine code.

    line  : token list; line[0] is the mnemonic (or a label when the caller
            passed a labelled zero-operand line), line[1] the operand.
    lineX : string the machine code is appended to (callers pass '').

    On success returns "<8-bit address> <4-bit opcode> <8-bit operand>".
    On any error returns '' (or lineX unchanged for a bad labelled line) and
    records the problem in ErrorFlagPass2/ErrorListPass2.
    """
    global ErrorFlagPass2
    if line[0] == 'CLA' or line[0] == 'STP':
        # CLA/STP take no operand, so a second token is an error.
        lineX = ''
        ErrorFlagPass2 = True
        ErrorListPass2.append("Inavalid opCode with extra Argument at: " + convertbin(str(bin(programCounterP2)[2:]), 1))
    elif line[0][-1] == ':':
        # Labelled zero-operand instruction, e.g. "HERE: STP".
        boolX, lineX = checkSTP_CLA(line[1:], lineX)
        if boolX:
            ErrorFlagPass2 = True
            ErrorListPass2.append("Invalid Opcode or Extra Arguements at:" + convertbin(str(bin(programCounterP2)[2:]), 1))
    else:
        try:
            lineX += convertbin(str(bin(programCounterP2)[2:]), 1) + " "
            lineX += convertbin(str(bin(opCode_Table[line[0]])[2:]), 2) + " "
        except KeyError:
            # line[0] is not a known mnemonic.  BUG FIX: the old handler had a
            # misplaced parenthesis -- `append(convertbin(str(...)), 1)` --
            # which called convertbin with one argument and raised TypeError.
            ErrorFlagPass2 = True
            ErrorListPass2.append("Invalid Opcode at: " + convertbin(str(bin(programCounterP2)[2:]), 1))
            return ''
        if RepresentsInt(line[1]):
            # Numeric operand: use the literal address directly.
            lineX += convertbin(str(bin(int(line[1]))[2:]), 1)
        else:
            # BUG FIX: foundSymbol was read without ever being initialised,
            # raising NameError whenever the symbol was missing.
            foundSymbol = False
            for symbol in symbol_Table:
                if symbol['name'] == line[1]:
                    lineX += convertbin(str(bin(symbol['variableAddress'])[2:]), 1)
                    foundSymbol = True
            if not foundSymbol:
                lineX = ''
                ErrorFlagPass2 = True
                ErrorListPass2.append("Could not Find Symbol in the Table:" + line[1])
    return lineX
def passTwo():
    """Second assembler pass: translate the token lists collected by passOne
    into machine-code strings.

    Reads the module globals `lines` and `symbol_Table`; appends one string
    per source line to `finalOutput` ('' for DW/data lines and for lines that
    failed to assemble) and records problems in `ErrorFlagPass2` /
    `ErrorListPass2`.
    """
    global programCounterP2
    global ErrorFlagPass2
    for line in lines:
        lineX = ''  # machine code assembled for this line ('' = nothing emitted)
        foundSymbol = False  # NOTE(review): dead local -- checkTwo cannot see it; confirm it can be removed
        if len(line) == 1: # bare mnemonic: only CLA/STP are legal
            boolX, lineX = checkSTP_CLA(line, lineX)
            if boolX:
                # checkSTP_CLA returned True: the mnemonic was not CLA/STP.
                ErrorFlagPass2 = True
                ErrorListPass2.append("Invalid OpCode in at: " + convertbin(str(bin(programCounterP2))[2:],1))
        elif len(line) == 2:
            # "MNEMONIC OPERAND"
            lineX = checkTwo(line, lineX)
        elif len(line) == 3:
            if line[0][-1] == ':':
                # "LABEL: MNEMONIC OPERAND" -- assemble the remainder.
                lineX = checkTwo(line[1:], lineX)
            elif line[1] == 'DW':
                # Data word: reserves an address but emits no code.
                lineX = ''
            else:
                ErrorFlagPass2 = True
                ErrorListPass2.append("Invalid Opcode or Extra Arguements at:" + convertbin(str(bin(programCounterP2))[2:],1))
        else:
            ErrorFlagPass2 = True
            ErrorListPass2.append("More than Required Arguements")
        finalOutput.append(lineX)
        programCounterP2 += 1
| 42.306859 | 157 | 0.478966 | programCounterX = 0
# ---- Pass-one shared state (module globals) ----
lines = []  # tokenised source lines produced by passOne, consumed by passTwo
symbol_Table=[]  # entries: {'name', 'isUsed', 'isFound', 'variableAddress'}
ErrorFlag = False  # error latch for pass one (NOTE(review): passOne assigns this without a `global` declaration, so it may never actually change -- confirm)
ErrorList = []  # human-readable pass-one error messages
opCode_Table = {'CLA': 0, 'LAC': 1, 'SAC': 2, 'ADD': 3, 'SUB': 4, 'BRZ': 5, 'BRN': 6, 'BRP': 7, 'INP': 8, 'DSP': 9, 'MUL': 10, 'DIV': 11, 'STP': 12, 'DW':13}  # mnemonic -> 4-bit opcode; DW is the data-word pseudo-op
def lineCheck(line):
    """Classify the first token of a tokenised line.

    Returns 1 when it is a label definition (ends with ':'),
    otherwise 2 (a plain mnemonic or symbol name).
    """
    return 1 if line[0][-1] == ':' else 2
def passOne(text):
    """First assembler pass: tokenise the source and build the symbol table.

    text : list of raw source lines.  Blank lines and full-line '//'
           comments are stripped from it in place.

    Side effects (module globals): appends token lists to `lines`, fills
    `symbol_Table`, advances `programCounterX`, and records problems via
    `ErrorFlag`/`ErrorList`.

    Returns 1 if an STP instruction was seen anywhere, else 0.
    """
    global programCounterX
    global ErrorFlag  # BUG FIX: without this the assignments below made a dead local

    def define_label(label_name, add_if_missing):
        """Mark label_name as defined at the current address.

        A forward-referenced entry is resolved in place; a redefinition is
        reported as an error (and, fixing an old bug, no duplicate entry is
        appended).  DW lines pass add_if_missing=False and silently ignore
        unknown names, matching the original behaviour.
        """
        global ErrorFlag
        for sym in symbol_Table:
            if sym['name'] == label_name:
                if not sym['isFound']:
                    sym['isFound'] = True
                    sym['variableAddress'] = programCounterX
                else:
                    ErrorFlag = True
                    ErrorList.append('Label Cannot Be declared Again in Line: ' + str(programCounterX))
                return
        if add_if_missing:
            symbol_Table.append({'name': label_name, 'isUsed': False, 'isFound': True, 'variableAddress': programCounterX})

    def use_symbol(name):
        """Register name as a referenced-but-not-yet-defined symbol (once)."""
        if not any(sym['name'] == name for sym in symbol_Table):
            symbol_Table.append({'name': name, 'isUsed': True, 'isFound': False, 'variableAddress': -1})

    STP_found = 0
    # Drop blank lines and full-line comments.  BUG FIX: the old code removed
    # items from `text` while iterating over it, which skips the element that
    # follows each removal.
    text[:] = [raw for raw in text if raw != '' and not raw.startswith('//')]
    for raw in text:
        # Tokenise: drop empty tokens from repeated spaces and cut the line at
        # an inline '//' comment.  BUG FIX: the old code called
        # `line.remove(i)` with an *index*, which raises ValueError as soon as
        # the line contains a double space.
        tokens = []
        for tok in raw.split(' '):
            if tok.startswith('//'):
                break
            if tok != '':
                tokens.append(tok)
        if len(tokens) != 0:
            lines.append(tokens)
            if len(tokens) == 2:
                if lineCheck(tokens) == 1:
                    # "LABEL: MNEMONIC" -- the label is defined here.
                    define_label(tokens[0][:-1], True)
                    if tokens[1] == 'STP':
                        STP_found = 1
                else:
                    # "MNEMONIC OPERAND" -- remember the operand symbol.
                    use_symbol(tokens[1])
            elif len(tokens) == 3:
                if tokens[0][-1] == ':':
                    # "LABEL: MNEMONIC OPERAND"
                    define_label(tokens[0][:-1], True)
                    if tokens[1] == 'STP':
                        STP_found = 1
                    else:
                        # BUG FIX: the operand of a labelled instruction was
                        # never registered, so pass two could not resolve it.
                        use_symbol(tokens[2])
                elif tokens[1] == 'DW':
                    # "NAME DW VALUE" -- the variable gets this address.
                    define_label(tokens[0], False)
            elif len(tokens) == 1:
                if tokens[0] == 'CLA':
                    pass
                elif tokens[0] == 'STP':
                    STP_found = 1
                else:
                    ErrorFlag = True
                    ErrorList.append("Invalid Command in Line:" + str(programCounterX))
            else:
                ErrorFlag = True
                ErrorList.append("Extra/ Invalid Arguements at " + str(programCounterX))
        programCounterX += 1
    # Any symbol still unresolved is a variable: place it after the code.
    for sym in symbol_Table:
        if not sym['isFound']:
            sym['isFound'] = True
            sym['variableAddress'] = programCounterX
            programCounterX += 1
    # Purely numeric "symbols" are literal addresses: use the number itself.
    for sym in symbol_Table:
        try:
            sym['variableAddress'] = int(sym['name'])
        except ValueError:
            pass
    return STP_found
def RepresentsInt(s):
    """Return True when *s* parses as a base-10 integer, else False."""
    try:
        int(s)
    except ValueError:
        return False
    return True
programCounterP2 = 0
ErrorFlagPass2 = False
ErrorListPass2 = []
finalOutput = [] # list to display the final output
def checkSTP_CLA(line, lineY):
    """Try to assemble a zero-operand opcode (CLA or STP).

    :param line: tokenised source line; only line[0] is inspected
    :param lineY: machine-code text accumulated so far
    :return: (failed, lineY) -- ``failed`` is False when line[0] was CLA/STP
        and the encoded instruction was appended to lineY; True when the
        token is not a zero-operand opcode (the caller reports the error).
    """
    # Both opcodes assemble identically (8-bit address + 4-bit opcode), so
    # the two previously duplicated branches are collapsed into one.
    if line[0] in ('CLA', 'STP'):
        lineY += convertbin(str(bin(programCounterP2)[2:]), 1) + " "  # address field
        lineY += convertbin(str(bin(opCode_Table[line[0]])[2:]), 2)  # opcode field
        return False, lineY
    return True, lineY
def convertbin(line, value):
    """Left-pad a binary digit string with zeros.

    value == 1 pads to 8 characters (address field); value == 2 pads to
    4 characters (opcode field).  Strings already at or beyond the target
    width are returned unchanged; any other *value* yields None, matching
    the original fall-through behaviour.
    """
    if value == 1:
        return line.rjust(8, '0')
    if value == 2:
        return line.rjust(4, '0')
def checkTwo(line, lineX):
    """Assemble a two-token statement (opcode + operand) for pass two.

    :param line: tokenised source line, 2 tokens (or label + 2 tokens when
        called with ``line[1:]`` from a 3-token statement)
    :param lineX: machine-code text accumulated so far
    :return: the assembled machine-code line, or '' on error (errors are
        appended to ErrorListPass2 and ErrorFlagPass2 is raised)
    """
    global ErrorFlagPass2
    if line[0] == 'CLA' or line[0] == 'STP':
        # CLA/STP take no operand, so a second token is an error.
        lineX = ''
        ErrorFlagPass2 = True
        ErrorListPass2.append("Inavalid opCode with extra Argument at: " + convertbin(str(bin(programCounterP2)[2:]), 1))
    else:
        if line[0][-1] == ':':
            # Label followed by a zero-operand opcode.
            boolX, lineX = checkSTP_CLA(line[1:], lineX)
            if boolX:
                ErrorFlagPass2 = True
                ErrorListPass2.append("Invalid Opcode or Extra Arguements at:" + convertbin(str(bin(programCounterP2)[2:]), 1))
        else:
            try:
                lineX += convertbin(str(bin(programCounterP2)[2:]), 1) + " "  # address field
                lineX += convertbin(str(bin(opCode_Table[line[0]])[2:]), 2) + " "  # opcode field
                if RepresentsInt(line[1]):
                    lineX += convertbin(str(bin(int(line[1]))[2:]), 1)
                else:
                    # Bug fix: foundSymbol was never initialised in this scope,
                    # so every symbolic operand raised NameError.
                    foundSymbol = False
                    for symbol in symbol_Table:
                        if symbol['name'] == line[1]:
                            lineX += convertbin(str(bin(symbol['variableAddress'])[2:]), 1)
                            foundSymbol = True
                    if foundSymbol == False:
                        lineX = ''
                        ErrorFlagPass2 = True
                        ErrorListPass2.append("Could not Find Symbol in the Table:" + line[1])
            except KeyError:
                lineX = ''
                ErrorFlagPass2 = True
                # Bug fix: a misplaced parenthesis previously passed ``1`` to
                # list.append (and called convertbin with a single argument)
                # instead of formatting the counter as an 8-bit address.
                ErrorListPass2.append("Invalid Opcode at: " + convertbin(str(bin(programCounterP2)[2:]), 1))
    return lineX
def passTwo():
    """Second assembler pass: translate each tokenised source line into
    machine code (8-bit address + 4-bit opcode [+ 8-bit operand]).

    Reads the module-level ``lines`` list and appends one string (possibly
    empty) per source line to ``finalOutput``.  Errors are collected in
    ``ErrorListPass2`` with ``ErrorFlagPass2`` set when any occurred.
    """
    global programCounterP2
    global ErrorFlagPass2
    for line in lines:
        lineX = ''
        foundSymbol = False
        if len(line) == 1:
            # Single token: must be a zero-operand opcode (CLA/STP);
            # checkSTP_CLA returns True in its first slot on failure.
            boolX, lineX = checkSTP_CLA(line, lineX)
            if boolX:
                ErrorFlagPass2 = True
                ErrorListPass2.append("Invalid OpCode in at: " + convertbin(str(bin(programCounterP2))[2:],1))
        elif len(line) == 2:
            # Opcode + operand (literal address or symbol).
            lineX = checkTwo(line, lineX)
        elif len(line) == 3:
            if line[0][-1] == ':':
                # Label prefix: assemble the remaining two tokens.
                lineX = checkTwo(line[1:], lineX)
            elif line[1] == 'DW':
                # DW directives emit no machine code in pass two.
                lineX = ''
            else:
                ErrorFlagPass2 = True
                ErrorListPass2.append("Invalid Opcode or Extra Arguements at:" + convertbin(str(bin(programCounterP2))[2:],1))
        else:
            ErrorFlagPass2 = True
            ErrorListPass2.append("More than Required Arguements")
        finalOutput.append(lineX)
        programCounterP2 += 1
| true | true |
f73efff8349b1d3161201f711d769acb83a38239 | 1,825 | py | Python | plugins/lastpass_enterprise/komand_lastpass_enterprise/actions/deprovision_user/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/lastpass_enterprise/komand_lastpass_enterprise/actions/deprovision_user/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/lastpass_enterprise/komand_lastpass_enterprise/actions/deprovision_user/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import komand
import requests
from .schema import DeprovisionUserInput, DeprovisionUserOutput
class DeprovisionUser(komand.Action):
    """Remove (deprovision) a LastPass Enterprise user via the provisioning API."""

    # API mapping for the ``deleteaction`` field; any other value means "delete" (2).
    _DELETE_ACTIONS = {"deactivate": 0, "remove": 1}

    def __init__(self):
        super(self.__class__, self).__init__(
            name="deprovision_user",
            description="remove user",
            input=DeprovisionUserInput(),
            output=DeprovisionUserOutput(),
        )

    def run(self, params={}):
        """Issue a ``deluser`` command and return the API status.

        :param params: expects ``user_name`` and ``delete_action``
            ("deactivate" | "remove" | anything else -> delete)
        :raises Exception: when the response is unparsable or status != OK
        """
        payload = {
            "provhash": self.connection.provhash,
            "cid": self.connection.cid,
            "cmd": "deluser",
            "data": {
                "username": params.get("user_name"),
                "deleteaction": self._DELETE_ACTIONS.get(params.get("delete_action"), 2),
            },
        }
        response = requests.post(
            "https://lastpass.com/enterpriseapi.php",
            json=payload,
            headers={"content-type": "application/json"},
        )
        try:
            status = response.json().get("status")
        except Exception as e:
            self.logger.error(f"Deprovision user failed.\n" f"Exception was: {e}" f"Response was: {response.text}")
            raise e
        if status != "OK":
            self.logger.error(f"Deprovision user failed.\n" f"Response was: {response.text}")
            raise Exception("Deprovision user failed.")
        return {"status": status}
| 28.515625 | 115 | 0.582466 | import komand
import requests
from .schema import DeprovisionUserInput, DeprovisionUserOutput
class DeprovisionUser(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="deprovision_user",
description="remove user",
input=DeprovisionUserInput(),
output=DeprovisionUserOutput(),
)
def run(self, params={}):
cid = self.connection.cid
provhash = self.connection.provhash
user_name = params.get("user_name")
delete_action = params.get("delete_action")
cmd = "deluser"
url = "https://lastpass.com/enterpriseapi.php"
headers = {"content-type": "application/json"}
data = {}
data["username"] = user_name
if delete_action == "deactivate":
delete_action = 0
elif delete_action == "remove":
delete_action = 1
else:
delete_action = 2
data["deleteaction"] = delete_action
post = {"provhash": provhash, "cid": cid, "cmd": cmd, "data": data}
response = requests.post(url, json=post, headers=headers)
try:
status = response.json().get("status")
except Exception as e:
self.logger.error(f"Deprovision user failed.\n" f"Exception was: {e}" f"Response was: {response.text}")
raise e
if status != "OK":
self.logger.error(f"Deprovision user failed.\n" f"Response was: {response.text}")
raise Exception("Deprovision user failed.")
return {"status": status}
| true | true |
f73f00267f89b854ce528acc63461261f4444a06 | 47 | py | Python | variables.py | fabiolealsc/quest | 8f936aa6d8208ec23cc13f28148b9e39a976d997 | [
"MIT"
] | 2 | 2021-09-29T02:39:20.000Z | 2021-11-08T08:56:50.000Z | variables.py | fabiolealsc/quest | 8f936aa6d8208ec23cc13f28148b9e39a976d997 | [
"MIT"
] | null | null | null | variables.py | fabiolealsc/quest | 8f936aa6d8208ec23cc13f28148b9e39a976d997 | [
"MIT"
] | null | null | null | favorite_robot = "Cedric"
meaning_of_life = 42 | 23.5 | 25 | 0.787234 | favorite_robot = "Cedric"
meaning_of_life = 42 | true | true |
f73f0164313501a01674acc32e937c8dc0060da1 | 1,069 | py | Python | tests/test_isocolor.py | CagtayFabry/ipygany | 1ccaf45412b02dbd9548041bfa1d1f84eda94c76 | [
"BSD-3-Clause"
] | 3 | 2019-11-28T21:43:12.000Z | 2021-02-02T22:15:00.000Z | tests/test_isocolor.py | CagtayFabry/ipygany | 1ccaf45412b02dbd9548041bfa1d1f84eda94c76 | [
"BSD-3-Clause"
] | null | null | null | tests/test_isocolor.py | CagtayFabry/ipygany | 1ccaf45412b02dbd9548041bfa1d1f84eda94c76 | [
"BSD-3-Clause"
] | 1 | 2020-11-10T09:40:42.000Z | 2020-11-10T09:40:42.000Z | import pytest
from traitlets import TraitError
from ipygany import PolyMesh, IsoColor
from .utils import get_test_assets
def test_default_input():
    """IsoColor defaults its input to the mesh's first data component:
    the scalar '1d' data when present, otherwise the first component
    ('x') of the '3d' data."""
    vertices, triangles, data_1d, data_3d = get_test_assets()

    poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])

    colored_mesh = IsoColor(poly)

    assert colored_mesh.input == '1d'

    poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_3d])

    colored_mesh = IsoColor(poly)

    # With only multi-component data the default picks its first component.
    assert colored_mesh.input == (('3d', 'x'), )
def test_input():
    """Assigning IsoColor.input normalizes a bare data-name tuple by adding
    the default component 'x'; explicit (data, component) pairs and plain
    numeric inputs pass through unchanged."""
    vertices, triangles, data_1d, data_3d = get_test_assets()

    poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])

    colored_mesh = IsoColor(poly)

    # Bare data name gains the default component.
    colored_mesh.input = ('1d', )
    assert colored_mesh.input == (('1d', 'x'), )

    colored_mesh.input = ('3d', 'x')
    assert colored_mesh.input == ('3d', 'x')

    # Numeric inputs are accepted verbatim.
    colored_mesh.input = (3.2, )
    assert colored_mesh.input == (3.2, )

    colored_mesh.input = 3.2
    assert colored_mesh.input == 3.2
| 24.295455 | 91 | 0.690365 | import pytest
from traitlets import TraitError
from ipygany import PolyMesh, IsoColor
from .utils import get_test_assets
def test_default_input():
vertices, triangles, data_1d, data_3d = get_test_assets()
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])
colored_mesh = IsoColor(poly)
assert colored_mesh.input == '1d'
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_3d])
colored_mesh = IsoColor(poly)
assert colored_mesh.input == (('3d', 'x'), )
def test_input():
vertices, triangles, data_1d, data_3d = get_test_assets()
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])
colored_mesh = IsoColor(poly)
colored_mesh.input = ('1d', )
assert colored_mesh.input == (('1d', 'x'), )
colored_mesh.input = ('3d', 'x')
assert colored_mesh.input == ('3d', 'x')
colored_mesh.input = (3.2, )
assert colored_mesh.input == (3.2, )
colored_mesh.input = 3.2
assert colored_mesh.input == 3.2
| true | true |
f73f017bacc74b91d4bd111a13edc876e72084f0 | 157 | py | Python | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_NoCycle_ARX.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_NoCycle_ARX.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_NoCycle_ARX.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['NoCycle'] , ['ARX'] ); | 39.25 | 79 | 0.745223 | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['NoCycle'] , ['ARX'] ); | true | true |
f73f028e24b99fdfeeaeccb87b58f4f3c2a51a9b | 6,540 | py | Python | KRIS/urllib3/dummyserver/testcase.py | AnarchyTeam/Anarchy.py6 | 090522fd4aae659bcaea8661126062e93f846f71 | [
"MIT"
] | 1 | 2021-06-05T07:02:26.000Z | 2021-06-05T07:02:26.000Z | KRIS/urllib3/dummyserver/testcase.py | AnarchyTeam/Anarchy.py6 | 090522fd4aae659bcaea8661126062e93f846f71 | [
"MIT"
] | 2 | 2020-03-24T15:25:06.000Z | 2020-03-30T20:04:12.000Z | KRIS/urllib3/dummyserver/testcase.py | AnarchyTeam/Anarchy.py6 | 090522fd4aae659bcaea8661126062e93f846f71 | [
"MIT"
] | 1 | 2020-01-18T06:29:31.000Z | 2020-01-18T06:29:31.000Z | import sys
import threading
import pytest
from tornado import ioloop, web
from dummyserver.server import (
SocketServerThread,
run_tornado_app,
run_loop_in_thread,
DEFAULT_CERTS,
HAS_IPV6,
)
from dummyserver.handlers import TestingApp
from dummyserver.proxy import ProxyHandler
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def consume_socket(sock, chunks=65536):
    """Read from *sock* until a chunk ends with the CRLF-CRLF terminator."""
    terminator = b'\r\n\r\n'
    data = sock.recv(chunks)
    while not data.endswith(terminator):
        data = sock.recv(chunks)
class SocketDummyServerTestCase(unittest.TestCase):
"""
A simple socket-based server is created for this class that is good for
exactly one request.
"""
scheme = 'http'
host = 'localhost'
@classmethod
def _start_server(cls, socket_handler):
ready_event = threading.Event()
cls.server_thread = SocketServerThread(socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host)
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
@classmethod
def start_response_handler(cls, response, num=1, block_send=None):
ready_event = threading.Event()
def socket_handler(listener):
for _ in range(num):
ready_event.set()
sock = listener.accept()[0]
consume_socket(sock)
if block_send:
block_send.wait()
block_send.clear()
sock.send(response)
sock.close()
cls._start_server(socket_handler)
return ready_event
@classmethod
def start_basic_handler(cls, **kw):
return cls.start_response_handler(
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n'
b'\r\n', **kw)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'server_thread'):
cls.server_thread.join(0.1)
def assert_header_received(
self,
received_headers,
header_name,
expected_value=None
):
header_name = header_name.encode('ascii')
if expected_value is not None:
expected_value = expected_value.encode('ascii')
header_titles = []
for header in received_headers:
key, value = header.split(b': ')
header_titles.append(key)
if key == header_name and expected_value is not None:
self.assertEqual(value, expected_value)
self.assertIn(header_name, header_titles)
class IPV4SocketDummyServerTestCase(SocketDummyServerTestCase):
@classmethod
def _start_server(cls, socket_handler):
ready_event = threading.Event()
cls.server_thread = SocketServerThread(socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host)
cls.server_thread.USE_IPV6 = False
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
class HTTPDummyServerTestCase(unittest.TestCase):
""" A simple HTTP server that runs when your test class runs
Have your unittest class inherit from this one, and then a simple server
will start when your tests run, and automatically shut down when they
complete. For examples of what test requests you can send to the server,
see the TestingApp in dummyserver/handlers.py.
"""
scheme = 'http'
host = 'localhost'
host_alt = '127.0.0.1' # Some tests need two hosts
certs = DEFAULT_CERTS
@classmethod
def _start_server(cls):
cls.io_loop = ioloop.IOLoop()
app = web.Application([(r".*", TestingApp)])
cls.server, cls.port = run_tornado_app(app, cls.io_loop, cls.certs,
cls.scheme, cls.host)
cls.server_thread = run_loop_in_thread(cls.io_loop)
@classmethod
def _stop_server(cls):
cls.io_loop.add_callback(cls.server.stop)
cls.io_loop.add_callback(cls.io_loop.stop)
cls.server_thread.join()
@classmethod
def setUpClass(cls):
cls._start_server()
@classmethod
def tearDownClass(cls):
cls._stop_server()
class HTTPSDummyServerTestCase(HTTPDummyServerTestCase):
scheme = 'https'
host = 'localhost'
certs = DEFAULT_CERTS
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPV6HTTPSDummyServerTestCase(HTTPSDummyServerTestCase):
host = '::1'
class HTTPDummyProxyTestCase(unittest.TestCase):
http_host = 'localhost'
http_host_alt = '127.0.0.1'
https_host = 'localhost'
https_host_alt = '127.0.0.1'
https_certs = DEFAULT_CERTS
proxy_host = 'localhost'
proxy_host_alt = '127.0.0.1'
@classmethod
def setUpClass(cls):
cls.io_loop = ioloop.IOLoop()
app = web.Application([(r'.*', TestingApp)])
cls.http_server, cls.http_port = run_tornado_app(
app, cls.io_loop, None, 'http', cls.http_host)
app = web.Application([(r'.*', TestingApp)])
cls.https_server, cls.https_port = run_tornado_app(
app, cls.io_loop, cls.https_certs, 'https', cls.http_host)
app = web.Application([(r'.*', ProxyHandler)])
cls.proxy_server, cls.proxy_port = run_tornado_app(
app, cls.io_loop, None, 'http', cls.proxy_host)
cls.server_thread = run_loop_in_thread(cls.io_loop)
@classmethod
def tearDownClass(cls):
cls.io_loop.add_callback(cls.http_server.stop)
cls.io_loop.add_callback(cls.https_server.stop)
cls.io_loop.add_callback(cls.proxy_server.stop)
cls.io_loop.add_callback(cls.io_loop.stop)
cls.server_thread.join()
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPv6HTTPDummyServerTestCase(HTTPDummyServerTestCase):
host = '::1'
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPv6HTTPDummyProxyTestCase(HTTPDummyProxyTestCase):
http_host = 'localhost'
http_host_alt = '127.0.0.1'
https_host = 'localhost'
https_host_alt = '127.0.0.1'
https_certs = DEFAULT_CERTS
proxy_host = '::1'
proxy_host_alt = '127.0.0.1'
| 30.418605 | 77 | 0.636391 | import sys
import threading
import pytest
from tornado import ioloop, web
from dummyserver.server import (
SocketServerThread,
run_tornado_app,
run_loop_in_thread,
DEFAULT_CERTS,
HAS_IPV6,
)
from dummyserver.handlers import TestingApp
from dummyserver.proxy import ProxyHandler
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def consume_socket(sock, chunks=65536):
while not sock.recv(chunks).endswith(b'\r\n\r\n'):
pass
class SocketDummyServerTestCase(unittest.TestCase):
scheme = 'http'
host = 'localhost'
@classmethod
def _start_server(cls, socket_handler):
ready_event = threading.Event()
cls.server_thread = SocketServerThread(socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host)
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
@classmethod
def start_response_handler(cls, response, num=1, block_send=None):
ready_event = threading.Event()
def socket_handler(listener):
for _ in range(num):
ready_event.set()
sock = listener.accept()[0]
consume_socket(sock)
if block_send:
block_send.wait()
block_send.clear()
sock.send(response)
sock.close()
cls._start_server(socket_handler)
return ready_event
@classmethod
def start_basic_handler(cls, **kw):
return cls.start_response_handler(
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n'
b'\r\n', **kw)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'server_thread'):
cls.server_thread.join(0.1)
def assert_header_received(
self,
received_headers,
header_name,
expected_value=None
):
header_name = header_name.encode('ascii')
if expected_value is not None:
expected_value = expected_value.encode('ascii')
header_titles = []
for header in received_headers:
key, value = header.split(b': ')
header_titles.append(key)
if key == header_name and expected_value is not None:
self.assertEqual(value, expected_value)
self.assertIn(header_name, header_titles)
class IPV4SocketDummyServerTestCase(SocketDummyServerTestCase):
@classmethod
def _start_server(cls, socket_handler):
ready_event = threading.Event()
cls.server_thread = SocketServerThread(socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host)
cls.server_thread.USE_IPV6 = False
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
class HTTPDummyServerTestCase(unittest.TestCase):
scheme = 'http'
host = 'localhost'
host_alt = '127.0.0.1'
certs = DEFAULT_CERTS
@classmethod
def _start_server(cls):
cls.io_loop = ioloop.IOLoop()
app = web.Application([(r".*", TestingApp)])
cls.server, cls.port = run_tornado_app(app, cls.io_loop, cls.certs,
cls.scheme, cls.host)
cls.server_thread = run_loop_in_thread(cls.io_loop)
@classmethod
def _stop_server(cls):
cls.io_loop.add_callback(cls.server.stop)
cls.io_loop.add_callback(cls.io_loop.stop)
cls.server_thread.join()
@classmethod
def setUpClass(cls):
cls._start_server()
@classmethod
def tearDownClass(cls):
cls._stop_server()
class HTTPSDummyServerTestCase(HTTPDummyServerTestCase):
scheme = 'https'
host = 'localhost'
certs = DEFAULT_CERTS
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPV6HTTPSDummyServerTestCase(HTTPSDummyServerTestCase):
host = '::1'
class HTTPDummyProxyTestCase(unittest.TestCase):
http_host = 'localhost'
http_host_alt = '127.0.0.1'
https_host = 'localhost'
https_host_alt = '127.0.0.1'
https_certs = DEFAULT_CERTS
proxy_host = 'localhost'
proxy_host_alt = '127.0.0.1'
@classmethod
def setUpClass(cls):
cls.io_loop = ioloop.IOLoop()
app = web.Application([(r'.*', TestingApp)])
cls.http_server, cls.http_port = run_tornado_app(
app, cls.io_loop, None, 'http', cls.http_host)
app = web.Application([(r'.*', TestingApp)])
cls.https_server, cls.https_port = run_tornado_app(
app, cls.io_loop, cls.https_certs, 'https', cls.http_host)
app = web.Application([(r'.*', ProxyHandler)])
cls.proxy_server, cls.proxy_port = run_tornado_app(
app, cls.io_loop, None, 'http', cls.proxy_host)
cls.server_thread = run_loop_in_thread(cls.io_loop)
@classmethod
def tearDownClass(cls):
cls.io_loop.add_callback(cls.http_server.stop)
cls.io_loop.add_callback(cls.https_server.stop)
cls.io_loop.add_callback(cls.proxy_server.stop)
cls.io_loop.add_callback(cls.io_loop.stop)
cls.server_thread.join()
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPv6HTTPDummyServerTestCase(HTTPDummyServerTestCase):
host = '::1'
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPv6HTTPDummyProxyTestCase(HTTPDummyProxyTestCase):
http_host = 'localhost'
http_host_alt = '127.0.0.1'
https_host = 'localhost'
https_host_alt = '127.0.0.1'
https_certs = DEFAULT_CERTS
proxy_host = '::1'
proxy_host_alt = '127.0.0.1'
| true | true |
f73f03100409d8b6c8563a740e55306d7f0fdb9f | 40,516 | py | Python | spytest/apis/switching/pvst.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2021-09-15T17:09:13.000Z | 2021-09-15T17:09:13.000Z | spytest/apis/switching/pvst.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2020-02-05T16:51:53.000Z | 2020-02-05T16:51:53.000Z | spytest/apis/switching/pvst.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | null | null | null | import re
import utilities.utils as utils
from spytest import st
from spytest.utils import filter_and_select
from spytest.utils import exec_foreach, exec_all
import utilities.common as utility
import apis.switching.portchannel as portchannel
import apis.system.basic as basic
from utilities.parallel import ensure_no_exception
from datetime import datetime,timedelta
debug_log_path = r"/var/log/stplog"
SHOW_STP_VLAN = "show spanning_tree vlan {}"
BLOCKING_STATE = "BLOCKING"
def config_spanning_tree(dut, feature="pvst", mode="enable", vlan=None, cli_type='click'):
    """Enable or disable spanning tree globally or for one VLAN.

    :param dut: device under test
    :param feature: STP flavour (e.g. "pvst"); blanked for klish disable
    :param mode: "enable" or "disable"
    :param vlan: optional VLAN id; when given the command is VLAN-scoped
    :param cli_type: "click" or "klish"
    :return: None
    """
    st.log("{} spanning_tree {}".format(mode, feature))
    command = ''
    no_form = '' if mode == 'enable' else 'no'
    if cli_type == 'click':
        if vlan:
            command = "config spanning_tree vlan {} {}".format(mode, vlan)
        else:
            command = "config spanning_tree {} {}".format(mode, feature)
    elif cli_type == 'klish':
        if mode == 'disable':
            feature = ''
        if vlan:
            command = "{} spanning-tree vlan {}".format(no_form, vlan)
        else:
            command = "{} spanning-tree mode {}".format(no_form, feature)
    st.config(dut, command, type=cli_type)
def config_stp_parameters(dut, cli_type='click', no_form='', **kwargs):
    """Configure global STP parameters, one CLI command per keyword.

    :param dut: device under test
    :param cli_type: "click" or "klish"
    :param no_form: truthy to emit the klish "no" form
    :param kwargs: parameter-name -> value pairs
    :return: None (returns early on an unknown cli_type)
    """
    prefix = 'no' if no_form else ''
    for name, value in kwargs.items():
        if cli_type == 'click':
            command = "config spanning_tree {} {}".format(name, value)
        elif cli_type == 'klish':
            command = "{} spanning-tree {} {}".format(prefix, name, value)
        else:
            st.error("Invalid CLI type - {}".format(cli_type))
            return
        st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters(dut, vlan, **kwargs):
    """Configure per-VLAN STP parameters, one CLI command per keyword.

    :param dut: device under test
    :param vlan: VLAN id
    :param kwargs: parameter -> value pairs; ``cli_type`` ("click"/"klish")
        and ``no_form`` (bool) are consumed as options
    :return: None (returns early on an unknown cli_type)
    """
    cli_type = kwargs.pop('cli_type', 'click')
    no_form = 'no' if kwargs.pop('no_form', False) else ''
    # click keyword -> klish keyword translation
    keyword_map = {'forward_delay': 'forward-time', 'hello': 'hello-time', 'max_age': 'max-age'}
    for name, value in kwargs.items():
        if cli_type == 'click':
            command = "config spanning_tree vlan {} {} {}".format(name, vlan, value)
        elif cli_type == 'klish':
            command = "{} spanning-tree vlan {} {} {}".format(no_form, vlan, keyword_map.get(name, name), value)
        else:
            st.error("Invalid CLI type - {}".format(cli_type))
            return
        st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters_parallel(dut_list, thread=True, **kwargs):
    """
    Author : chaitanya lohith bollapragada
    This will configure the "config_stp_vlan_parameters" in parallel to all DUTs mentioned.
    :param dut_list: single DUT or list of DUTs
    :param vlan: list of vlans (one entry per DUT)
    :param priority: list of STP priorities (one entry per DUT)
    :param thread: True | False
    :return: True on success, False when list lengths mismatch or any call failed
    """
    st.log("Configuring STP vlan parameters in paraller on all DUT's ... ")
    dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
    vlan_li = list(kwargs['vlan']) if isinstance(kwargs['vlan'], list) else [kwargs['vlan']]
    priority_li = list(kwargs['priority']) if isinstance(kwargs['priority'], list) else [kwargs['priority']]
    if not len(dut_li) == len(vlan_li) == len(priority_li):
        return False
    params = list()
    # Bug fix: iterate the normalized list (dut_li); iterating dut_list
    # walked the characters of the string when a single DUT was passed.
    for i, each in enumerate(dut_li):
        params.append(utility.ExecAllFunc(config_stp_vlan_parameters, each, vlan_li[i], priority=priority_li[i]))
    [out, exceptions] = exec_all(thread, params)
    st.log(exceptions)
    return False if False in out else True
def config_stp_vlan_interface(dut, vlan, iface, value, mode='cost', **kwargs):
    """Set the per-VLAN, per-interface STP cost or priority.

    :param dut: device under test
    :param vlan: VLAN id
    :param iface: interface name
    :param value: cost/priority value to apply
    :param mode: "cost" or "priority" (anything else is logged and skipped)
    :param kwargs: ``cli_type`` ("click"/"klish") and ``no_form`` (bool)
    :return: None
    """
    cli_type = kwargs.get('cli_type', 'click')
    no_form = 'no' if kwargs.get('no_form') else ''
    if mode not in ('cost', 'priority'):
        st.log("Invalid mode = {}".format(mode))
        return
    if cli_type == 'click':
        command = "config spanning_tree vlan interface {} {} {} {} ".format(mode, vlan, iface, value)
    elif cli_type == 'klish':
        # klish names the priority knob "port-priority".
        attribute = 'port-priority' if mode == 'priority' else mode
        interface_data = utils.get_interface_number_from_name(iface)
        command = ['interface {} {}'.format(interface_data["type"], interface_data["number"]),
                   '{} spanning-tree vlan {} {} {}'.format(no_form, vlan, attribute, value), "exit"]
    else:
        st.error("Invalid CLI type - {}".format(cli_type))
        return
    st.config(dut, command, type=cli_type)
def config_stp_enable_interface(dut, iface, mode="enable"):
    """Enable (or disable) spanning tree on *iface* via the click CLI."""
    st.config(dut, "config spanning_tree interface {} {}".format(mode, iface))
def config_stp_interface_params(dut, iface, **kwargs):
    """Configure interface-level STP attributes on *iface*.

    :param dut: device under test
    :param iface: interface name (e.g. "Ethernet0")
    :param kwargs: attribute -> value pairs; keys handled explicitly are
        priority, cost and bpdu_guard_action; other keys (root_guard,
        bpdu_guard, portfast, uplink_fast) are emitted generically.
        ``cli_type`` ("click"/"klish") selects the CLI flavour; in klish
        mode a value of "disable" emits the "no" form of the command.
    :return: None
    """
    cli_type = kwargs.setdefault('cli_type', 'click')
    del kwargs['cli_type']
    # click keyword -> klish keyword translation (spacing is significant:
    # the values are spliced verbatim into the klish command string).
    click_2_klish = {"root_guard": " guard root", "bpdu_guard": "bpduguard ", "portfast": "portfast",
                     "uplink_fast": "uplinkfast"}
    if cli_type == 'click':
        for each_key in kwargs.keys():
            if each_key == "priority" or each_key == "cost":
                command = "config spanning_tree interface {} {} {}".format(each_key, iface, kwargs[each_key])
            elif each_key == "bpdu_guard_action":
                # bpdu_guard_action carries the shutdown action, not enable/disable.
                command = "config spanning_tree interface bpdu_guard enable {} {}".format(iface, kwargs[each_key])
            else:
                command = "config spanning_tree interface {} {} {}".format(each_key, kwargs[each_key], iface)
            st.config(dut, command)
    elif cli_type == 'klish':
        interface_data = utils.get_interface_number_from_name(iface)
        # All klish attributes are applied inside one interface sub-mode block.
        command = ['interface {} {}'.format(interface_data["type"], interface_data["number"])]
        for each_key in kwargs.keys():
            no_form = 'no' if kwargs[each_key] == 'disable' else ''
            if each_key == "priority" or each_key == "cost":
                command.append('spanning-tree {} {}'.format(each_key, kwargs[each_key]))
            elif each_key == "bpdu_guard_action":
                command.append('{} spanning-tree bpduguard port-shutdown'.format(no_form))
            else:
                command.append("{} spanning-tree {}".format(no_form, click_2_klish[each_key]))
        command.append('exit')
        st.config(dut, command, type=cli_type)
def config_stp_interface(dut, iface, mode="enable"):
    """Enable (or disable) spanning tree on *iface* (click CLI)."""
    # Trailing space kept for byte-compatibility with the historical command.
    st.config(dut, "config spanning_tree interface {} {} ".format(mode, iface))
def show_stp(dut, **kwargs):
    """Run "show spanning_tree" (optionally with a sub-command) and return parsed rows."""
    cli_type = kwargs.get("cli_type", 'click')
    if 'sub_cmd' in kwargs:
        command = "show spanning_tree {}".format(kwargs['sub_cmd'])
    else:
        command = "show spanning_tree"
    return st.show(dut, command, type=cli_type)
def show_stp_vlan(dut, vlan, cli_type="click"):
    """Return the parsed output of "show spanning_tree vlan <id>".

    :param dut: device under test
    :param vlan: VLAN id
    :param cli_type: CLI flavour to execute through
    """
    st.log("show spanning_tree vlan <id>")
    return st.show(dut, SHOW_STP_VLAN.format(vlan), type=cli_type)
def show_stp_vlan_iface(dut, vlan, iface, cli_type="click"):
    """Return parsed "show spanning_tree vlan interface" output for one port.

    :param dut: device under test
    :param vlan: VLAN id
    :param iface: interface name
    :param cli_type: "click" or "klish"
    :return: parsed rows, or an empty list for an unsupported cli_type
    """
    if cli_type == "click":
        command = "show spanning_tree vlan interface {} {}".format(vlan, iface)
    elif cli_type == "klish":
        command = "show spanning_tree vlan {} interface {}".format(vlan, iface)
    else:
        st.log("Unsupported CLI type {}".format(cli_type))
        return list()
    # Bug fix: the CLI type was previously passed as the literal string
    # "cli_type", so klish sessions executed through the wrong CLI handler.
    return st.show(dut, command, type=cli_type)
def show_stp_stats(dut):
    """Return the parsed output of "show spanning_tree statistics"."""
    return st.show(dut, "show spanning_tree statistics")
def show_stp_stats_vlan(dut, vlan):
    """Return parsed per-VLAN spanning-tree statistics."""
    # Trailing space kept for byte-compatibility with the historical command.
    command = "show spanning_tree statistics vlan {} ".format(vlan)
    return st.show(dut, command)
def debug_stp(dut, *argv):
    """Enable STP debug logging, optionally scoped by extra arguments.

    Usage:
      debug_stp(dut)
      debug_stp(dut, "reset")
      debug_stp(dut, "vlan 100", "interface Ethernet0")
      debug_stp(dut, "vlan 100 -d", "interface Ethernet0 -d")
    :return: True
    """
    base = 'debug spanning_tree'
    if not argv:
        st.config(dut, base)
    else:
        for option in argv:
            st.config(dut, "{} {}".format(base, option))
    return True
def get_debug_stp_log(dut, filter_list=[]):
    """Return the stplog lines, optionally piped through one grep per filter.

    :param dut: device under test
    :param filter_list: a pattern or list of patterns to grep for
    :return: list of matching log lines
    """
    # NOTE(review): mutable default kept for interface compatibility; it is
    # never mutated here.
    filters = filter_list if isinstance(filter_list, list) else [filter_list]
    command = "cat {}".format(debug_log_path)
    for pattern in filters:
        command += " | grep '{}'".format(pattern)
    raw = st.show(dut, command, skip_tmpl=True, skip_error_check=True)
    return utils.remove_last_line_from_string(raw).split('\n')
def clear_debug_stp_log(dut):
    """Truncate the stplog debug file on the DUT and return True."""
    st.config(dut, "dd if=/dev/null of={}".format(debug_log_path))
    return True
def verify_stp_vlan_iface(dut, **kwargs):
    """Verify per-VLAN/interface STP attributes.

    :param kwargs: must include ``vlan`` and ``iface``; every key/value pair
        (including those two) is matched against the parsed output.
    :return: True when all pairs match, else False
    """
    output = show_stp_vlan_iface(dut, kwargs["vlan"], kwargs["iface"])
    for name, expected in kwargs.items():
        if not filter_and_select(output, None, {name: expected}):
            st.log("{} and {} is not match ".format(name, expected))
            return False
    return True
def verify_stp_statistics_vlan(dut, **kwargs):
    """Verify per-VLAN STP statistics.

    :param kwargs: must include ``vlan``; every key/value pair (including
        ``vlan``) is matched against the parsed statistics output.
    :return: True when all pairs match, else False
    """
    output = show_stp_stats_vlan(dut, kwargs["vlan"])
    for name, expected in kwargs.items():
        if not filter_and_select(output, None, {name: expected}):
            st.log("{} and {} is not match ".format(name, expected))
            return False
    return True
def check_dut_is_root_bridge_for_vlan(dut, vlanid):
    """Return True when *dut* is the STP root bridge for *vlanid*."""
    entry = st.show(dut, SHOW_STP_VLAN.format(vlanid))[0]
    # Root bridge: its own bridge id equals the root id and the root
    # port column reads "Root".
    return entry["rt_id"] == entry["br_id"] and entry["rt_port"] == "Root"
def get_stp_bridge_param(dut, vlanid, bridge_param):
    """
    Return one bridge-level field from 'show spanning_tree vlan <id>'.

    :param dut: device handle
    :param vlanid: VLAN instance to query
    :param bridge_param: one of stp_mode, vid, inst, br_id, br_maxage,
        br_hello, br_fwddly, br_hold, br_lasttopo, br_topoch, rt_id,
        rt_pathcost, rt_desigbridgeid, rt_port, rt_maxage, rt_hello, rt_fwddly
    :return: the requested field from the first parsed row, or None when an
        invalid field name is supplied
    """
    valid_params = ('stp_mode', 'vid', 'inst', 'br_id', 'br_maxage',
                    'br_hello', 'br_fwddly', 'br_hold', 'br_lasttopo',
                    'br_topoch', 'rt_id', 'rt_pathcost', 'rt_desigbridgeid',
                    'rt_port', 'rt_maxage', 'rt_hello', 'rt_fwddly')
    if bridge_param not in valid_params:
        st.error("Please provide the valid stp bridge parameter")
        return
    stp_output = st.show(dut, SHOW_STP_VLAN.format(vlanid))
    return stp_output[0][bridge_param]
def get_stp_port_param(dut, vlanid, ifname, ifparam):
    """
    Return one port-level field from
    'show spanning_tree vlan <id> interface <ifname>'.

    :param dut: device handle
    :param vlanid: VLAN instance to query
    :param ifname: interface name
    :param ifparam: one of port_name, port_priority, port_pathcost,
        port_portfast, port_uplinkfast, port_state, port_desigcost,
        port_desigrootid, port_desigbridgeid
    :return: the requested field, or None when no output / bad field name
    """
    valid_params = ('port_name', 'port_priority', 'port_pathcost',
                    'port_portfast', 'port_uplinkfast', 'port_state',
                    'port_desigcost', 'port_desigrootid', 'port_desigbridgeid')
    if ifparam not in valid_params:
        st.error("Please provide the valid stp port parameter")
        return
    cmd = SHOW_STP_VLAN.format(vlanid) + " interface {}".format(ifname)
    rows = st.show(dut, cmd)
    return rows[0][ifparam] if rows else None
def get_default_root_bridge(dut_list):
    """
    With default STP config the bridge with the numerically lowest base MAC
    wins the root election; return that DUT, or None when no MAC data is
    available.
    """
    mac_by_dut = basic.get_dut_mac_address_thread(dut_list)
    if not mac_by_dut:
        return None
    winner = min(mac_by_dut, key=mac_by_dut.get)
    return [dut for dut in dut_list if dut == winner][0]
def get_duts_mac_address(duts):
    """
    Map each DUT to its base MAC address (colons stripped).

    :param duts: list of DUT handles
    :return: dict {dut: mac_address_without_colons}
    """
    duts_mac_addresses = {}
    cmd = "show platform syseeprom"
    for dut in duts:
        if st.is_vsonic(dut):
            # Virtual SONiC has no syseeprom; read the MAC from ifconfig.
            duts_mac_addresses[dut] = basic.get_ifconfig_ether(dut)
            continue
        eeprom_details = st.show(dut, cmd, skip_error_check=True)
        if not eeprom_details:
            # Retry a few times before declaring the EEPROM data missing.
            iteration = 3
            for _ in range(iteration):
                st.wait(2)
                eeprom_details = st.show(dut, cmd, skip_error_check=True)
                if eeprom_details:
                    break
            if not eeprom_details:
                # Bug fix: the old guard "i >= iteration + 1" could never be
                # true (i never exceeded iteration), so exhausted retries
                # were silently ignored instead of reported.
                st.log("EEPROM data not found for {}".format(dut))
                st.report_fail("eeprom_data_not_found", dut)
        st.log("EEPROM DETAILS -- {}".format(eeprom_details))
        if eeprom_details:
            for data in eeprom_details:
                if "tlv_name" in data and data["tlv_name"] == "Base MAC Address":
                    duts_mac_addresses[dut] = data["value"].replace(":","")
    st.log("DUT MAC ADDRESS -- {}".format(duts_mac_addresses))
    return duts_mac_addresses
def _get_duts_list_in_order(vars):
    """Return (mac, dut) tuples sorted by ascending base MAC address."""
    mac_map = get_duts_mac_address(vars["dut_list"])
    return sorted(zip(mac_map.values(), mac_map.keys()))
def get_ports_based_on_state(vars, vlanid, port_state, dut=None, cli_type='click'):
    """
    Return the ports of a VLAN instance that are in the given STP state on a
    non-root bridge (the highest-MAC DUT when no dut is supplied).

    :param vars: testbed vars mapping (e.g. D1 -> dut handle)
    :param vlanid: VLAN instance to inspect
    :param port_state: STP state string to filter on
    :param dut: optional explicit dut handle; default picks the highest MAC
    :param cli_type: CLI flavour for the show command
    """
    if dut is None:
        ordered = _get_duts_list_in_order(vars)
        target = ordered[-1][1]  # DUT with the numerically largest MAC
    else:
        target = dut
    selected_non_root = [key for key, value in vars.items() if value == target][0]
    rows = show_stp_vlan(vars[selected_non_root], vlanid, cli_type=cli_type)
    return [row["port_name"] for row in rows
            if row["port_state"] == port_state and int(row["vid"]) == vlanid]
def poll_for_root_switch(dut, vlanid, iteration=20, delay=1):
    """
    Poll until the DUT becomes root bridge for the VLAN or retries run out.

    :param iteration: maximum number of polls
    :param delay: seconds to wait between polls
    :return: True on success, False when the limit is hit
    """
    attempt = 1
    while not check_dut_is_root_bridge_for_vlan(dut, vlanid):
        if attempt > iteration:
            st.log("Max iterations {} reached".format(attempt))
            return False
        attempt += 1
        st.wait(delay)
    st.log("Observed dut is root bridge {} iteration".format(attempt))
    return True
def poll_for_stp_status(dut, vlanid, interface, status, iteration=20, delay=1):
    """
    Poll until the interface reaches the given STP state in the VLAN.

    :param iteration: maximum number of polls
    :param delay: seconds to wait between polls
    :return: True on success, False when the limit is hit
    """
    attempt = 1
    while get_stp_port_param(dut, vlanid, interface, "port_state") != status:
        if attempt > iteration:
            st.log("Max iterations {} reached".format(attempt))
            return False
        attempt += 1
        st.wait(delay)
    st.log("Port status is changed to {} after {} sec".format(status, attempt))
    return True
def get_root_guard_details(dut, vlan=None, ifname=None, rg_param="rg_timeout"):
    """
    Read 'show spanning_tree root_guard'. Without vlan/ifname the global
    field (root-guard timeout) is returned as an int; otherwise the per-port
    field of the row matching the vlan/interface pair.
    """
    rows = st.show(dut, "show spanning_tree root_guard")
    if vlan is None and ifname is None:
        return int(rows[0][rg_param])
    matches = [row[rg_param] for row in rows
               if row["rg_ifname"] == ifname and int(row["rg_vid"]) == vlan]
    return matches[0]
def check_rg_current_state(dut, vlan, ifname):
    """
    Return True when root guard reports 'Consistent state' for the given
    vlan/interface pair.
    """
    status = get_root_guard_details(dut, vlan, ifname, "rg_status")
    return status == "Consistent state"
def check_bpdu_guard_action(dut, ifname, **kwargs):
    """
    Verify BPDU-guard configuration and operational status for an interface.

    :param dut: device handle
    :param ifname: interface to check
    :param kwargs: config_shut - expected "shutdown configured" value
                   opr_shut   - expected "operationally shut" value
    :return: True when both values match, else False
    """
    show_out = st.show(dut, "show spanning_tree bpdu_guard")
    if_rows = [row for row in show_out if row['bg_ifname'] == ifname]
    if not if_rows:
        # Robustness fix: the old code indexed [0] unconditionally and raised
        # IndexError when the interface was absent; report a clean mismatch.
        st.log("No bpdu_guard entry found for {}".format(ifname))
        return False
    if_out = if_rows[0]
    return (kwargs['config_shut'] == if_out['bg_cfg_shut']
            and kwargs['opr_shut'] == if_out['bg_oper_shut'])
def stp_clear_stats(dut, **kwargs):
    """
    Clear spanning-tree statistics, optionally scoped to a vlan or a
    vlan/interface pair.

    :param dut: device handle
    :param kwargs: vlan - vlan id; interface - interface name
    """
    cmd = "sonic-clear spanning_tree statistics"
    if 'vlan' in kwargs and 'interface' not in kwargs:
        cmd += ' vlan {}'.format(kwargs['vlan'])
    if 'vlan' in kwargs and 'interface' in kwargs:
        cmd += ' vlan-interface {} {}'.format(kwargs['vlan'], kwargs['interface'])
    # The command output is not used; the unused 'output' binding is removed.
    st.config(dut, cmd)
def get_stp_stats(dut, vlan, interface, param):
    """
    Return one counter from 'show spanning_tree statistics vlan'.

    :param dut: device handle
    :param vlan: vlan id
    :param interface: port whose counter is wanted
    :param param: tx_bpdu / rx_bpdu / tx_tcn / rx_tcn
    :return: the counter as int, or None when no matching row exists
    """
    output = show_stp_stats_vlan(dut, vlan)
    value_list = [row[param] for row in output
                  if int(row['st_vid']) == vlan and row['st_portno'] == interface]
    utils.banner_log(value_list)
    # Bug fix: guard on the filtered list rather than the raw output;
    # previously a non-empty output with no matching interface row raised
    # IndexError on value_list[0].
    return int(value_list[0]) if value_list else None
def verify_stp_ports_by_state(dut, vlan, port_state, port_list, cli_type='click'):
    """
    Check that every port in port_list is in the given STP state in the VLAN.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)

    :return: True when all listed ports are in the state, else False
    """
    wanted = list(port_list) if isinstance(port_list, list) else [port_list]
    rows = show_stp_vlan(dut, vlan, cli_type=cli_type)
    in_state = [row["port_name"] for row in rows
                if row["port_state"] == port_state and int(row["vid"]) == vlan]
    result = True
    for port in wanted:
        if port in in_state:
            st.log("{} is {} state ".format(port, port_state))
        else:
            st.log("{} is not {} state ".format(port, port_state))
            result = False
    return result
def get_stp_port_list(dut, vlan, exclude_port=None, cli_type='click'):
    """
    Return all port names of the VLAN STP instance, minus the excluded ports.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)

    :param exclude_port: single port or list of ports to drop from the result
    """
    # Bug fix: avoid the mutable-default-argument pitfall (was exclude_port=[]).
    if exclude_port is None:
        exclude_port = []
    ex_port_li = list(exclude_port) if isinstance(exclude_port, list) else [exclude_port]
    stp_output = show_stp_vlan(dut, vlan, cli_type=cli_type)
    ports_list = [row["port_name"] for row in stp_output]
    for each_int in ex_port_li:
        if each_int in ports_list:
            ports_list.remove(each_int)
            st.log("{} is excluded".format(each_int))
    return ports_list
def get_stp_root_port(dut, vlan, cli_type='click'):
    """
    Return the Root (towards-root forwarding) port for the VLAN, or False
    when no output is available or this DUT is itself the root bridge.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    """
    rows = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not rows:
        st.error("No Root/Forwarding port found")
        return False
    root_port = rows[0]['rt_port']
    if root_port == "Root":
        st.error("Given device is ROOT Bridge.")
        return False
    return root_port
def get_stp_next_root_port(dut, vlan, cli_type='click'):
    """
    API will return Next possible Root/Forwarding port of the device in the VLAN.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    :param dut: device handle (must not be the root bridge)
    :param vlan: vlan instance to inspect
    :param cli_type: CLI flavour for the show command
    :return: next candidate root port name, or None when it cannot be derived
    """
    partner = None
    next_root_port = None
    # Sort list1 by the corresponding values of list2 (here: ports by ifindex).
    sort_list = lambda list1, list2: [x for _, x in sorted(zip(list2, list1))]
    out = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not out:
        st.error("No Initial Root/Forwarding port found")
        return next_root_port
    if out[0]['rt_port'] == "Root":
        st.error("Given device is ROOT Bridge.")
        return next_root_port
    partner_ports = st.get_dut_links(dut)
    root_port = out[0]['rt_port']
    root_cost = int(filter_and_select(out, ['port_pathcost'], {'port_name': root_port})[0]['port_pathcost'])
    st.log('root_port : {}, root_cost: {}'.format(root_port, root_cost))
    # Finding the Root port connected partner
    for each in partner_ports:
        if not partner:
            if root_port == each[0]:
                partner = each[1]
                st.log("partner : {}".format(partner))
    if not partner:
        st.error("No Partner found for Root/Forwarding Port.")
        return next_root_port
    # Dut Partner port mapping (local port <-> partner port, both directions)
    dut_partner_ports = st.get_dut_links(dut, partner)
    dut_partner_ports_map = {all[0]: all[2] for all in dut_partner_ports}
    dut_partner_ports_map_rev = {all[2]: all[0] for all in dut_partner_ports}
    st.log('dut_partner_ports_map : {}'.format(str(dut_partner_ports_map)))
    st.log('dut_partner_ports_map_rev : {}'.format(str(dut_partner_ports_map_rev)))
    # Preparing DATA to process and find the next Root/Forwarding port.
    # cut_data maps candidate port -> [ifindex, stp_state, path_cost]
    cut_data = {}
    pc_list = [each['teamdev'] for each in portchannel.get_portchannel_list(partner)]
    for each in out:
        port = each['port_name']
        if "Ethernet" in port and port in dut_partner_ports_map:
            # Physical link towards the partner: translate to the partner port.
            port = dut_partner_ports_map[each['port_name']]
            ifindex = int(re.findall(r'\d+', port)[0])
            cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
        elif port in pc_list:
            # Port-channel member of the partner.
            ifindex = int(re.findall(r'\d+', port)[0])
            cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
        else:
            pass
    st.log('cut_data == {}'.format(str(cut_data)))
    # cost_vs_port: path_cost -> [[ethernet ports], [port-channel ports]],
    # excluding the current root port itself.
    cost_vs_port = {}
    for each in cut_data:
        if each != dut_partner_ports_map[root_port]:
            if 'Ethernet' in each:
                if cut_data[each][2] not in cost_vs_port:
                    cost_vs_port[cut_data[each][2]] = [[each], []]
                else:
                    cost_vs_port[cut_data[each][2]][0].append(each)
            else:
                if cut_data[each][2] not in cost_vs_port:
                    cost_vs_port[cut_data[each][2]] = [[], [each]]
                else:
                    cost_vs_port[cut_data[each][2]][1].append(each)
    sorted_cost = sorted(cost_vs_port.keys())
    st.log("cost_vs_port : {}".format(cost_vs_port))
    st.log("sorted_cost : {}".format(sorted_cost))
    # Logic to find next Root/Forwarding port
    if root_cost in cost_vs_port and (len(cost_vs_port[root_cost][0]) or len(cost_vs_port[root_cost][1])):
        st.debug("When 2 or more ports has configured with same root port cost.")
        # Ethernet candidates win over port-channels; lowest ifindex wins.
        if len(cost_vs_port[root_cost][0]):
            port_list = cost_vs_port[root_cost][0]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return dut_partner_ports_map_rev[next_root_port]
        else:
            port_list = cost_vs_port[root_cost][1]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return next_root_port
    elif len(sorted_cost):
        st.debug("When NO 2 or more ports has root port cost configured. So checking next larger cost ports")
        next_root_cost = sorted_cost[0]
        if len(cost_vs_port[next_root_cost][0]):
            port_list = cost_vs_port[next_root_cost][0]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return dut_partner_ports_map_rev[next_root_port]
        else:
            port_list = cost_vs_port[next_root_cost][1]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return next_root_port
    else:
        st.error("No Match")
    return next_root_port
def config_stp_in_parallel(dut_list, feature="pvst", mode="enable", vlan=None, thread=True):
    """
    Configure STP on every DUT concurrently.
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)

    :param feature: STP flavour to configure (e.g. "pvst")
    :param mode: "enable" or "disable"
    :param vlan: optional vlan scope
    :param thread: run the per-DUT configs concurrently when True
    """
    st.log("Configuring {} on all the DUT's with mode as {}".format(feature.capitalize(), mode))
    duts = [str(each) for each in dut_list] if isinstance(dut_list, list) else [dut_list]
    params = [[config_spanning_tree, dut, feature, mode, vlan] for dut in duts]
    if params:
        exec_all(thread, params)
def show_stp_in_parallel(dut_list, thread=True, cli_type='click'):
    """
    Run 'show spanning_tree' on every DUT concurrently.
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    """
    st.log("Displaying STP result on all the DUT's in parallel ....")
    exec_foreach(thread, utility.make_list(dut_list), show_stp, cli_type=cli_type)
def get_root_bridge_for_vlan(dut_vlan_data, thread=True):
    """
    For each dut->vlan pair, check in parallel whether the DUT is root
    bridge for its vlan.

    :param dut_vlan_data: dict mapping dut handle -> vlan id
    :param thread: run the checks concurrently when True
    :return: dict mapping dut handle -> bool result
    """
    params = list()
    result = dict()
    for dut, vlan in dut_vlan_data.items():
        params.append([check_dut_is_root_bridge_for_vlan, dut, vlan])
    if params:
        [out, exceptions] = exec_all(thread, params)
        utils.banner_log("Getting root bridge details")
        for i, response in enumerate(out):
            # params[i][1] is the dut handle the i-th check ran against.
            result[params[i][1]] = response
    # Consistency fix: log through the framework instead of bare print().
    st.log(result)
    return result
def check_for_single_root_bridge_per_vlan(dut_list, vlan_list, dut_vlan_data, cli_type='click'):
    """
    API to check for single root bridge per VLAN
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut_list: DUTs to query (parallel show per vlan)
    :param vlan_list: one vlan per DUT; lengths must match
    :param dut_vlan_data: dict dut -> vlan expected to be root for that vlan
    :param cli_type: CLI flavour for the show commands
    :return: True when every vlan has exactly one root and it is the expected
             DUT; otherwise reports failure via st.report_fail
    """
    st.log("Verifying the single root bridge per vlan ...")
    dut_li = list([str(e) for e in dut_list]) if isinstance(dut_list, list) else [dut_list]
    vlan_li = list([str(e) for e in vlan_list]) if isinstance(vlan_list, list) else [vlan_list]
    if len(vlan_list) != len(dut_list):
        st.log("Invalid data provided to check the root bridge per vlan ...")
        st.report_fail("invalid_data_for_root_bridge_per_vlan")
    for vlan in vlan_li:
        root_count = 0
        params = list()
        for dut in dut_li:
            params.append([show_stp_vlan, dut, vlan, cli_type])
        # Run the per-DUT shows for this vlan concurrently.
        stp_output, exceptions = exec_all(True, params)
        st.log(stp_output)
        st.log(exceptions)
        for value in exceptions:
            st.log("Exceptions observed {}".format(value))
            if value is not None:
                st.log("Exception occured {}".format(value))
                return False
        if not stp_output:
            st.log("STP output not found on {} for {} instance".format(dut_li, vlan))
            st.report_fail("stp_output_not_found", dut_li, vlan)
        for index, stp_out in enumerate(stp_output):
            if len(stp_out) <= 0:
                st.log("STP OUTPUT IS NOT OBSERVED --- {}".format(stp_out))
                st.report_fail("stp_output_not_found")
            root_bridge = stp_out[0]["rt_id"]
            dut_bridge_id = stp_out[0]["br_id"]
            # A DUT is root when its own bridge id equals the root id.
            if root_bridge == dut_bridge_id and stp_out[0]["rt_port"] == "Root":
                if dut_vlan_data[dut_li[index]] != int(vlan.strip()):
                    st.log("Expected DUT {} is not root for {} instance".format(dut_li[index], vlan))
                    st.report_fail("expected_dut_not_root", dut_li[index], vlan)
                root_count += 1
                if root_count > 1:
                    st.log("Observed more than 1 root bridge per {} instance".format(vlan))
                    st.report_fail("observed_more_than_1_root_bridge", vlan)
    return True
def verify_root_bridge_interface_state(dut, vlan, interface_list, cli_type='click'):
    """
    Verify that the root bridge has all expected interfaces FORWARDING.
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)

    :param interface_list: interfaces expected in FORWARDING state
    :return: True when the FORWARDING count equals len(interface_list)
    """
    fail_states = ["BLOCKING", "DISABLED", "DISCARDING"]
    pass_states = ["FORWARDING"]
    rows = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not rows:
        st.log("No STP data found for {} and {} instance".format(dut, vlan))
        return False
    forwarding_counter = 0
    for data in rows:
        if data["port_name"] not in interface_list:
            st.log("Interface {} not found in expected list ...".format(data["port_name"]))
        if data["port_state"] in fail_states:
            st.log("Observed that interface {} state is {} for root bridge".format(data["port_name"], fail_states))
        if data["port_state"] in pass_states:
            forwarding_counter += 1
    return forwarding_counter == len(interface_list)
def poll_root_bridge_interfaces(dut_vlan_list, interfaces_list, iteration=30, delay=1):
    """
    Poll every root bridge until its expected interfaces are all FORWARDING.
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)

    :param dut_vlan_list: dict dut -> vlan
    :param interfaces_list: dict dut -> expected FORWARDING interfaces
    :return: True when every DUT verifies within the retry budget
    """
    st.log("Polling for root bridge interfaces ...")
    if not (dut_vlan_list and interfaces_list):
        st.log("Empty DUT VLAN LIST dut_vlan_list AND INTERFACE LIST interfaces_list")
        return False
    passed = 0
    for dut, vlan in dut_vlan_list.items():
        attempt = 1
        while True:
            if verify_root_bridge_interface_state(dut, vlan, interfaces_list[dut]):
                st.log("Root bridge interface verification succeeded.")
                passed += 1
                break
            if attempt > iteration:
                st.log("Max iteration limit reached.")
                break
            attempt += 1
            st.wait(delay)
    if passed != len(dut_vlan_list):
        st.log("Number of root DUTs check failed ...")
        return False
    return True
def verify_root_bridge_on_stp_instances(dut_list, vlan, bridge_identifier):
    """
    Verify that every DUT reports the given identifier as the root bridge
    for the vlan.

    :param dut_list: one DUT or a list of DUTs
    :param vlan: vlan instance to query
    :param bridge_identifier: expected root bridge id
    :return: True only when ALL DUTs match; False otherwise
    """
    dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
    params = [[get_stp_bridge_param, dut, vlan, "rt_id"] for dut in dut_li]
    if not params:
        return False
    [out, exceptions] = exec_all(True, params)
    st.log("#########OUTPUT###########")
    st.log(out)
    st.log(exceptions)
    for value in exceptions:
        st.log("Exceptions observed {}".format(value))
        if value is not None:
            return False
    for identifier in out:
        st.log("Comparing ROOT bridge ID {} with Provided ID {}".format(identifier, bridge_identifier))
        if identifier != bridge_identifier:
            st.log("Mismatch in root and bridge identifiers")
            return False
        st.log("Root Bridge Identifier {} is matched with provided identifier {}".format(identifier, bridge_identifier))
    # Bug fix: the old code returned True from inside the loop after
    # inspecting only the FIRST identifier; now every DUT must match.
    return True
def config_bpdu_filter(dut, **kwargs):
    """
    API to config BPDU filter for global and interface level
    Usage:
    ======
    Interface level config:
    =========================
    config_bpdu_filter(dut, interface="Ethernet8", action="enable", cli_type="klish")
    config_bpdu_filter(dut, interface="Ethernet8", no_form=True, cli_type="klish")
    Global level config:
    ====================
    config_bpdu_filter(dut, cli_type="klish")
    config_bpdu_filter(dut, ,no_form=True, cli_type="klish")
    :param dut:
    :param kwargs: interface - scope to one interface (global when absent)
                   no_form   - remove the config when truthy
                   action    - "enable"/"disable" (interface level only)
                   cli_type  - defaults to "klish"
    :return: True when a command was issued, False otherwise
    """
    cli_type = kwargs.get("cli_type", "klish")
    interface=kwargs.get("interface",None)
    no_form=kwargs.get("no_form", None)
    action=kwargs.get("action", "enable")
    commands = list()
    if not interface:
        # Global default: applies to all edge ports.
        command = "spanning-tree edge-port bpdufilter default"
        if no_form:
            command = "no {}".format(command)
        commands.append(command)
    else:
        interface_details = utils.get_interface_number_from_name(interface)
        if not interface_details:
            st.log("Interface details not found {}".format(interface_details))
            return False
        commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number")))
        command = "spanning-tree bpdufilter"
        # no_form wins over action; with neither, no command is emitted.
        if no_form:
            command = "no {}".format(command)
        elif action:
            command = "{} {}".format(command, action)
        else:
            command = ""
        if command:
            commands.append(command)
    if commands:
        st.config(dut, commands, type=cli_type)
        return True
    return False
def config_stp_root_bridge_by_vlan(stp_data):
    """
    Configure per-vlan bridge priority on several DUTs in parallel.

    :param stp_data: {dut: {"vlan": id, "priority": value}, ...}
    """
    tasks = [utility.ExecAllFunc(config_stp_vlan_parameters, dut, info["vlan"], priority=info["priority"])
             for dut, info in stp_data.items()]
    [out, exceptions] = exec_all(True, tasks)
    ensure_no_exception(exceptions)
def config_port_type(dut, interface, stp_type="rpvst", port_type="edge", no_form=False, cli_type="klish"):
    """
    Configure or remove the spanning-tree port type on an interface (RPVST).

    :param no_form: remove the port-type config when True
    :return: True on success, False when the interface name cannot be parsed
    """
    interface_details = utils.get_interface_number_from_name(interface)
    if not interface_details:
        st.log("Interface details not found {}".format(interface_details))
        return False
    if no_form:
        port_cmd = "no spanning-tree port type"
    else:
        port_cmd = "spanning-tree port type {}".format(port_type)
    commands = ["interface {} {}".format(interface_details.get("type"), interface_details.get("number")),
                port_cmd,
                'exit']
    st.config(dut, commands, type=cli_type)
    return True
def show_stp_config_using_klish(dut, type="", vlan="", intf=""):
    """
    Display spanning-tree state via the klish CLI (debug aid; output is not
    parsed).

    :param type: statistics / root_guard / bpdu_guard / vlan_intf
    :param vlan: vlan id, used by most types
    :param intf: interface, used by the vlan_intf type
    """
    command = ""
    if type == 'statistics':
        command = "show spanning-tree counters vlan {}".format(vlan)
    elif type == 'root_guard':
        command = "show spanning-tree inconsistentports vlan {}".format(vlan)
    elif type == 'bpdu_guard':
        command = "show spanning-tree bpdu-guard"
    elif type == "vlan_intf":
        command = "show spanning-tree vlan {} interface {}".format(vlan, intf)
    # Bug fix: an unknown 'type' previously raised NameError because
    # 'command' was never assigned; now it is simply a no-op.
    if command:
        st.show(dut, command, type="klish", skip_tmpl=True)
def verify_stp_intf_status(dut, vlanid, interface, status):
    """
    One-shot check that the interface is in the given STP state for the vlan.

    :return: True when the state matches, else False
    """
    current = get_stp_port_param(dut, vlanid, interface, "port_state")
    if current == status:
        st.log("Port status is changed to {}".format(status))
        return True
    return False
import utilities.utils as utils
from spytest import st
from spytest.utils import filter_and_select
from spytest.utils import exec_foreach, exec_all
import utilities.common as utility
import apis.switching.portchannel as portchannel
import apis.system.basic as basic
from utilities.parallel import ensure_no_exception
from datetime import datetime,timedelta
debug_log_path = r"/var/log/stplog"
SHOW_STP_VLAN = "show spanning_tree vlan {}"
BLOCKING_STATE = "BLOCKING"
def config_spanning_tree(dut, feature="pvst", mode="enable", vlan=None, cli_type='click'):
    """
    Enable or disable spanning tree globally or for one vlan.

    :param dut: device handle
    :param feature: STP flavour (e.g. "pvst")
    :param mode: "enable" or "disable"
    :param vlan: when given, scope the config to this vlan
    :param cli_type: "click" or "klish"
    """
    command = ''
    no_form = 'no'
    if mode == 'enable':
        no_form = ''
    st.log("{} spanning_tree {}".format(mode, feature))
    if cli_type == 'click':
        if vlan:
            command = "config spanning_tree vlan {} {}".format(mode, vlan)
        else:
            command = "config spanning_tree {} {}".format(mode, feature)
    elif cli_type == 'klish':
        # klish disable drops the mode argument entirely.
        if mode == 'disable':
            feature = ''
        if vlan:
            command = "{} spanning-tree vlan {}".format(no_form, vlan)
        else:
            command = "{} spanning-tree mode {}".format(no_form, feature)
    st.config(dut, command, type=cli_type)
def config_stp_parameters(dut, cli_type='click', no_form='', **kwargs):
    """
    Configure global STP parameters; each kwarg becomes one CLI command.

    :param dut: device handle
    :param cli_type: "click" or "klish"
    :param no_form: truthy -> prefix klish commands with "no"
    :param kwargs: parameter-name -> value pairs
    """
    no_form = 'no' if no_form else ''
    for each_key in kwargs.keys():
        if cli_type == 'click':
            command = "config spanning_tree {} {}".format(each_key, kwargs[each_key])
        elif cli_type == 'klish':
            command = "{} spanning-tree {} {}".format(no_form, each_key, kwargs[each_key])
        else:
            st.error("Invalid CLI type - {}".format(cli_type))
            return
        st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters(dut, vlan, **kwargs):
    """
    Configure per-vlan STP parameters; each kwarg becomes one CLI command.

    :param dut: device handle
    :param vlan: vlan id to scope the commands to
    :param kwargs: parameter-name -> value pairs, plus optional
                   cli_type ("click"/"klish") and no_form (bool, klish only)
    """
    cli_type = kwargs.setdefault('cli_type', 'click')
    no_form = 'no' if kwargs.setdefault('no_form', False) else ''
    del kwargs['cli_type']
    del kwargs['no_form']
    # click parameter names translated to their klish equivalents.
    click_2_klish = {'forward_delay': 'forward-time', 'hello': 'hello-time', 'max_age': 'max-age'}
    for each_key, value in kwargs.items():
        if cli_type == 'click':
            command = "config spanning_tree vlan {} {} {}".format(each_key, vlan, value)
        elif cli_type == 'klish':
            each_key1 = click_2_klish.get(each_key, each_key)
            command = "{} spanning-tree vlan {} {} {}".format(no_form, vlan, each_key1, value)
        else:
            st.error("Invalid CLI type - {}".format(cli_type))
            return
        st.config(dut, command, type=cli_type)
def config_stp_vlan_parameters_parallel(dut_list, thread=True, **kwargs):
    """
    Configure vlan priority on several DUTs concurrently.

    :param dut_list: DUT(s); kwargs['vlan'] and kwargs['priority'] must be
                     parallel lists of the same length
    :param thread: run concurrently when True
    :return: False when lengths differ or any per-DUT config returned False
    """
    st.log("Configuring STP vlan parameters in paraller on all DUT's ... ")
    dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
    vlan_li = list(kwargs['vlan']) if isinstance(kwargs['vlan'], list) else [kwargs['vlan']]
    priority_li = list(kwargs['priority']) if isinstance(kwargs['priority'], list) else [kwargs['priority']]
    if not len(dut_li) == len(vlan_li) == len(priority_li):
        return False
    params = list()
    for i,each in enumerate(dut_list):
        params.append(utility.ExecAllFunc(config_stp_vlan_parameters, each, vlan_li[i], priority=priority_li[i]))
    [out, exceptions] = exec_all(thread, params)
    st.log(exceptions)
    return False if False in out else True
def config_stp_vlan_interface(dut, vlan, iface, value, mode='cost', **kwargs):
    """
    Configure per-vlan, per-interface STP cost or priority.

    :param mode: 'cost' or 'priority' (anything else is rejected)
    :param kwargs: optional cli_type ("click"/"klish") and no_form (bool)
    """
    cli_type = kwargs.get('cli_type', 'click')
    no_form = 'no' if kwargs.get('no_form') else ''
    if mode in ['cost', 'priority']:
        if cli_type == 'click':
            command = "config spanning_tree vlan interface {} {} {} {} ".format(mode, vlan, iface, value)
        elif cli_type == 'klish':
            # klish names the priority knob 'port-priority'.
            if mode == 'priority':
                mode = 'port-priority'
            interface_data = utils.get_interface_number_from_name(iface)
            command = ['interface {} {}'.format(interface_data["type"], interface_data["number"]),
                       '{} spanning-tree vlan {} {} {}'.format(no_form, vlan, mode, value), "exit"]
        else:
            st.error("Invalid CLI type - {}".format(cli_type))
            return
    else:
        st.log("Invalid mode = {}".format(mode))
        return
    st.config(dut, command, type=cli_type)
def config_stp_enable_interface(dut, iface, mode="enable"):
    """Enable (or apply the given mode to) spanning tree on one interface via click."""
    command = "config spanning_tree interface {} {}".format(mode, iface)
    st.config(dut, command)
def config_stp_interface_params(dut, iface, **kwargs):
    """
    Configure per-interface STP parameters (priority, cost, guards, portfast,
    uplink fast); each kwarg becomes one CLI command / klish sub-command.

    :param dut: device handle
    :param iface: interface to configure
    :param kwargs: parameter -> value pairs plus optional cli_type;
                   a value of 'disable' maps to the klish "no" form
    """
    cli_type = kwargs.setdefault('cli_type', 'click')
    del kwargs['cli_type']
    # click parameter names translated to their klish command fragments.
    click_2_klish = {"root_guard": " guard root", "bpdu_guard": "bpduguard ", "portfast": "portfast",
                     "uplink_fast": "uplinkfast"}
    if cli_type == 'click':
        for each_key in kwargs.keys():
            if each_key == "priority" or each_key == "cost":
                command = "config spanning_tree interface {} {} {}".format(each_key, iface, kwargs[each_key])
            elif each_key == "bpdu_guard_action":
                command = "config spanning_tree interface bpdu_guard enable {} {}".format(iface, kwargs[each_key])
            else:
                command = "config spanning_tree interface {} {} {}".format(each_key, kwargs[each_key], iface)
            st.config(dut, command)
    elif cli_type == 'klish':
        interface_data = utils.get_interface_number_from_name(iface)
        command = ['interface {} {}'.format(interface_data["type"], interface_data["number"])]
        for each_key in kwargs.keys():
            no_form = 'no' if kwargs[each_key] == 'disable' else ''
            if each_key == "priority" or each_key == "cost":
                command.append('spanning-tree {} {}'.format(each_key, kwargs[each_key]))
            elif each_key == "bpdu_guard_action":
                command.append('{} spanning-tree bpduguard port-shutdown'.format(no_form))
            else:
                command.append("{} spanning-tree {}".format(no_form, click_2_klish[each_key]))
        command.append('exit')
        st.config(dut, command, type=cli_type)
def config_stp_interface(dut, iface, mode="enable"):
    """Apply the given spanning-tree mode to one interface via click CLI."""
    command = "config spanning_tree interface {} {} ".format(mode, iface)
    st.config(dut, command)
def show_stp(dut, **kwargs):
    """
    Run 'show spanning_tree' (optionally with a sub-command) and return the
    parsed output.

    :param kwargs: sub_cmd - optional sub-command; cli_type - CLI flavour
    """
    cli_type = kwargs.get("cli_type", 'click')
    command = "show spanning_tree"
    if 'sub_cmd' in kwargs:
        command = "show spanning_tree {}".format(kwargs['sub_cmd'])
    return st.show(dut, command, type=cli_type)
def show_stp_vlan(dut, vlan, cli_type="click"):
    """Run 'show spanning_tree vlan <id>' and return the parsed rows."""
    st.log("show spanning_tree vlan <id>")
    command = SHOW_STP_VLAN.format(vlan)
    return st.show(dut, command, type=cli_type)
def show_stp_vlan_iface(dut, vlan, iface, cli_type="click"):
    """
    Show spanning-tree info for one vlan/interface pair.

    :param cli_type: "click" or "klish" (the two use different syntax)
    :return: parsed rows, or an empty list for an unsupported cli_type
    """
    if cli_type == "click":
        command = "show spanning_tree vlan interface {} {}".format(vlan, iface)
    elif cli_type == "klish":
        command = "show spanning_tree vlan {} interface {}".format(vlan, iface)
    else:
        st.log("Unsupported CLI type {}".format(cli_type))
        return list()
    # Bug fix: the CLI type was passed as the literal string "cli_type"
    # instead of the selected value.
    return st.show(dut, command, type=cli_type)
def show_stp_stats(dut):
    """Run 'show spanning_tree statistics' and return the parsed rows."""
    command = "show spanning_tree statistics"
    return st.show(dut, command)
def show_stp_stats_vlan(dut, vlan):
    """Run 'show spanning_tree statistics vlan <id>' and return the parsed rows."""
    command = "show spanning_tree statistics vlan {} ".format(vlan)
    return st.show(dut, command)
def debug_stp(dut, *argv):
    """
    Issue 'debug spanning_tree' with each positional argument appended as a
    sub-command; with no arguments the bare command is issued.

    Usage: debug_stp(dut), debug_stp(dut, "reset"),
           debug_stp(dut, "vlan 100", "interface Ethernet0")
    :return: True always
    """
    command = 'debug spanning_tree'
    if not argv:
        st.config(dut, command)
    for each in argv:
        command2 = "{} {}".format(command, each)
        st.config(dut, command2)
    return True
def get_debug_stp_log(dut, filter_list=None):
    """
    Collect the STP debug log from the DUT, optionally grepping for patterns.

    :param dut: device handle
    :param filter_list: single pattern or list of grep patterns; patterns are
                        chained so every one must match (logical AND)
    :return: list of log lines (the last line of the raw output is dropped)
    """
    # Bug fix: avoid the mutable-default-argument pitfall (was filter_list=[]).
    if filter_list is None:
        filter_list = []
    if isinstance(filter_list, list):
        filter_list = list(filter_list)
    else:
        filter_list = [filter_list]
    command = "cat {}".format(debug_log_path)
    for each_filter in filter_list:
        command += " | grep '{}'".format(each_filter)
    output = st.show(dut, command, skip_tmpl=True, skip_error_check=True)
    reg_output = utils.remove_last_line_from_string(output)
    return reg_output.split('\n')
def clear_debug_stp_log(dut):
    """Truncate the STP debug log file on the DUT and return True."""
    command = "dd if=/dev/null of={}".format(debug_log_path)
    st.config(dut, command)
    return True
def verify_stp_vlan_iface(dut, **kwargs):
    """
    Verify 'show spanning_tree vlan interface' fields against expected values.

    :param kwargs: must include 'vlan' and 'iface'; every key/value pair is
                   matched against the parsed show output
    :return: True when every pair matches, else False
    """
    output = show_stp_vlan_iface(dut, kwargs["vlan"], kwargs["iface"])
    for each in kwargs.keys():
        match = {each: kwargs[each]}
        entries = filter_and_select(output, None, match)
        if not entries:
            st.log("{} and {} is not match ".format(each, kwargs[each]))
            return False
    return True
def verify_stp_statistics_vlan(dut, **kwargs):
    """
    Verify 'show spanning_tree statistics vlan' fields against expected values.

    :param kwargs: must include 'vlan'; every key/value pair is matched
                   against the parsed statistics output
    :return: True when every pair matches, else False
    """
    output = show_stp_stats_vlan(dut, kwargs["vlan"])
    for each in kwargs.keys():
        match = {each: kwargs[each]}
        entries = filter_and_select(output, None, match)
        if not entries:
            st.log("{} and {} is not match ".format(each, kwargs[each]))
            return False
    return True
def check_dut_is_root_bridge_for_vlan(dut, vlanid):
    """
    Return True when the DUT is the STP root bridge for the vlan: its own
    bridge id equals the root id and the root port reads "Root".
    """
    cmd = SHOW_STP_VLAN.format(vlanid)
    stp_output = st.show(dut, cmd)
    root_bridge=stp_output[0]["rt_id"]
    dut_bridge_id=stp_output[0]["br_id"]
    return (root_bridge == dut_bridge_id) and stp_output[0]["rt_port"] == "Root"
def get_stp_bridge_param(dut, vlanid, bridge_param):
    """
    Return one bridge-level field from 'show spanning_tree vlan <id>'.

    :param bridge_param: must be one of the names listed below; otherwise
                         an error is logged and None is returned
    """
    stp_bridge_param_list = ['stp_mode',
                             'vid',
                             'inst',
                             'br_id',
                             'br_maxage',
                             'br_hello',
                             'br_fwddly',
                             'br_hold',
                             'br_lasttopo',
                             'br_topoch',
                             'rt_id',
                             'rt_pathcost',
                             'rt_desigbridgeid',
                             'rt_port',
                             'rt_maxage',
                             'rt_hello',
                             'rt_fwddly']
    if bridge_param not in stp_bridge_param_list:
        st.error("Please provide the valid stp bridge parameter")
        return
    cmd = SHOW_STP_VLAN.format(vlanid)
    stp_output = st.show(dut, cmd)
    return stp_output[0][bridge_param]
def get_stp_port_param(dut, vlanid, ifname, ifparam):
    """
    Return one per-port field from
    'show spanning_tree vlan <vlanid> interface <ifname>'.

    Returns None when the show output is empty or *ifparam* is invalid.
    """
    valid_params = (
        'port_name', 'port_priority', 'port_pathcost', 'port_portfast',
        'port_uplinkfast', 'port_state', 'port_desigcost',
        'port_desigrootid', 'port_desigbridgeid',
    )
    if ifparam not in valid_params:
        st.error("Please provide the valid stp port parameter")
        return
    cmd = SHOW_STP_VLAN.format(vlanid) + " interface {}".format(ifname)
    stp_output = st.show(dut, cmd)
    if not stp_output:
        return None
    return stp_output[0][ifparam]
def get_default_root_bridge(dut_list):
    """
    Return the DUT expected to win the default root-bridge election, i.e.
    the one with the numerically lowest base MAC address (all bridge
    priorities being equal). Returns None when MACs cannot be read.
    """
    duts_mac_list = basic.get_dut_mac_address_thread(dut_list)
    if not duts_mac_list:
        return None
    # CLEANUP: the old code found min(values), then scanned for its key, then
    # re-scanned dut_list for that same key -- one min() over items suffices.
    return min(duts_mac_list.items(), key=lambda item: item[1])[0]
def get_duts_mac_address(duts):
    """
    Read the base MAC address (colons stripped) of every DUT.

    vSONiC devices have no syseeprom, so their MAC comes from ifconfig;
    hardware devices are read via 'show platform syseeprom' with retries.

    :param duts: iterable of DUT objects
    :return: dict {dut: mac-string}
    """
    duts_mac_addresses = {}
    cmd = "show platform syseeprom"
    for dut in duts:
        if st.is_vsonic(dut):
            duts_mac_addresses[dut] = basic.get_ifconfig_ether(dut)
            continue
        eeprom_details = st.show(dut, cmd, skip_error_check=True)
        if not eeprom_details:
            # syseeprom occasionally needs a moment after boot -- retry.
            for _ in range(3):
                st.wait(2)
                eeprom_details = st.show(dut, cmd, skip_error_check=True)
                if eeprom_details:
                    break
            # BUGFIX: the old guard was ``i >= iteration + 1`` while the loop
            # index never exceeded ``iteration``, so the failure report was
            # unreachable; report whenever the retries are exhausted.
            if not eeprom_details:
                st.log("EEPROM data not found for {}".format(dut))
                st.report_fail("eeprom_data_not_found", dut)
        st.log("EEPROM DETAILS -- {}".format(eeprom_details))
        if eeprom_details:
            for data in eeprom_details:
                if data.get("tlv_name") == "Base MAC Address":
                    duts_mac_addresses[dut] = data["value"].replace(":", "")
    st.log("DUT MAC ADDRESS -- {}".format(duts_mac_addresses))
    return duts_mac_addresses
def _get_duts_list_in_order(vars):
    """Return (mac, dut) pairs for all DUTs, sorted by MAC ascending."""
    mac_by_dut = get_duts_mac_address(vars["dut_list"])
    pairs = zip(mac_by_dut.values(), mac_by_dut.keys())
    return sorted(pairs)
def get_ports_based_on_state(vars, vlanid, port_state, dut=None, cli_type='click'):
    """
    Return the list of port names in *port_state* on a non-root DUT.

    When *dut* is None the DUT with the highest base MAC (the one least
    likely to be root) is chosen automatically.
    """
    # NOTE(review): assumes ``vars`` maps testbed aliases to DUT handles and
    # also carries 'dut_list' -- confirm against the framework fixtures.
    selected_non_root = ""
    if dut is None:
        duts_list = _get_duts_list_in_order(vars)
        # Last entry of the MAC-sorted list = highest MAC = default non-root.
        dut_with_max_mac_address = duts_list[len(duts_list) - 1][1]
        selected_non_root = [dut_key for dut_key, dut_value in vars.items() if dut_value == dut_with_max_mac_address][0]
    else:
        selected_non_root = [dut_key for dut_key, dut_value in vars.items() if dut_value == dut][0]
    stp_output = show_stp_vlan(vars[selected_non_root], vlanid, cli_type=cli_type)
    ports_list = [row["port_name"] for row in stp_output if
                  row["port_state"] == port_state and int(row["vid"]) == vlanid]
    return ports_list
def poll_for_root_switch(dut, vlanid, iteration=20, delay=1):
    """
    Poll until the DUT becomes root bridge for *vlanid*.

    :param iteration: maximum extra attempts before giving up
    :param delay: seconds to wait between attempts
    :return: True when root was observed, False on timeout
    """
    attempt = 1
    while not check_dut_is_root_bridge_for_vlan(dut, vlanid):
        if attempt > iteration:
            st.log("Max iterations {} reached".format(attempt))
            return False
        attempt += 1
        st.wait(delay)
    st.log("Observed dut is root bridge {} iteration".format(attempt))
    return True
def poll_for_stp_status(dut, vlanid, interface, status, iteration=20, delay=1):
    """
    Poll until *interface* reaches STP state *status* on *vlanid*.

    :param iteration: maximum extra attempts before giving up
    :param delay: seconds to wait between attempts
    :return: True when the state was reached, False on timeout
    """
    attempt = 1
    while True:
        if get_stp_port_param(dut, vlanid, interface, "port_state") == status:
            st.log("Port status is changed to {} after {} sec".format(status, attempt))
            return True
        if attempt > iteration:
            st.log("Max iterations {} reached".format(attempt))
            return False
        st.wait(delay)
        attempt += 1
def get_root_guard_details(dut, vlan=None, ifname=None, rg_param="rg_timeout"):
    """
    Return a root-guard field from 'show spanning_tree root_guard'.

    With no vlan/ifname the global field (e.g. the timeout, as int) from the
    first row is returned; otherwise the field for the matching
    vlan/interface row.
    """
    output = st.show(dut, "show spanning_tree root_guard")
    if vlan is None and ifname is None:
        return int(output[0][rg_param])
    matches = [row[rg_param] for row in output
               if row["rg_ifname"] == ifname and int(row["rg_vid"]) == vlan]
    return matches[0]
def check_rg_current_state(dut, vlan, ifname):
    """True when the interface's root-guard status is 'Consistent state'."""
    status = get_root_guard_details(dut, vlan, ifname, "rg_status")
    return status == "Consistent state"
def check_bpdu_guard_action(dut, ifname, **kwargs):
    """
    Verify the bpdu-guard configured/operational shutdown flags of a port.

    kwargs must carry the expected 'config_shut' and 'opr_shut' values.
    """
    rows = st.show(dut, "show spanning_tree bpdu_guard")
    entry = [row for row in rows if row['bg_ifname'] == ifname][0]
    return (kwargs['config_shut'] == entry['bg_cfg_shut']
            and kwargs['opr_shut'] == entry['bg_oper_shut'])
def stp_clear_stats(dut, **kwargs):
    """
    Clear spanning-tree statistics.

    Keyword args: 'vlan' alone clears one vlan; 'vlan' + 'interface'
    clears one vlan/interface pair; none clears everything.
    :return: the CLI output (was previously bound to an unused local and
        discarded).
    """
    cmd = "sonic-clear spanning_tree statistics"
    if 'vlan' in kwargs and 'interface' not in kwargs:
        cmd += ' vlan {}'.format(kwargs['vlan'])
    if 'vlan' in kwargs and 'interface' in kwargs:
        cmd += ' vlan-interface {} {}'.format(kwargs['vlan'], kwargs['interface'])
    return st.config(dut, cmd)
def get_stp_stats(dut, vlan, interface, param):
    """
    Return one STP statistics counter for a vlan/port, or None when absent.

    :param param: template key of the counter column (e.g. BPDU tx/rx).
    """
    output = show_stp_stats_vlan(dut, vlan)
    value_list = [row[param] for row in output
                  if int(row['st_vid']) == vlan and row['st_portno'] == interface]
    utils.banner_log(value_list)
    # BUGFIX: guard on the filtered list, not the raw output -- the old code
    # raised IndexError when the vlan had stats but this port did not.
    return int(value_list[0]) if value_list else None
def verify_stp_ports_by_state(dut, vlan, port_state, port_list, cli_type='click'):
    """
    Check that every port in *port_list* is in *port_state* on *vlan*.

    Logs the verdict for each port; returns False if any port mismatches.
    """
    expected_ports = list(port_list) if isinstance(port_list, list) else [port_list]
    stp_output = show_stp_vlan(dut, vlan, cli_type=cli_type)
    in_state = [row["port_name"] for row in stp_output
                if row["port_state"] == port_state and int(row["vid"]) == vlan]
    result = True
    for port in expected_ports:
        if port in in_state:
            st.log("{} is {} state ".format(port, port_state))
        else:
            st.log("{} is not {} state ".format(port, port_state))
            result = False
    return result
def get_stp_port_list(dut, vlan, exclude_port=None, cli_type='click'):
    """
    Return the STP port list for *vlan*, minus any excluded ports.

    :param exclude_port: port name or list of names to drop (default: none).
    """
    # BUGFIX: default changed from a mutable ``[]`` (shared across calls)
    # to None.
    if exclude_port is None:
        exclude_port = []
    ex_port_li = list(exclude_port) if isinstance(exclude_port, list) else [exclude_port]
    stp_output = show_stp_vlan(dut, vlan, cli_type=cli_type)
    ports_list = [row["port_name"] for row in stp_output]
    for each_int in ex_port_li:
        if each_int in ports_list:
            ports_list.remove(each_int)
            st.log("{} is excluded".format(each_int))
    return ports_list
def get_stp_root_port(dut, vlan, cli_type='click'):
    """
    Return the Root port name for *vlan*, or False when there is no output
    or the DUT itself is the root bridge.
    """
    out = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not out:
        st.error("No Root/Forwarding port found")
        return False
    root_port = out[0]['rt_port']
    if root_port == "Root":
        # The literal "Root" in this column means the DUT is the root bridge.
        st.error("Given device is ROOT Bridge.")
        return False
    return root_port
def get_stp_next_root_port(dut, vlan, cli_type='click'):
    """
    Predict which local port should take over as the Root port for *vlan*
    if the current root port went down.

    The candidate is picked among the ports facing the same partner device:
    first any other port sharing the current root path cost, otherwise the
    lowest-cost remaining port; ties are broken by the lowest ifindex.
    Returns the local port name, or None when it cannot be determined.
    """
    partner = None
    next_root_port = None
    # Sort list1 by the parallel values of list2 (used to order ports by ifindex).
    sort_list = lambda list1, list2: [x for _, x in sorted(zip(list2, list1))]
    out = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not out:
        st.error("No Initial Root/Forwarding port found")
        return next_root_port
    if out[0]['rt_port'] == "Root":
        st.error("Given device is ROOT Bridge.")
        return next_root_port
    partner_ports = st.get_dut_links(dut)
    root_port = out[0]['rt_port']
    root_cost = int(filter_and_select(out, ['port_pathcost'], {'port_name': root_port})[0]['port_pathcost'])
    st.log('root_port : {}, root_cost: {}'.format(root_port, root_cost))
    # Finding the Root port connected partner
    for each in partner_ports:
        if not partner:
            if root_port == each[0]:
                partner = each[1]
                st.log("partner : {}".format(partner))
    if not partner:
        st.error("No Partner found for Root/Forwarding Port.")
        return next_root_port
    # Dut Partner port mapping
    # NOTE(review): the comprehension variable 'all' shadows the builtin;
    # left unchanged in this documentation-only pass.
    dut_partner_ports = st.get_dut_links(dut, partner)
    dut_partner_ports_map = {all[0]: all[2] for all in dut_partner_ports}
    dut_partner_ports_map_rev = {all[2]: all[0] for all in dut_partner_ports}
    st.log('dut_partner_ports_map : {}'.format(str(dut_partner_ports_map)))
    st.log('dut_partner_ports_map_rev : {}'.format(str(dut_partner_ports_map_rev)))
    # Preparing DATA to process and find the next Root/Forwarding port.
    # cut_data: partner-side port name -> [ifindex, stp state, path cost]
    cut_data = {}
    pc_list = [each['teamdev'] for each in portchannel.get_portchannel_list(partner)]
    for each in out:
        port = each['port_name']
        if "Ethernet" in port and port in dut_partner_ports_map:
            # Physical port towards the partner: translate to the partner side.
            port = dut_partner_ports_map[each['port_name']]
            ifindex = int(re.findall(r'\d+', port)[0])
            cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
        elif port in pc_list:
            # Port-channel towards the partner: keep the LAG name as-is.
            ifindex = int(re.findall(r'\d+', port)[0])
            cut_data[port] = [ifindex, each['port_state'], int(each['port_pathcost'])]
        else:
            pass
    st.log('cut_data == {}'.format(str(cut_data)))
    # cost_vs_port: path cost -> [[ethernet candidates], [port-channel candidates]],
    # always excluding the current root port itself.
    cost_vs_port = {}
    for each in cut_data:
        if each != dut_partner_ports_map[root_port]:
            if 'Ethernet' in each:
                if cut_data[each][2] not in cost_vs_port:
                    cost_vs_port[cut_data[each][2]] = [[each], []]
                else:
                    cost_vs_port[cut_data[each][2]][0].append(each)
            else:
                if cut_data[each][2] not in cost_vs_port:
                    cost_vs_port[cut_data[each][2]] = [[], [each]]
                else:
                    cost_vs_port[cut_data[each][2]][1].append(each)
    sorted_cost = sorted(cost_vs_port.keys())
    st.log("cost_vs_port : {}".format(cost_vs_port))
    st.log("sorted_cost : {}".format(sorted_cost))
    # Logic to find next Root/Forwarding port
    if root_cost in cost_vs_port and (len(cost_vs_port[root_cost][0]) or len(cost_vs_port[root_cost][1])):
        st.debug("When 2 or more ports has configured with same root port cost.")
        if len(cost_vs_port[root_cost][0]):
            # Ethernet candidates win; lowest ifindex first, mapped back to
            # the local port name.
            port_list = cost_vs_port[root_cost][0]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return dut_partner_ports_map_rev[next_root_port]
        else:
            port_list = cost_vs_port[root_cost][1]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return next_root_port
    elif len(sorted_cost):
        st.debug("When NO 2 or more ports has root port cost configured. So checking next larger cost ports")
        next_root_cost = sorted_cost[0]
        if len(cost_vs_port[next_root_cost][0]):
            port_list = cost_vs_port[next_root_cost][0]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return dut_partner_ports_map_rev[next_root_port]
        else:
            port_list = cost_vs_port[next_root_cost][1]
            port_index_li = [cut_data[e][0] for e in port_list]
            next_root_port = sort_list(port_list, port_index_li)[0]
            return next_root_port
    else:
        st.error("No Match")
        return next_root_port
def config_stp_in_parallel(dut_list, feature="pvst", mode="enable", vlan=None, thread=True):
    """Enable/disable an STP feature on several DUTs, optionally threaded."""
    st.log("Configuring {} on all the DUT's with mode as {}".format(feature.capitalize(), mode))
    duts = [str(d) for d in dut_list] if isinstance(dut_list, list) else [dut_list]
    params = [[config_spanning_tree, d, feature, mode, vlan] for d in duts]
    if params:
        exec_all(thread, params)
def show_stp_in_parallel(dut_list, thread=True, cli_type='click'):
    """Dump 'show spanning_tree' from every DUT, optionally threaded."""
    st.log("Displaying STP result on all the DUT's in parallel ....")
    exec_foreach(thread, utility.make_list(dut_list), show_stp, cli_type=cli_type)
def get_root_bridge_for_vlan(dut_vlan_data, thread=True):
    """
    Check root-bridge status for each DUT/vlan pair, in parallel.

    :param dut_vlan_data: dict {dut: vlan}
    :return: dict {dut: bool} -- True when that DUT is root for its vlan
    """
    params = [[check_dut_is_root_bridge_for_vlan, dut, vlan]
              for dut, vlan in dut_vlan_data.items()]
    result = dict()
    if params:
        [out, exceptions] = exec_all(thread, params)
        utils.banner_log("Getting root bridge details")
        for i, response in enumerate(out):
            # params[i][1] is the dut of the i-th call.
            result[params[i][1]] = response
    # CLEANUP: route the summary through the framework logger instead of a
    # stray debug print().
    st.log(result)
    return result
def check_for_single_root_bridge_per_vlan(dut_list, vlan_list, dut_vlan_data, cli_type='click'):
    """
    Ensure exactly one root bridge exists per vlan and it is the expected DUT.

    :param dut_list: DUTs to inspect (parallel to vlan_list).
    :param vlan_list: vlan instances, one per DUT.
    :param dut_vlan_data: dict {dut: vlan it is expected to be root for}.
    :return: True on success; on any mismatch the testcase is failed via
        st.report_fail (which does not return).
    """
    st.log("Verifying the single root bridge per vlan ...")
    dut_li = list([str(e) for e in dut_list]) if isinstance(dut_list, list) else [dut_list]
    vlan_li = list([str(e) for e in vlan_list]) if isinstance(vlan_list, list) else [vlan_list]
    if len(vlan_list) != len(dut_list):
        st.log("Invalid data provided to check the root bridge per vlan ...")
        st.report_fail("invalid_data_for_root_bridge_per_vlan")
    for vlan in vlan_li:
        root_count = 0
        params = list()
        for dut in dut_li:
            params.append([show_stp_vlan, dut, vlan, cli_type])
        # Collect 'show spanning_tree vlan' from every DUT in parallel.
        stp_output, exceptions = exec_all(True, params)
        st.log(stp_output)
        st.log(exceptions)
        for value in exceptions:
            st.log("Exceptions observed {}".format(value))
            if value is not None:
                st.log("Exception occured {}".format(value))
                return False
        if not stp_output:
            st.log("STP output not found on {} for {} instance".format(dut_li, vlan))
            st.report_fail("stp_output_not_found", dut_li, vlan)
        for index, stp_out in enumerate(stp_output):
            if len(stp_out) <= 0:
                st.log("STP OUTPUT IS NOT OBSERVED --- {}".format(stp_out))
                st.report_fail("stp_output_not_found")
            root_bridge = stp_out[0]["rt_id"]
            dut_bridge_id = stp_out[0]["br_id"]
            # A DUT is root when its bridge id equals the root id and the
            # root-port column reads the literal "Root".
            if root_bridge == dut_bridge_id and stp_out[0]["rt_port"] == "Root":
                if dut_vlan_data[dut_li[index]] != int(vlan.strip()):
                    st.log("Expected DUT {} is not root for {} instance".format(dut_li[index], vlan))
                    st.report_fail("expected_dut_not_root", dut_li[index], vlan)
                root_count += 1
                if root_count > 1:
                    st.log("Observed more than 1 root bridge per {} instance".format(vlan))
                    st.report_fail("observed_more_than_1_root_bridge", vlan)
    return True
def verify_root_bridge_interface_state(dut, vlan, interface_list, cli_type='click'):
    """
    Verify that every interface in *interface_list* is FORWARDING on the
    root bridge for *vlan*. Unexpected ports and blocked states are logged.
    """
    fail_states = ["BLOCKING", "DISABLED", "DISCARDING"]
    pass_states = ["FORWARDING"]
    rows = show_stp_vlan(dut, vlan, cli_type=cli_type)
    if not rows:
        st.log("No STP data found for {} and {} instance".format(dut, vlan))
        return False
    forwarding_counter = 0
    for data in rows:
        if data["port_name"] not in interface_list:
            st.log("Interface {} not found in expected list ...".format(data["port_name"]))
        if data["port_state"] in fail_states:
            st.log("Observed that interface {} state is {} for root bridge".format(data["port_name"], fail_states))
        if data["port_state"] in pass_states:
            forwarding_counter += 1
    return forwarding_counter == len(interface_list)
def poll_root_bridge_interfaces(dut_vlan_list, interfaces_list, iteration=30, delay=1):
    """
    Poll each DUT until all of its listed interfaces are FORWARDING.

    :param dut_vlan_list: dict {dut: vlan}
    :param interfaces_list: dict {dut: [expected interface names]}
    :param iteration: max polls per DUT
    :param delay: seconds between polls
    :return: True only when every DUT converged within its budget.
    """
    st.log("Polling for root bridge interfaces ...")
    if dut_vlan_list and interfaces_list:
        no_of_duts = len(dut_vlan_list)
        # check counts the DUTs that converged successfully.
        check=0
        for dut, vlan in dut_vlan_list.items():
            i=1
            while True:
                if verify_root_bridge_interface_state(dut, vlan, interfaces_list[dut]):
                    st.log("Root bridge interface verification succeeded.")
                    check+=1
                    break
                if i > iteration:
                    st.log("Max iteration limit reached.")
                    break
                i+=1
                st.wait(delay)
        if check != no_of_duts:
            st.log("Number of root DUTs check failed ...")
            return False
        return True
    else:
        st.log("Empty DUT VLAN LIST dut_vlan_list AND INTERFACE LIST interfaces_list")
        return False
def verify_root_bridge_on_stp_instances(dut_list, vlan, bridge_identifier):
    """
    Verify that EVERY DUT reports *bridge_identifier* as the root bridge id
    for the given vlan instance.

    :return: True only when all DUTs agree; False on any mismatch,
        exception, or empty input.
    """
    dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list]
    params = [[get_stp_bridge_param, dut, vlan, "rt_id"] for dut in dut_li]
    if not params:
        return False
    [out, exceptions] = exec_all(True, params)
    st.log("#########OUTPUT###########")
    st.log(out)
    st.log(exceptions)
    for value in exceptions:
        st.log("Exceptions observed {}".format(value))
        if value is not None:
            return False
    for identifier in out:
        st.log("Comparing ROOT bridge ID {} with Provided ID {}".format(identifier, bridge_identifier))
        if identifier != bridge_identifier:
            st.log("Mismatch in root and bridge identifiers")
            return False
        st.log("Root Bridge Identifier {} is matched with provided identifier {}".format(identifier, bridge_identifier))
    # BUGFIX: previously True was returned from inside the loop after the
    # FIRST matching DUT, skipping the remaining entries; now all DUTs must
    # match before True is returned.
    return True
def config_bpdu_filter(dut, **kwargs):
    """
    Configure spanning-tree BPDU filter, globally or per interface.

    Keyword args:
        cli_type  : CLI flavour to use (default "klish").
        interface : interface name; when omitted, the global
                    "edge-port bpdufilter default" knob is toggled instead.
        no_form   : truthy -> emit the "no" form of the command.
        action    : per-interface action keyword (default "enable");
                    ignored when no_form is set.
    :return: True when commands were issued, False otherwise.
    """
    cli_type = kwargs.get("cli_type", "klish")
    interface=kwargs.get("interface",None)
    no_form=kwargs.get("no_form", None)
    action=kwargs.get("action", "enable")
    commands = list()
    if not interface:
        # Global default applied to all edge ports.
        command = "spanning-tree edge-port bpdufilter default"
        if no_form:
            command = "no {}".format(command)
        commands.append(command)
    else:
        interface_details = utils.get_interface_number_from_name(interface)
        if not interface_details:
            st.log("Interface details not found {}".format(interface_details))
            return False
        commands.append("interface {} {}".format(interface_details.get("type"), interface_details.get("number")))
        command = "spanning-tree bpdufilter"
        if no_form:
            command = "no {}".format(command)
        elif action:
            command = "{} {}".format(command, action)
        else:
            # Neither "no" form nor an action: nothing to send for this port.
            command = ""
        if command:
            commands.append(command)
    if commands:
        st.config(dut, commands, type=cli_type)
        return True
    return False
def config_stp_root_bridge_by_vlan(stp_data):
    """
    Push per-vlan bridge priority to several DUTs in parallel.

    :param stp_data: dict {dut: {"vlan": ..., "priority": ...}}
    """
    params = [utility.ExecAllFunc(config_stp_vlan_parameters, dut, data["vlan"], priority=data["priority"])
              for dut, data in stp_data.items()]
    [out, exceptions] = exec_all(True, params)
    ensure_no_exception(exceptions)
def config_port_type(dut, interface, stp_type="rpvst", port_type="edge", no_form=False, cli_type="klish"):
    """
    Configure (or remove, with no_form) the spanning-tree port type.

    NOTE: ``stp_type`` is accepted for signature compatibility but unused.
    :return: True on success, False when the interface cannot be parsed.
    """
    interface_details = utils.get_interface_number_from_name(interface)
    if not interface_details:
        st.log("Interface details not found {}".format(interface_details))
        return False
    if no_form:
        port_cmd = "no spanning-tree port type"
    else:
        port_cmd = "spanning-tree port type {}".format(port_type)
    commands = [
        "interface {} {}".format(interface_details.get("type"), interface_details.get("number")),
        port_cmd,
        'exit',
    ]
    st.config(dut, commands, type=cli_type)
    return True
def show_stp_config_using_klish(dut, type="", vlan="", intf=""):
    """
    Dump one spanning-tree show command through the klish CLI.

    :param type: 'statistics', 'root_guard', 'bpdu_guard' or 'vlan_intf'
        (parameter name shadows the builtin but is kept for caller
        compatibility).
    """
    command = None
    if type == 'statistics':
        command = "show spanning-tree counters vlan {}".format(vlan)
    elif type == 'root_guard':
        command = "show spanning-tree inconsistentports vlan {}".format(vlan)
    elif type == 'bpdu_guard':
        command = "show spanning-tree bpdu-guard"
    elif type == "vlan_intf":
        command = "show spanning-tree vlan {} interface {}".format(vlan, intf)
    # BUGFIX: the old code raised NameError (unbound 'command') for any
    # unrecognized ``type``; now it is reported and skipped.
    if not command:
        st.error("Unsupported show type: {}".format(type))
        return
    st.show(dut, command, type="klish", skip_tmpl=True)
def verify_stp_intf_status(dut, vlanid, interface, status):
    """Return True (and log) when the port is already in STP state *status*."""
    if get_stp_port_param(dut, vlanid, interface, "port_state") == status:
        st.log("Port status is changed to {}".format(status))
        return True
return False | true | true |
f73f03be3b3051fb1ca517ba8b376fbb92a0af77 | 5,982 | py | Python | python_stub.py | skazanyNaGlany/python_stub | 50a137e9360d0d83535abb863948afdf9594204b | [
"MIT"
] | null | null | null | python_stub.py | skazanyNaGlany/python_stub | 50a137e9360d0d83535abb863948afdf9594204b | [
"MIT"
] | null | null | null | python_stub.py | skazanyNaGlany/python_stub | 50a137e9360d0d83535abb863948afdf9594204b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 Paweł Kacperski (screamingbox@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def install_pip_and_modules(package_names):
    """
    Ensure pip and the given packages are installed, restarting the script
    whenever anything new was installed so fresh modules become importable.

    ``package_names`` entries are PyPI names or ``git+https://`` URLs; git
    URLs are cloned with dulwich (installed on demand) and installed from
    the checkout. Exits the process on unrecoverable failure.
    """
    import os
    import os.path
    import sys
    import shutil
    import subprocess
    assert sys.version_info >= (2, 7) or sys.version_info >= (3, 4), 'Python 2.7+ or 3.4+ required'
    count_installed_packages = 0
    # Pick the urllib flavour available on this interpreter (py2 vs py3).
    try:
        import urllib2
    except ModuleNotFoundError:
        import urllib.request
    def download_file(url):
        # Fetch *url* into the current directory, keeping its basename.
        print('Downloading ' + url)
        if 'urllib2' in globals() or 'urllib2' in locals():
            remote_file = urllib2.urlopen(url)
        elif 'urllib' in globals() or 'urllib' in locals():
            remote_file = urllib.request.urlopen(url)
        with open(os.path.basename(url), 'wb') as local_file:
            local_file.write(remote_file.read())
    def pip_install_module(module_name, as_user):
        # Install one module via "python -m pip install" (optionally --user).
        cmd = sys.executable + ' -m pip install ' + module_name
        if as_user:
            cmd += ' --user'
        print('Executing: ' + cmd)
        os.system(cmd)
    def determine_install_as_user():
        # --user installs only make sense outside a virtualenv and for
        # non-root users.
        in_virtualenv = 'VIRTUAL_ENV' in os.environ
        is_root = hasattr(os, 'geteuid') and os.geteuid() == 0
        return not in_virtualenv and not is_root
    def restart():
        # Re-exec the script so newly installed packages are importable,
        # then terminate this (stale) process.
        print('Restarting')
        os.system(sys.executable + ' ' + str(' '.join(sys.argv)))
        exit(0)
    def get_installed_packages():
        # Parse "pip list" output into {name: version}, skipping the
        # two-line header.
        packages = {}
        output_lines = subprocess.check_output([
            sys.executable,
            '-m',
            'pip',
            'list'
        ]).decode('utf-8').split('\n')
        for iline in output_lines[2:]:
            iline = iline.strip()
            if not iline:
                continue
            parts = iline.split(' ')
            packages[parts[0]] = parts[len(parts) - 1]
        return packages
    install_as_user = determine_install_as_user()
    # Bootstrap pip itself via get-pip.py when it is missing.
    try:
        import pip
    except ImportError as x1:
        print(x1)
        download_file('https://bootstrap.pypa.io/get-pip.py')
        print('Installing: pip')
        cmd = sys.executable + ' get-pip.py'
        if install_as_user:
            cmd += ' --user'
        print('Executing: ' + cmd)
        os.system(cmd)
        os.remove('get-pip.py')
        count_installed_packages += 1
        try:
            import pip
        except ImportError:
            print('Unable to install pip')
            exit(1)
    installed_packages = get_installed_packages()
    cwd = os.getcwd()
    # check if we need Dulwich - pure Python Git implementation
    need_dulwich = False
    for ipackage_name2 in package_names:
        if ipackage_name2.startswith('git+https://'):
            need_dulwich = True
            break
    if need_dulwich:
        if not 'dulwich' in installed_packages:
            pip_install_module('dulwich', install_as_user)
            count_installed_packages += 1
            installed_packages = get_installed_packages()
            if not 'dulwich' in installed_packages:
                print('Unable to install dulwich')
                exit(1)
            restart()
    # install packages
    for ipackage_name in package_names:
        imodule_pip_basename = os.path.basename(ipackage_name)
        if not imodule_pip_basename in installed_packages:
            print('Installing: {} ({})'.format(ipackage_name, ipackage_name))
            if ipackage_name.startswith('git+https://'):
                import dulwich.porcelain
                # just remove git+ and install
                pkg_url = ipackage_name[4:]
                pkg_basename = os.path.basename(pkg_url)
                # Drop any stale checkout before cloning afresh.
                try:
                    shutil.rmtree(os.path.join(cwd, pkg_basename))
                except OSError:
                    pass
                dulwich.porcelain.clone(pkg_url)
                pip_install_module(pkg_basename, install_as_user)
                count_installed_packages += 1
                try:
                    shutil.rmtree(os.path.join(cwd, pkg_basename))
                except Exception as x5:
                    print(x5)
            else:
                pip_install_module(ipackage_name, install_as_user)
                count_installed_packages += 1
    installed_packages = get_installed_packages()
    # Final verification: every requested package must now be installed.
    for ipackage_name2 in package_names:
        imodule_pip_name2 = os.path.basename(ipackage_name2)
        if imodule_pip_name2 not in installed_packages:
            print('Unable to install ' + imodule_pip_name2)
            exit(1)
    if count_installed_packages > 0:
        restart()
# this will install some packages
install_pip_and_modules([
'selenium',
'git+https://github.com/boppreh/mouse'
])
# packages installed
# rest of your code goes below
# this lines
import selenium
import mouse
def main():
pass
if __name__ == '__main__':
main()
| 27.953271 | 99 | 0.621364 |
def install_pip_and_modules(package_names):
import os
import os.path
import sys
import shutil
import subprocess
assert sys.version_info >= (2, 7) or sys.version_info >= (3, 4), 'Python 2.7+ or 3.4+ required'
count_installed_packages = 0
try:
import urllib2
except ModuleNotFoundError:
import urllib.request
def download_file(url):
print('Downloading ' + url)
if 'urllib2' in globals() or 'urllib2' in locals():
remote_file = urllib2.urlopen(url)
elif 'urllib' in globals() or 'urllib' in locals():
remote_file = urllib.request.urlopen(url)
with open(os.path.basename(url), 'wb') as local_file:
local_file.write(remote_file.read())
def pip_install_module(module_name, as_user):
cmd = sys.executable + ' -m pip install ' + module_name
if as_user:
cmd += ' --user'
print('Executing: ' + cmd)
os.system(cmd)
def determine_install_as_user():
in_virtualenv = 'VIRTUAL_ENV' in os.environ
is_root = hasattr(os, 'geteuid') and os.geteuid() == 0
return not in_virtualenv and not is_root
def restart():
print('Restarting')
os.system(sys.executable + ' ' + str(' '.join(sys.argv)))
exit(0)
def get_installed_packages():
packages = {}
output_lines = subprocess.check_output([
sys.executable,
'-m',
'pip',
'list'
]).decode('utf-8').split('\n')
for iline in output_lines[2:]:
iline = iline.strip()
if not iline:
continue
parts = iline.split(' ')
packages[parts[0]] = parts[len(parts) - 1]
return packages
install_as_user = determine_install_as_user()
try:
import pip
except ImportError as x1:
print(x1)
download_file('https://bootstrap.pypa.io/get-pip.py')
print('Installing: pip')
cmd = sys.executable + ' get-pip.py'
if install_as_user:
cmd += ' --user'
print('Executing: ' + cmd)
os.system(cmd)
os.remove('get-pip.py')
count_installed_packages += 1
try:
import pip
except ImportError:
print('Unable to install pip')
exit(1)
installed_packages = get_installed_packages()
cwd = os.getcwd()
need_dulwich = False
for ipackage_name2 in package_names:
if ipackage_name2.startswith('git+https://'):
need_dulwich = True
break
if need_dulwich:
if not 'dulwich' in installed_packages:
pip_install_module('dulwich', install_as_user)
count_installed_packages += 1
installed_packages = get_installed_packages()
if not 'dulwich' in installed_packages:
print('Unable to install dulwich')
exit(1)
restart()
for ipackage_name in package_names:
imodule_pip_basename = os.path.basename(ipackage_name)
if not imodule_pip_basename in installed_packages:
print('Installing: {} ({})'.format(ipackage_name, ipackage_name))
if ipackage_name.startswith('git+https://'):
import dulwich.porcelain
pkg_url = ipackage_name[4:]
pkg_basename = os.path.basename(pkg_url)
try:
shutil.rmtree(os.path.join(cwd, pkg_basename))
except OSError:
pass
dulwich.porcelain.clone(pkg_url)
pip_install_module(pkg_basename, install_as_user)
count_installed_packages += 1
try:
shutil.rmtree(os.path.join(cwd, pkg_basename))
except Exception as x5:
print(x5)
else:
pip_install_module(ipackage_name, install_as_user)
count_installed_packages += 1
installed_packages = get_installed_packages()
for ipackage_name2 in package_names:
imodule_pip_name2 = os.path.basename(ipackage_name2)
if imodule_pip_name2 not in installed_packages:
print('Unable to install ' + imodule_pip_name2)
exit(1)
if count_installed_packages > 0:
restart()
install_pip_and_modules([
'selenium',
'git+https://github.com/boppreh/mouse'
])
import selenium
import mouse
def main():
pass
if __name__ == '__main__':
main()
| true | true |
f73f03f6b88df9e59ef3cd1050ce5d365459d2e4 | 2,046 | py | Python | sigda/user/services.py | yangluoshen/sigda | 83a2149d07edfbe56be95d5dc2a316c044bee54e | [
"BSD-2-Clause"
] | null | null | null | sigda/user/services.py | yangluoshen/sigda | 83a2149d07edfbe56be95d5dc2a316c044bee54e | [
"BSD-2-Clause"
] | 3 | 2017-08-21T07:26:11.000Z | 2017-11-09T02:19:23.000Z | sigda/user/services.py | yangluoshen/sigda | 83a2149d07edfbe56be95d5dc2a316c044bee54e | [
"BSD-2-Clause"
] | null | null | null | #coding:utf-8
import flask_login
from flask import render_template
from sigda.models import db, User
from sigda.config.common import ErrorCode
import logging
login_manager = flask_login.LoginManager()
class UserDbService(object):
    """Database helpers around the User model."""

    @staticmethod
    def add(email, name, passwd):
        """
        Create a user.

        :return: (user, code) -- (existing user, EXIST) when the email is
            already registered, (None, FAILURE) on a DB error, otherwise
            (new user, SUCCESS).
        """
        u = UserDbService.get_user_by_email(email)
        if u:
            return u, ErrorCode.EXIST
        u = User(email=email, name=name, passwd=passwd)
        db.session.add(u)
        try:
            db.session.flush()
            db.session.commit()
        except Exception:
            db.session.rollback()
            # BUGFIX: log the actual failure with traceback instead of the
            # bare, uninformative 'add user' message (the caught exception
            # was previously bound but never used).
            logging.exception('failed to add user %s', email)
            return None, ErrorCode.FAILURE
        return u, ErrorCode.SUCCESS

    @staticmethod
    def get_user_by_name(name):
        """Return the user with the given name, or None."""
        return User.query.filter(User.name == name).first()

    @staticmethod
    def get_user_by_id(uid):
        """Return the user with the given primary key, or None."""
        return User.query.filter(User.id == uid).first()

    @staticmethod
    def get_user_by_email(email):
        """Return the user with the given email, or None."""
        return User.query.filter(User.email == email).first()

    @staticmethod
    def auth(email, passwd):
        """
        Check credentials; False for unknown email or wrong password.

        NOTE(review): passwords are stored and compared in plain text --
        hash them (e.g. werkzeug.security) before production use.
        """
        real_user = UserDbService.get_user_by_email(email)
        if not real_user:
            return False
        return real_user.passwd == passwd
class UserAuth(flask_login.UserMixin):
    """Minimal flask-login user object; ``id`` and ``name`` are attached
    dynamically in user_loader()."""
    pass
@login_manager.user_loader
def user_loader(email):
    """flask-login callback: rebuild the session user from its email id."""
    user = UserDbService.get_user_by_email(email)
    if user is None:
        return
    auth_user = UserAuth()
    auth_user.id = user.email
    auth_user.name = user.name
    return auth_user
'''
@login_manager.request_loader
def request_loader(request):
email = request.form.get('email')
u = UserDbService.get_user_by_email(email)
if not u:
return
ua = UserAuth()
ua.id = email
ua.is_authenticated = request.form.get('passwd') == u.passwd
ua.is_authenticated
return ua
'''
@login_manager.unauthorized_handler
def unauthorized_handler():
    """Serve the login page whenever an unauthenticated user hits a
    protected view."""
    return render_template('login.html', notifier='')
| 19.485714 | 64 | 0.631476 |
import flask_login
from flask import render_template
from sigda.models import db, User
from sigda.config.common import ErrorCode
import logging
login_manager = flask_login.LoginManager()
class UserDbService(object):
@staticmethod
def add(email, name, passwd):
u = UserDbService.get_user_by_email(email)
if u:
return u, ErrorCode.EXIST
u = User(email=email, name=name, passwd=passwd)
db.session.add(u)
try:
db.session.flush()
db.session.commit()
except Exception as e:
db.session.rollback()
logging.error('add user')
return None, ErrorCode.FAILURE
return u, ErrorCode.SUCCESS
@staticmethod
def get_user_by_name(name):
u = User.query.filter(User.name == name).first()
return u
@staticmethod
def get_user_by_id(uid):
u = User.query.filter(User.id == uid).first()
return u
@staticmethod
def get_user_by_email(email):
u = User.query.filter(User.email == email).first()
return u
@staticmethod
def auth(email, passwd):
real_user = UserDbService.get_user_by_email(email)
if not real_user:
return False
return real_user.passwd == passwd
class UserAuth(flask_login.UserMixin):
pass
@login_manager.user_loader
def user_loader(email):
u = UserDbService.get_user_by_email(email)
if not u :
return
ua = UserAuth()
ua.id = u.email
ua.name = u.name
return ua
@login_manager.unauthorized_handler
def unauthorized_handler():
return render_template('login.html', notifier='')
| true | true |
f73f0441d196355b3a55813b217924e970336016 | 1,743 | py | Python | Converter-Scripts/StanfordConll_to_IOB.py | Jadaju/Korpus-Plenarprotokolle | 3ed32a5f3f47503b65563f3816a0ffce2adfa927 | [
"Apache-2.0"
] | null | null | null | Converter-Scripts/StanfordConll_to_IOB.py | Jadaju/Korpus-Plenarprotokolle | 3ed32a5f3f47503b65563f3816a0ffce2adfa927 | [
"Apache-2.0"
] | null | null | null | Converter-Scripts/StanfordConll_to_IOB.py | Jadaju/Korpus-Plenarprotokolle | 3ed32a5f3f47503b65563f3816a0ffce2adfa927 | [
"Apache-2.0"
] | null | null | null | import codecs
import re
filename = "18181.txt"
#fileone = codecs.open("F:/Dropbox/Master/Plenarprotokolle/Annotiert/alle-final/Gold-18181-noAnhang.conll", "r", "utf-8")
fileone = codecs.open("F:/Dropbox/Master/"+filename+".conll", "r", "utf-8")
TextString = fileone.read()
#print(TextString)
TextList = TextString.splitlines()
#print(TextList)
newfile = open(r"F:/Dropbox/Master/"+filename+"_IOB.conll","w+", encoding="utf-8")
indi = 0
for x in TextList:
if re.findall("PERSON", x) != []:
#print("X:"+" "+x+"\n"+"TextListindi:"+" "+TextList[indi+1])
TextList[indi] = re.sub("PERSON", "B-PER", TextList[indi])
if re.findall("[B|I]-PER", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-PER", "I-PER", TextList[indi])
elif re.findall("LOCATION", x) != []:
#print("X:"+" "+x+"\n"+"TextListindi:"+" "+TextList[indi+1])
TextList[indi] = re.sub("LOCATION", "B-LOC", TextList[indi])
if re.findall("[B|I]-LOC", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-LOC", "I-LOC", TextList[indi])
elif re.findall("ORGANIZATION", x) != []:
#print("X:"+" "+x+"\n"+"TextListindi:"+" "+TextList[indi+1])
TextList[indi] = re.sub("ORGANIZATION", "B-ORG", TextList[indi])
if re.findall("[B|I]-ORG", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-ORG", "I-ORG", TextList[indi])
elif re.findall("MISC", x) != []:
#print("X:"+" "+x+"\n"+"TextListindi:"+" "+TextList[indi+1])
TextList[indi] = re.sub("MISC", "B-MISC", TextList[indi])
if re.findall("[B|I]-MISC", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-MISC", "I-MISC", TextList[indi])
indi += 1
for x in TextList:
newfile.write(x+"\n")
newfile.close()
#print(TextList)
#if re.findall("PERSON|ORGANIZATION|LOCATION|OTHER", x):
#print(x) | 35.571429 | 121 | 0.620769 | import codecs
import re
filename = "18181.txt"
fileone = codecs.open("F:/Dropbox/Master/"+filename+".conll", "r", "utf-8")
TextString = fileone.read()
TextList = TextString.splitlines()
newfile = open(r"F:/Dropbox/Master/"+filename+"_IOB.conll","w+", encoding="utf-8")
indi = 0
for x in TextList:
if re.findall("PERSON", x) != []:
TextList[indi] = re.sub("PERSON", "B-PER", TextList[indi])
if re.findall("[B|I]-PER", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-PER", "I-PER", TextList[indi])
elif re.findall("LOCATION", x) != []:
TextList[indi] = re.sub("LOCATION", "B-LOC", TextList[indi])
if re.findall("[B|I]-LOC", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-LOC", "I-LOC", TextList[indi])
elif re.findall("ORGANIZATION", x) != []:
TextList[indi] = re.sub("ORGANIZATION", "B-ORG", TextList[indi])
if re.findall("[B|I]-ORG", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-ORG", "I-ORG", TextList[indi])
elif re.findall("MISC", x) != []:
TextList[indi] = re.sub("MISC", "B-MISC", TextList[indi])
if re.findall("[B|I]-MISC", TextList[indi-1]) != []:
TextList[indi] = re.sub("B-MISC", "I-MISC", TextList[indi])
indi += 1
for x in TextList:
newfile.write(x+"\n")
newfile.close()
| true | true |
f73f06226e99e3ac82701806c60f2979cccfda02 | 722 | py | Python | server/store/views.py | duxevy/SPbSTU_Django_Shop | a94b140aa57cec83f788aabc8e7bb1e11968a657 | [
"Apache-2.0"
] | null | null | null | server/store/views.py | duxevy/SPbSTU_Django_Shop | a94b140aa57cec83f788aabc8e7bb1e11968a657 | [
"Apache-2.0"
] | null | null | null | server/store/views.py | duxevy/SPbSTU_Django_Shop | a94b140aa57cec83f788aabc8e7bb1e11968a657 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from .models import Category, Product
def product_all(request):
products = Product.products.all()
return render(request, 'home.html', {'products': products})
def category_list(request, category_slug=None):
category = get_object_or_404(Category, slug=category_slug)
return render(request, 'products/category.html', {'category': category})
def product_detail(request, slug):
product = get_object_or_404(Product, slug=slug, in_stock=True)
return render(request, 'products/single.html', {'product': product})
def contact(request):
return render(request, 'contacts.html')
def about(request):
return render(request, 'about.html')
| 26.740741 | 76 | 0.740997 | from django.shortcuts import get_object_or_404, render
from .models import Category, Product
def product_all(request):
products = Product.products.all()
return render(request, 'home.html', {'products': products})
def category_list(request, category_slug=None):
category = get_object_or_404(Category, slug=category_slug)
return render(request, 'products/category.html', {'category': category})
def product_detail(request, slug):
product = get_object_or_404(Product, slug=slug, in_stock=True)
return render(request, 'products/single.html', {'product': product})
def contact(request):
return render(request, 'contacts.html')
def about(request):
return render(request, 'about.html')
| true | true |
f73f06dc3fef4787483a60ba28a28bc38651e10a | 2,877 | py | Python | ceshi.py | Lechatelia/Welding_Joints | 7cb5b8ac4c961c4080e1590934c24130bfde3a26 | [
"MIT"
] | null | null | null | ceshi.py | Lechatelia/Welding_Joints | 7cb5b8ac4c961c4080e1590934c24130bfde3a26 | [
"MIT"
] | null | null | null | ceshi.py | Lechatelia/Welding_Joints | 7cb5b8ac4c961c4080e1590934c24130bfde3a26 | [
"MIT"
] | null | null | null | import cv2
import tensorflow as tf
import numpy as np
import random
y = tf.constant([1,2,3,4,5,6], name='y',dtype=tf.float32)
y_ = tf.constant([0,1,2,3,4,5], name='Y_',dtype=tf.float32)
y = tf.reshape(y,[2,3])
y_ = tf.reshape(y_,[2,3])
z= tf.constant([1,2], name='z',dtype=tf.float32)
z=tf.reshape(z,[2,-1])
result=[]
result.append(tf.subtract(y,y_))
result.append(tf.multiply(y,y_))
result.append(tf.multiply(y,z))
with tf.Session() as sess:
result=sess.run(result)
for i in result:
print(i)
# result=sess.run(multiply)
# print(result)
# y = tf.constant(0.5, shape=[7],name='y',dtype=tf.float32)
# y_ = tf.constant([0.6, 0.3,0.4,0.6,0.6,0.5,0.8], name='Y_',dtype=tf.float32)
# y_ = tf.constant([[9, 8], [7, 6], [10, 11]], name='x')
# b = tf.constant(1, name='b')
# a = tf.Variable(tf.zeros([3,3]))
# result=tf.zeros(y.get_shape().as_list()[0])
# result = tf.where(tf.greater(tf.abs((y-y_),"abs"),tf.constant(0.15,shape=y.get_shape(),dtype=tf.float32)),tf.constant(0,shape=y.get_shape(),dtype=tf.float32),tf.constant(1,shape=y.get_shape(),dtype=tf.float32))
# y=23
# y_=24
# # result = tf.where(tf.greater(y,y_),tf.abs(y-y_)*10,tf.abs(y-y_))
# result = tf.where(tf.greater(y,y_),y,y_)
# z = tf.where(tf.greater(y,y_),y_,y)
# z1=tf.to_int32(z)
# z2=tf.to_int32(result)
# #
#
# # result_mean=tf.reduce_mean(result)
# # Create a session to compute
# with tf.Session() as sess:
# result=sess.run(result)
# z=sess.run(z)
# print(result)
# # print(sess.run(result_mean))
# print(z)
# img = cv2.imread("test.jpg")
#
# # img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = np.float32(img)/255
# cv2.imshow("Image",img)
# mask = np.zeros((50,50,1),dtype=np.float32)
# for i in range(20):
# for j in range(20):
# mask[i][j]=-0.5
# mask[i+30][j+30]=0.5
#
# mask = cv2.resize(mask,(658,832))
#
# mask=cv2.cvtColor(mask,cv2.COLOR_GRAY2RGB)
# cv2.imshow("a",mask)
# cv2.addWeighted(img,0.5,mask,0.5,0,mask)
# cv2.imshow('hunh',mask)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# for i in range(10):
# print(random.randint(0, 1))
#
# a=[[[i*j*k for i in range(0,3)]for j in range(0,3)] for k in range(0,3)]
# # b=[[j*i for i in range(0,3)]for j in range(0,3)]
# print(a)
# # print(b)
# a=np.array(a)
# # b=np.array(b)
# print((list(a.shape)))
# # print(a+b);
# for n in a:
# print(n)
# np.random.shuffle(a)
#
# print(len(a))
#
# print(random.randint(0, 2))
# print(random.randint(0, 2))
# print(random.randint(0, 2))
# print(random.randint(0, 2))
# print(random.randint(0, 2))
# c=[i for i in range(7)]
# print(c[-2:])
r1 = np.array([1.2, 2, 3, 4],dtype=np.float32)
r2 = np.array([1.1, 1.8, 3.3, 4.4],dtype=np.float32)
cha = r1 - r2
print(cha)
error = np.mean(np.abs(cha), axis=0)
print(error) | 25.6875 | 213 | 0.595412 | import cv2
import tensorflow as tf
import numpy as np
import random
y = tf.constant([1,2,3,4,5,6], name='y',dtype=tf.float32)
y_ = tf.constant([0,1,2,3,4,5], name='Y_',dtype=tf.float32)
y = tf.reshape(y,[2,3])
y_ = tf.reshape(y_,[2,3])
z= tf.constant([1,2], name='z',dtype=tf.float32)
z=tf.reshape(z,[2,-1])
result=[]
result.append(tf.subtract(y,y_))
result.append(tf.multiply(y,y_))
result.append(tf.multiply(y,z))
with tf.Session() as sess:
result=sess.run(result)
for i in result:
print(i)
| true | true |
f73f085b8b441f06632d5e3418089087d969e1f6 | 2,074 | py | Python | cipher.py | ilwoong/neural_distinguisher | 92bc49f58dd10a3c75a24be2813aeb8a2141e6f0 | [
"MIT"
] | 3 | 2019-09-03T18:01:06.000Z | 2020-01-04T05:27:41.000Z | cipher.py | ilwoong/neural_distinguisher | 92bc49f58dd10a3c75a24be2813aeb8a2141e6f0 | [
"MIT"
] | null | null | null | cipher.py | ilwoong/neural_distinguisher | 92bc49f58dd10a3c75a24be2813aeb8a2141e6f0 | [
"MIT"
] | null | null | null | from abc import *
class Cipher(metaclass=ABCMeta):
WORDSIZE = 0
WORDMASK = 0
NUM_ROUNDS = 0
@abstractmethod
def name(self):
pass
@abstractmethod
def expand_key(self, mk, num_rounds):
pass
@abstractmethod
def encrypt_one_round(self, pt, rk):
pass
@abstractmethod
def decrypt_one_round(self, pt, rk):
pass
## 왼쪽 회전 연산
# @param self 객체 포인터
# @param value 회전 연산을 수행할 변수값
# @param amount 회전량
def rol(self, value, amount):
return ((value << amount) | (value >> (self.WORDSIZE - amount))) & self.WORDMASK
## 오른쪽 회전 연산
# @param self 객체 포인터
# @param value 회전 연산을 수행할 변수값
# @param amount 회전량
def ror(self, value, amount):
return ((value >> amount) | (value << (self.WORDSIZE - amount))) & self.WORDMASK
## 여러 라운드 암호화
# @param self 객체 포인터
# @param pt 암호화 할 평문
# @param rk 라운드 키
def encrypt(self, pt, rks):
x, y = pt[0], pt[1]
for rk in rks:
x, y = self.encrypt_one_round((x, y), rk)
return x, y
## 여러 라운드 복호화
# @param self 객체 포인터
# @param ct 복호화 할 암호문
# @param rk 라운드 키
def decrypt(self, ct, rks):
x, y = ct[0], ct[1]
for rk in reversed(rks):
x, y = self.decrypt_one_round((x, y), rk)
return x, y
## 테스트벡터 확인
# @param self 객체 포인터
def check_testvector(self, key, pt, ct):
rks = self.expand_key(key, self.NUM_ROUNDS)
enc = self.encrypt(pt, rks)
dec = self.decrypt(ct, rks)
if (enc == ct and dec == pt):
print("testvector verified")
if (enc != ct):
print("encryption failed")
print(' '.join(format(x, '04x') for x in ct))
print(' '.join(format(x, '04x') for x in enc))
if (dec != pt):
print("decryption failed")
print(' '.join(format(x, '04x') for x in pt))
print(' '.join(format(x, '04x') for x in dec))
| 26.589744 | 89 | 0.513983 | from abc import *
class Cipher(metaclass=ABCMeta):
WORDSIZE = 0
WORDMASK = 0
NUM_ROUNDS = 0
@abstractmethod
def name(self):
pass
@abstractmethod
def expand_key(self, mk, num_rounds):
pass
@abstractmethod
def encrypt_one_round(self, pt, rk):
pass
@abstractmethod
def decrypt_one_round(self, pt, rk):
pass
def rol(self, value, amount):
return ((value << amount) | (value >> (self.WORDSIZE - amount))) & self.WORDMASK
def ror(self, value, amount):
return ((value >> amount) | (value << (self.WORDSIZE - amount))) & self.WORDMASK
def encrypt(self, pt, rks):
x, y = pt[0], pt[1]
for rk in rks:
x, y = self.encrypt_one_round((x, y), rk)
return x, y
def decrypt(self, ct, rks):
x, y = ct[0], ct[1]
for rk in reversed(rks):
x, y = self.decrypt_one_round((x, y), rk)
return x, y
ef check_testvector(self, key, pt, ct):
rks = self.expand_key(key, self.NUM_ROUNDS)
enc = self.encrypt(pt, rks)
dec = self.decrypt(ct, rks)
if (enc == ct and dec == pt):
print("testvector verified")
if (enc != ct):
print("encryption failed")
print(' '.join(format(x, '04x') for x in ct))
print(' '.join(format(x, '04x') for x in enc))
if (dec != pt):
print("decryption failed")
print(' '.join(format(x, '04x') for x in pt))
print(' '.join(format(x, '04x') for x in dec))
| true | true |
f73f09158e9653e7a88bdc68620948b1a65fddef | 5,315 | py | Python | gcloud/label/models.py | sighttviewliu/bk-sops | 6bf2f38bd93990f20f7c3a4decafc310e09e679c | [
"Apache-2.0"
] | 1 | 2021-05-19T04:31:34.000Z | 2021-05-19T04:31:34.000Z | gcloud/label/models.py | sighttviewliu/bk-sops | 6bf2f38bd93990f20f7c3a4decafc310e09e679c | [
"Apache-2.0"
] | null | null | null | gcloud/label/models.py | sighttviewliu/bk-sops | 6bf2f38bd93990f20f7c3a4decafc310e09e679c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from collections import defaultdict
from django.db import models
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
logger = logging.getLogger("root")
class LabelManager(models.Manager):
def check_label_ids(self, label_ids):
if len(label_ids) != self.filter(id__in=label_ids).count():
return False
return True
class Label(models.Model):
name = models.CharField(_("标签名称"), max_length=255, db_index=True)
creator = models.CharField(_("创建者"), max_length=255)
project_id = models.IntegerField(_("项目 ID"), default=-1)
is_default = models.BooleanField(_("默认标签"), default=False)
color = models.CharField(_("标签颜色"), max_length=7, default="#dcffe2")
description = models.CharField(_("标签描述"), max_length=255, blank=True, null=True)
objects = LabelManager()
class Meta:
verbose_name = _("用户标签 Label")
verbose_name_plural = _("用户标签 Label")
unique_together = ("project_id", "name")
def __str__(self):
return "label name:{}, description:{}".format(self.name, self.description)
class TemplateLabelManager(models.Manager):
def set_labels_for_template(self, template_id, label_ids):
existing_labels = self.filter(template_id=template_id).values_list("label_id", flat=True)
add_labels = list(set(label_ids).difference(set(existing_labels)))
add_relations = [TemplateLabelRelation(template_id=template_id, label_id=label_id) for label_id in add_labels]
remove_labels = list(set(existing_labels).difference(set(label_ids)))
self.filter(template_id=template_id, label_id__in=remove_labels).delete()
self.bulk_create(add_relations)
def fetch_labels_for_templates(self, template_ids):
label_ids = self.filter(template_id__in=template_ids).distinct().values_list("label_id", flat=True)
labels = Label.objects.filter(id__in=label_ids).values_list("id", "name", "color")
return labels
def fetch_templates_labels(self, template_ids, label_fields=("name", "color")):
select_fields = {field_name: field_name for field_name in label_fields if field_name != "id"}
relations = (
self.filter(template_id__in=template_ids)
.extra(select=select_fields, tables=["label_label"], where=["label_label.id=label_id"])
.values("template_id", "label_id", *select_fields.values())
)
templates_labels = defaultdict(list)
for relation in relations:
template_id = relation.pop("template_id")
templates_labels[template_id].append(relation)
return dict(templates_labels)
def fetch_common_labels_for_templates(self, template_ids, label_fields=("name", "color")):
label_ids = (
self.filter(template_id__in=template_ids)
.values_list("label_id", flat=True)
.annotate(num_labels=Count("label_id"))
.filter(num_labels=len(template_ids))
)
labels = Label.objects.filter(id__in=label_ids).values_list("id", *label_fields)
return labels
def fetch_template_ids_using_labels(self, label_ids):
template_ids = self.filter(label_id__in=label_ids).distinct().values_list("template_id", flat=True)
return template_ids
def fetch_template_ids_using_union_labels(self, label_ids):
template_ids = (
self.filter(label_id__in=label_ids)
.values_list("template_id", flat=True)
.annotate(num_templates=Count("template_id"))
.filter(num_templates=len(label_ids))
)
return template_ids
def fetch_label_template_ids(self, label_ids):
relations = self.filter(label_id__in=label_ids)
label_template_ids = defaultdict(list)
for relation in relations:
label_template_ids[relation.label_id].append(relation.template_id)
return label_template_ids
def delete_relations_based_on_template(self, template_id):
self.filter(template_id=template_id).delete()
def delete_relations_based_on_label(self, label_id):
self.filter(label_id=label_id).delete()
class TemplateLabelRelation(models.Model):
template_id = models.IntegerField(_("模版ID"), db_index=True)
label_id = models.IntegerField(_("标签ID"), db_index=True)
objects = TemplateLabelManager()
class Meta:
verbose_name = _("模版标签关系 TemplateLabelRelation")
verbose_name_plural = _("模版标签关系 TemplateLabelRelation")
unique_together = ("template_id", "label_id")
| 42.862903 | 118 | 0.70969 |
import logging
from collections import defaultdict
from django.db import models
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
logger = logging.getLogger("root")
class LabelManager(models.Manager):
def check_label_ids(self, label_ids):
if len(label_ids) != self.filter(id__in=label_ids).count():
return False
return True
class Label(models.Model):
name = models.CharField(_("标签名称"), max_length=255, db_index=True)
creator = models.CharField(_("创建者"), max_length=255)
project_id = models.IntegerField(_("项目 ID"), default=-1)
is_default = models.BooleanField(_("默认标签"), default=False)
color = models.CharField(_("标签颜色"), max_length=7, default="#dcffe2")
description = models.CharField(_("标签描述"), max_length=255, blank=True, null=True)
objects = LabelManager()
class Meta:
verbose_name = _("用户标签 Label")
verbose_name_plural = _("用户标签 Label")
unique_together = ("project_id", "name")
def __str__(self):
return "label name:{}, description:{}".format(self.name, self.description)
class TemplateLabelManager(models.Manager):
def set_labels_for_template(self, template_id, label_ids):
existing_labels = self.filter(template_id=template_id).values_list("label_id", flat=True)
add_labels = list(set(label_ids).difference(set(existing_labels)))
add_relations = [TemplateLabelRelation(template_id=template_id, label_id=label_id) for label_id in add_labels]
remove_labels = list(set(existing_labels).difference(set(label_ids)))
self.filter(template_id=template_id, label_id__in=remove_labels).delete()
self.bulk_create(add_relations)
def fetch_labels_for_templates(self, template_ids):
label_ids = self.filter(template_id__in=template_ids).distinct().values_list("label_id", flat=True)
labels = Label.objects.filter(id__in=label_ids).values_list("id", "name", "color")
return labels
def fetch_templates_labels(self, template_ids, label_fields=("name", "color")):
select_fields = {field_name: field_name for field_name in label_fields if field_name != "id"}
relations = (
self.filter(template_id__in=template_ids)
.extra(select=select_fields, tables=["label_label"], where=["label_label.id=label_id"])
.values("template_id", "label_id", *select_fields.values())
)
templates_labels = defaultdict(list)
for relation in relations:
template_id = relation.pop("template_id")
templates_labels[template_id].append(relation)
return dict(templates_labels)
def fetch_common_labels_for_templates(self, template_ids, label_fields=("name", "color")):
label_ids = (
self.filter(template_id__in=template_ids)
.values_list("label_id", flat=True)
.annotate(num_labels=Count("label_id"))
.filter(num_labels=len(template_ids))
)
labels = Label.objects.filter(id__in=label_ids).values_list("id", *label_fields)
return labels
def fetch_template_ids_using_labels(self, label_ids):
template_ids = self.filter(label_id__in=label_ids).distinct().values_list("template_id", flat=True)
return template_ids
def fetch_template_ids_using_union_labels(self, label_ids):
template_ids = (
self.filter(label_id__in=label_ids)
.values_list("template_id", flat=True)
.annotate(num_templates=Count("template_id"))
.filter(num_templates=len(label_ids))
)
return template_ids
def fetch_label_template_ids(self, label_ids):
relations = self.filter(label_id__in=label_ids)
label_template_ids = defaultdict(list)
for relation in relations:
label_template_ids[relation.label_id].append(relation.template_id)
return label_template_ids
def delete_relations_based_on_template(self, template_id):
self.filter(template_id=template_id).delete()
def delete_relations_based_on_label(self, label_id):
self.filter(label_id=label_id).delete()
class TemplateLabelRelation(models.Model):
template_id = models.IntegerField(_("模版ID"), db_index=True)
label_id = models.IntegerField(_("标签ID"), db_index=True)
objects = TemplateLabelManager()
class Meta:
verbose_name = _("模版标签关系 TemplateLabelRelation")
verbose_name_plural = _("模版标签关系 TemplateLabelRelation")
unique_together = ("template_id", "label_id")
| true | true |
f73f0921ad38985f43c3b0c2f97416efa06040be | 1,306 | py | Python | tools/skp/page_sets/skia_motionmarksuits_desktop.py | vibeus/skia | 498bfa4a8590398ac01cb801ea1349a293054073 | [
"BSD-3-Clause"
] | 6,304 | 2015-01-05T23:45:12.000Z | 2022-03-31T09:48:13.000Z | tools/skp/page_sets/skia_motionmarksuits_desktop.py | lbnb/skia | f61ec43f84dd73508a566c36ef085156b40285f0 | [
"BSD-3-Clause"
] | 67 | 2016-04-18T13:30:02.000Z | 2022-03-31T23:06:55.000Z | tools/skp/page_sets/skia_motionmarksuits_desktop.py | lbnb/skia | f61ec43f84dd73508a566c36ef085156b40285f0 | [
"BSD-3-Clause"
] | 1,231 | 2015-01-05T03:17:39.000Z | 2022-03-31T22:54:58.000Z | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_motionmarksuits_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url, timeout_in_seconds=120)
class SkiaMotionmarksuitsDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaMotionmarksuitsDesktopPageSet, self).__init__(
archive_data_file='data/skia_motionmarksuits_desktop.json')
urls_list = [
# Why: from skbug.com/12292
('https://storage.googleapis.com/skia-recreateskps-bot-hosted-pages/'
'desk_motionmarksuits.svg'),
]
for url in urls_list:
self.AddStory(SkiaBuildbotDesktopPage(url, self))
| 31.095238 | 75 | 0.75268 |
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_motionmarksuits_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url, timeout_in_seconds=120)
class SkiaMotionmarksuitsDesktopPageSet(story.StorySet):
def __init__(self):
super(SkiaMotionmarksuitsDesktopPageSet, self).__init__(
archive_data_file='data/skia_motionmarksuits_desktop.json')
urls_list = [
('https://storage.googleapis.com/skia-recreateskps-bot-hosted-pages/'
'desk_motionmarksuits.svg'),
]
for url in urls_list:
self.AddStory(SkiaBuildbotDesktopPage(url, self))
| true | true |
f73f09751e813cde35a74416c7d826ba7d39d4be | 5,658 | py | Python | src/scripts/RQ1.py | WeiyuCheng/FIA-KDD-19 | 18f29f8babbf1c505973a8a62ac48c6ca34ccd8a | [
"MIT"
] | 16 | 2019-05-15T05:54:22.000Z | 2021-11-30T03:31:42.000Z | src/scripts/RQ1.py | WeiyuCheng/FIA-KDD-19 | 18f29f8babbf1c505973a8a62ac48c6ca34ccd8a | [
"MIT"
] | 1 | 2019-11-21T01:38:04.000Z | 2019-11-22T03:24:43.000Z | src/scripts/RQ1.py | WeiyuCheng/FIA-KDD-19 | 18f29f8babbf1c505973a8a62ac48c6ca34ccd8a | [
"MIT"
] | 4 | 2020-09-03T02:35:39.000Z | 2022-03-16T13:47:44.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import argparse
import os
from scipy.stats import pearsonr
import sys
sys.path.append("..")
from scripts.load_movielens import load_movielens
from scripts.load_yelp import load_yelp
import influence.experiments as experiments
from influence.matrix_factorization import MF
from influence.NCF import NCF
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--avextol', type=float, default=1e-3,
help='threshold for optimization in influence function')
parser.add_argument('--damping', type=float, default=1e-6,
help='damping term in influence function')
parser.add_argument('--weight_decay', type=float, default=1e-3,
help='l2 regularization term for training MF or NCF model')
parser.add_argument('--lr', type=float, default=1e-3,
help='initial learning rate for training MF or NCF model')
parser.add_argument('--embed_size', type=int, default=16,
help='embedding size')
parser.add_argument('--maxinf', type=int, default=1,
help='remove type of train indices')
parser.add_argument('--dataset', type=str, default='movielens',
help='name of dataset: movielens or yelp')
parser.add_argument('--model', type=str, default='NCF',
help='model type: MF or NCF')
parser.add_argument('--num_test', type=int, default=5,
help='number of test points of retraining')
parser.add_argument('--num_steps_train', type=int, default=180000,
help='training steps')
parser.add_argument('--num_steps_retrain', type=int, default=27000,
help='retraining steps')
parser.add_argument('--reset_adam', type=int, default=0)
parser.add_argument('--load_checkpoint', type=int, default=1)
parser.add_argument('--retrain_times', type=int, default=4)
parser.add_argument('--sort_test_case', type=int, default=0)
return parser.parse_args()
args = parse_args()
if args.dataset == 'movielens':
data_sets = load_movielens('../../data')
batch_size = 3020
elif args.dataset == 'yelp':
data_sets = load_yelp('../../data')
batch_size = 3009
else:
raise NotImplementedError
weight_decay = args.weight_decay
initial_learning_rate = args.lr
num_users = int(np.max(data_sets.train._x[:, 0])+1)
num_items = int(np.max(data_sets.train._x[:, 1])+1)
print("number of users: %d" % num_users)
print("number of items: %d" % num_items)
print("number of training examples: %d" % data_sets.train._x.shape[0])
print("number of testing examples: %d" % data_sets.test._x.shape[0])
avextol = args.avextol
damping = args.damping
print("Using avextol of %.0e" % avextol)
print("Using damping of %.0e" % damping)
print("Using embedding size of %d" % args.embed_size)
if args.model == 'MF':
Model = MF
elif args.model == 'NCF':
Model = NCF
else:
raise NotImplementedError
model = Model(
num_users=num_users,
num_items=num_items,
embedding_size=args.embed_size,
weight_decay=weight_decay,
num_classes=1,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
damping=damping,
decay_epochs=[10000, 20000],
mini_batch=True,
train_dir='output',
log_dir='log',
avextol=avextol,
model_name='%s_%s_explicit_damping%.0e_avextol%.0e_embed%d_maxinf%d_wd%.0e' % (
args.dataset, args.model, damping, avextol, args.embed_size, args.maxinf, weight_decay))
print(f'Model name is: {model.model_name}')
num_steps = args.num_steps_train
iter_to_load = num_steps - 1
if os.path.isfile("%s-%s.index" % (model.checkpoint_file, iter_to_load)):
print('Checkpoint found, loading...')
model.load_checkpoint(iter_to_load=iter_to_load)
else:
print('Checkpoint not found, start training...')
model.train(
num_steps=num_steps)
model.saver.save(model.sess, model.checkpoint_file, global_step=num_steps - 1)
if args.maxinf:
remove_type = 'maxinf'
else:
remove_type = 'random'
test_size = data_sets.test.num_examples
num_test = args.num_test
test_indices = np.random.choice(test_size, num_test, replace=False)
if args.sort_test_case:
num_related_ratings = []
for i in range(test_size):
num_related_ratings += [model.get_train_indices_of_test_case([i]).shape[0]]
test_indices = np.argsort(np.array(num_related_ratings))[:num_test]
actual_y_diff = np.zeros(num_test)
predicted_y_diff = np.zeros(num_test)
removed_indices = np.zeros(num_test)
for i, test_idx in enumerate(test_indices):
print(f'test point====={i}=====')
actual_y_diffs, predicted_y_diffs, indices_to_remove = experiments.test_retraining(
model,
test_idx=test_idx,
iter_to_load=iter_to_load,
retrain_times=args.retrain_times,
num_to_remove=1,
num_steps=args.num_steps_retrain,
remove_type=remove_type,
force_refresh=True,
reset_adam=args.reset_adam,
load_checkpoint=args.load_checkpoint)
actual_y_diff[i] = actual_y_diffs[0]
predicted_y_diff[i] = predicted_y_diffs[0]
removed_indices[i] = indices_to_remove[0]
np.savez(
'output/RQ1-%s-%s.npz' % (args.model, args.dataset),
actual_loss_diffs=actual_y_diff,
predicted_loss_diffs=predicted_y_diff,
indices_to_remove=removed_indices
)
print('Correlation is %s' % pearsonr(actual_y_diff, predicted_y_diff)[0])
| 37.470199 | 96 | 0.696713 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import argparse
import os
from scipy.stats import pearsonr
import sys
sys.path.append("..")
from scripts.load_movielens import load_movielens
from scripts.load_yelp import load_yelp
import influence.experiments as experiments
from influence.matrix_factorization import MF
from influence.NCF import NCF
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--avextol', type=float, default=1e-3,
help='threshold for optimization in influence function')
parser.add_argument('--damping', type=float, default=1e-6,
help='damping term in influence function')
parser.add_argument('--weight_decay', type=float, default=1e-3,
help='l2 regularization term for training MF or NCF model')
parser.add_argument('--lr', type=float, default=1e-3,
help='initial learning rate for training MF or NCF model')
parser.add_argument('--embed_size', type=int, default=16,
help='embedding size')
parser.add_argument('--maxinf', type=int, default=1,
help='remove type of train indices')
parser.add_argument('--dataset', type=str, default='movielens',
help='name of dataset: movielens or yelp')
parser.add_argument('--model', type=str, default='NCF',
help='model type: MF or NCF')
parser.add_argument('--num_test', type=int, default=5,
help='number of test points of retraining')
parser.add_argument('--num_steps_train', type=int, default=180000,
help='training steps')
parser.add_argument('--num_steps_retrain', type=int, default=27000,
help='retraining steps')
parser.add_argument('--reset_adam', type=int, default=0)
parser.add_argument('--load_checkpoint', type=int, default=1)
parser.add_argument('--retrain_times', type=int, default=4)
parser.add_argument('--sort_test_case', type=int, default=0)
return parser.parse_args()
args = parse_args()
if args.dataset == 'movielens':
data_sets = load_movielens('../../data')
batch_size = 3020
elif args.dataset == 'yelp':
data_sets = load_yelp('../../data')
batch_size = 3009
else:
raise NotImplementedError
weight_decay = args.weight_decay
initial_learning_rate = args.lr
num_users = int(np.max(data_sets.train._x[:, 0])+1)
num_items = int(np.max(data_sets.train._x[:, 1])+1)
print("number of users: %d" % num_users)
print("number of items: %d" % num_items)
print("number of training examples: %d" % data_sets.train._x.shape[0])
print("number of testing examples: %d" % data_sets.test._x.shape[0])
avextol = args.avextol
damping = args.damping
print("Using avextol of %.0e" % avextol)
print("Using damping of %.0e" % damping)
print("Using embedding size of %d" % args.embed_size)
if args.model == 'MF':
Model = MF
elif args.model == 'NCF':
Model = NCF
else:
raise NotImplementedError
model = Model(
num_users=num_users,
num_items=num_items,
embedding_size=args.embed_size,
weight_decay=weight_decay,
num_classes=1,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
damping=damping,
decay_epochs=[10000, 20000],
mini_batch=True,
train_dir='output',
log_dir='log',
avextol=avextol,
model_name='%s_%s_explicit_damping%.0e_avextol%.0e_embed%d_maxinf%d_wd%.0e' % (
args.dataset, args.model, damping, avextol, args.embed_size, args.maxinf, weight_decay))
print(f'Model name is: {model.model_name}')
num_steps = args.num_steps_train
iter_to_load = num_steps - 1
if os.path.isfile("%s-%s.index" % (model.checkpoint_file, iter_to_load)):
print('Checkpoint found, loading...')
model.load_checkpoint(iter_to_load=iter_to_load)
else:
print('Checkpoint not found, start training...')
model.train(
num_steps=num_steps)
model.saver.save(model.sess, model.checkpoint_file, global_step=num_steps - 1)
if args.maxinf:
remove_type = 'maxinf'
else:
remove_type = 'random'
test_size = data_sets.test.num_examples
num_test = args.num_test
test_indices = np.random.choice(test_size, num_test, replace=False)
if args.sort_test_case:
num_related_ratings = []
for i in range(test_size):
num_related_ratings += [model.get_train_indices_of_test_case([i]).shape[0]]
test_indices = np.argsort(np.array(num_related_ratings))[:num_test]
actual_y_diff = np.zeros(num_test)
predicted_y_diff = np.zeros(num_test)
removed_indices = np.zeros(num_test)
for i, test_idx in enumerate(test_indices):
print(f'test point====={i}=====')
actual_y_diffs, predicted_y_diffs, indices_to_remove = experiments.test_retraining(
model,
test_idx=test_idx,
iter_to_load=iter_to_load,
retrain_times=args.retrain_times,
num_to_remove=1,
num_steps=args.num_steps_retrain,
remove_type=remove_type,
force_refresh=True,
reset_adam=args.reset_adam,
load_checkpoint=args.load_checkpoint)
actual_y_diff[i] = actual_y_diffs[0]
predicted_y_diff[i] = predicted_y_diffs[0]
removed_indices[i] = indices_to_remove[0]
np.savez(
'output/RQ1-%s-%s.npz' % (args.model, args.dataset),
actual_loss_diffs=actual_y_diff,
predicted_loss_diffs=predicted_y_diff,
indices_to_remove=removed_indices
)
print('Correlation is %s' % pearsonr(actual_y_diff, predicted_y_diff)[0])
| true | true |
f73f09a92559444565258b2573b324e84d3b177d | 793 | py | Python | GalDM/prop_mod.py | nickrodd/GalDM | 6ea9ed3882ecf4235fb4a773f8370858ad9ebf1e | [
"MIT"
] | null | null | null | GalDM/prop_mod.py | nickrodd/GalDM | 6ea9ed3882ecf4235fb4a773f8370858ad9ebf1e | [
"MIT"
] | null | null | null | GalDM/prop_mod.py | nickrodd/GalDM | 6ea9ed3882ecf4235fb4a773f8370858ad9ebf1e | [
"MIT"
] | null | null | null | ###############################################################################
# prop_mod.py
###############################################################################
#
# Calculate mod without numpy issue
#
###############################################################################
import numpy as np
from tqdm import *
def mod(dividends, divisor):
    """ return dividends (array) mod divisor (double)

    Every result is guaranteed to lie in ``[0, divisor)``.  This matters
    because a plain float ``%`` (and ``np.mod``) can return a value exactly
    equal to ``divisor`` when a tiny negative dividend rounds up to it.

    :param dividends: sequence of floats to reduce
    :param divisor: the (positive) modulus
    :return: numpy array of reduced values, each in ``[0, divisor)``
    """
    output = np.zeros(len(dividends))
    for i in tqdm(range(len(dividends))):
        # O(1) reduction; the previous repeated add/subtract loop needed
        # O(|dividend| / divisor) iterations for large dividends.
        value = dividends[i] % divisor
        # Floating-point rounding in `%` can leave the result sitting
        # exactly on the boundary; fold it back into [0, divisor).
        if value >= divisor:
            value -= divisor
        if value < 0.:
            value += divisor
        output[i] = value
    return output
| 26.433333 | 79 | 0.373266 | true | true | |
f73f09dad8a0654be9bce41704b2e32d8cd06094 | 2,192 | py | Python | pre_commit_hooks/string_fixer.py | HeadspaceMeditation/pre-commit-hooks | 8971d2fa75dbeea40b714987a499e24082242b92 | [
"MIT"
] | null | null | null | pre_commit_hooks/string_fixer.py | HeadspaceMeditation/pre-commit-hooks | 8971d2fa75dbeea40b714987a499e24082242b92 | [
"MIT"
] | 2 | 2018-06-21T23:29:15.000Z | 2019-04-12T16:20:05.000Z | pre_commit_hooks/string_fixer.py | HeadspaceMeditation/pre-commit-hooks | 8971d2fa75dbeea40b714987a499e24082242b92 | [
"MIT"
] | 1 | 2017-11-23T20:04:12.000Z | 2017-11-23T20:04:12.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import tokenize
double_quote_starts = tuple(s for s in tokenize.single_quoted if '"' in s)
def handle_match(token_text):
if '"""' in token_text or "'''" in token_text:
return token_text
for double_quote_start in double_quote_starts:
if token_text.startswith(double_quote_start):
meat = token_text[len(double_quote_start):-1]
if '"' in meat or "'" in meat:
break
return double_quote_start.replace('"', "'") + meat + "'"
return token_text
def get_line_offsets_by_line_no(src):
# Padded so we can index with line number
offsets = [None, 0]
for line in src.splitlines():
offsets.append(offsets[-1] + len(line) + 1)
return offsets
def fix_strings(filename):
contents = io.open(filename, encoding='UTF-8').read()
line_offsets = get_line_offsets_by_line_no(contents)
# Basically a mutable string
splitcontents = list(contents)
# Iterate in reverse so the offsets are always correct
tokens = reversed(list(tokenize.generate_tokens(
io.StringIO(contents).readline,
)))
for token_type, token_text, (srow, scol), (erow, ecol), _ in tokens:
if token_type == tokenize.STRING:
new_text = handle_match(token_text)
splitcontents[
line_offsets[srow] + scol:
line_offsets[erow] + ecol
] = new_text
new_contents = ''.join(splitcontents)
if contents != new_contents:
with io.open(filename, 'w', encoding='UTF-8') as write_handle:
write_handle.write(new_contents)
return 1
else:
return 0
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
retv = 0
for filename in args.filenames:
return_value = fix_strings(filename)
if return_value != 0:
print('Fixing strings in {}'.format(filename))
retv |= return_value
return retv
| 28.842105 | 74 | 0.650547 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import tokenize
# Opening quote sequences (e.g. `"`, `r"`, `b"`) whose quote character is double.
double_quote_starts = tuple(s for s in tokenize.single_quoted if '"' in s)
def handle_match(token_text):
    """Return *token_text* rewritten with single quotes when that is safe.

    Triple-quoted strings, non-double-quoted tokens, and strings whose body
    contains either quote character are returned unchanged.
    """
    if '"""' in token_text or "'''" in token_text:
        return token_text
    for double_quote_start in double_quote_starts:
        if token_text.startswith(double_quote_start):
            meat = token_text[len(double_quote_start):-1]
            if '"' in meat or "'" in meat:
                # Re-quoting would require new escaping; leave unchanged.
                break
            return double_quote_start.replace('"', "'") + meat + "'"
    return token_text
def get_line_offsets_by_line_no(src):
    """Return absolute character offsets of each (1-based) line's start."""
    # Padded so we can index with line number
    offsets = [None, 0]
    for line in src.splitlines():
        offsets.append(offsets[-1] + len(line) + 1)
    return offsets
def fix_strings(filename):
    """Rewrite double-quoted string literals in *filename* to single quotes.

    Returns 1 when the file was modified, 0 otherwise.
    """
    contents = io.open(filename, encoding='UTF-8').read()
    line_offsets = get_line_offsets_by_line_no(contents)
    # Basically a mutable string
    splitcontents = list(contents)
    # Iterate in reverse so the offsets are always correct
    tokens = reversed(list(tokenize.generate_tokens(
        io.StringIO(contents).readline,
    )))
    for token_type, token_text, (srow, scol), (erow, ecol), _ in tokens:
        if token_type == tokenize.STRING:
            new_text = handle_match(token_text)
            # Splice the (possibly re-quoted) token over its original span.
            splitcontents[
                line_offsets[srow] + scol:
                line_offsets[erow] + ecol
            ] = new_text
    new_contents = ''.join(splitcontents)
    if contents != new_contents:
        with io.open(filename, 'w', encoding='UTF-8') as write_handle:
            write_handle.write(new_contents)
        return 1
    else:
        return 0
def main(argv=None):
    """CLI entry point: fix each file; return non-zero if any changed."""
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to fix')
    args = parser.parse_args(argv)
    retv = 0
    for filename in args.filenames:
        return_value = fix_strings(filename)
        if return_value != 0:
            print('Fixing strings in {}'.format(filename))
        retv |= return_value
    return retv
| true | true |
f73f0b7db4188b77271cf409ecc1a32c4c34c312 | 3,937 | py | Python | libs/VManagerSDK/vmanager/models/ap_list_element.py | rgrr/smartmeshsdk | a95f3e4d9e2254d59d326428fef8c77319cd4373 | [
"BSD-3-Clause"
] | 29 | 2015-02-17T14:22:14.000Z | 2021-02-19T06:01:10.000Z | libs/VManagerSDK/vmanager/models/ap_list_element.py | rgrr/smartmeshsdk | a95f3e4d9e2254d59d326428fef8c77319cd4373 | [
"BSD-3-Clause"
] | 104 | 2016-04-10T19:22:20.000Z | 2018-11-20T15:47:14.000Z | libs/VManagerSDK/vmanager/models/ap_list_element.py | rgrr/smartmeshsdk | a95f3e4d9e2254d59d326428fef8c77319cd4373 | [
"BSD-3-Clause"
] | 35 | 2015-07-10T18:58:15.000Z | 2022-03-20T08:56:25.000Z | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class APListElement(object):
    """
    Model of one access point (AP) entry in the VManager AP list.

    NOTE: This class was originally produced by the swagger code generator.
    It has been reviewed by hand to make equality comparisons safe against
    arbitrary objects and to avoid ``six`` inside methods.
    """

    def __init__(self):
        """
        APListElement - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'mac_address': 'str',
            'state': 'str'
        }

        self.attribute_map = {
            'mac_address': 'macAddress',
            'state': 'state'
        }

        self._mac_address = None
        self._state = None

    @property
    def mac_address(self):
        """
        Gets the mac_address of this APListElement.
        MAC address

        :return: The mac_address of this APListElement.
        :rtype: str
        """
        return self._mac_address

    @mac_address.setter
    def mac_address(self, mac_address):
        """
        Sets the mac_address of this APListElement.
        MAC address

        :param mac_address: The mac_address of this APListElement.
        :type: str
        """
        self._mac_address = mac_address

    @property
    def state(self):
        """
        Gets the state of this APListElement.
        AP mote state

        :return: The state of this APListElement.
        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state):
        """
        Sets the state of this APListElement.
        AP mote state

        :param state: The state of this APListElement.
        :type: str
        :raises ValueError: if ``state`` is not an allowed value
        """
        allowed_values = ["lost", "negotiating", "connected", "operational", "decommissioned"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state`, must be one of {0}"
                .format(allowed_values)
            )
        self._state = state

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}

        # dict.items() behaves identically here on Python 2 and 3; no need
        # for six.iteritems.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Comparing against a non-APListElement now yields inequality instead
        of raising AttributeError on objects without a ``__dict__``.
        """
        if not isinstance(other, APListElement):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.965753 | 94 | 0.565659 |
from pprint import pformat
from six import iteritems
class APListElement(object):
    """Model of one access point (AP) entry in the VManager AP list."""

    def __init__(self):
        """Create an empty element; both fields start as ``None``."""
        # Attribute name -> swagger type; drives to_dict() serialization.
        self.swagger_types = {
            'mac_address': 'str',
            'state': 'str'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'mac_address': 'macAddress',
            'state': 'state'
        }

        self._mac_address = None
        self._state = None

    @property
    def mac_address(self):
        """MAC address of this AP (str or None)."""
        return self._mac_address

    @mac_address.setter
    def mac_address(self, mac_address):
        """Set the MAC address of this AP."""
        self._mac_address = mac_address

    @property
    def state(self):
        """AP mote state (str or None)."""
        return self._state

    @state.setter
    def state(self, state):
        """Set the AP mote state.

        :raises ValueError: if ``state`` is not one of the allowed values.
        """
        allowed_values = ["lost", "negotiating", "connected", "operational", "decommissioned"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state`, must be one of {0}"
                .format(allowed_values)
            )
        self._state = state

    def to_dict(self):
        """Return the model properties as a dict, recursing into models."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal.

        NOTE(review): raises AttributeError when ``other`` has no
        ``__dict__`` (e.g. ``element == 5``) -- consider an isinstance guard.
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.