hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f638a47fc261ba269cd6aeaf1215f099134f09d | 22,742 | py | Python | app/models.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | 1 | 2017-10-04T12:21:20.000Z | 2017-10-04T12:21:20.000Z | app/models.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | null | null | null | app/models.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | null | null | null | import hashlib
from datetime import datetime
from werkzeug.security import generate_password_hash
from flask import current_app, request
from flask_login import UserMixin, AnonymousUserMixin, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from bson import ObjectId
from app import login_manager
from common.db import mongo_connect, MongoConfig, create_dict_path
from common.meta import Meta
from common.general import merge_dict
from fuzzier.jison import Jison
# Note: all DB model class are using legacy PyMongo method `insert()` to create
# new document into collection, rather than `insert_one()`, as `insert()` will
# return new `_id` directly
class Group:
    """Model for a top-level variable group stored in the ``Group`` collection.

    A group document holds a ``var`` identifier, a display ``name`` and a
    ``sub_groups`` list of SubGroup id strings. All operations default to
    ``current_user.db`` unless an explicit ``specific_db`` handle is passed.
    """
    def __init__(self, var: str, name: str):
        self.var = var
        self.name = name
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<Group {self.var} - {self.name}>'
    def new(self, specific_db = None) -> str:
        """Insert this group unless a document with the same ``var`` exists.

        Returns the inserted document's id as a string, or the legacy
        document's id when a duplicate is found (legacy ``insert()`` returns
        the new ``_id`` directly — see module-level note).
        """
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        document = {
            'var': self.var,
            'name': self.name,
            'sub_groups': []
        }
        legacy = db.Group.find_one({'var': self.var})
        if not legacy:
            return str(db.Group.insert(document))
        else:
            return str(legacy.get('_id'))
    @staticmethod
    def update_doc(locate: dict, update: dict, specific_db = None):
        """Apply a ``$set`` update to the first document matching ``locate``."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        db.Group.update_one(locate, {'$set': update})
    @staticmethod
    def delete_doc(locate: dict, specific_db = None):
        """Delete the matching group and cascade-delete its sub groups."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        to_be_deleted = db.Group.find_one(locate)
        if to_be_deleted:
            sub_groups = to_be_deleted.get('sub_groups')
            db.Group.delete_one(locate)
            for sub_group_id in sub_groups:
                try:
                    # best effort: ignore invalid ids / already-deleted docs
                    SubGroup.delete_doc({'_id': ObjectId(sub_group_id)})
                except:
                    pass
    @staticmethod
    def search(locate: dict, specific_db = None) -> dict:
        """Return the first matching group document, or None."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        return db.Group.find_one(locate)
class SubGroup:
    """Model for a sub group document in the ``SubGroup`` collection.

    A sub group document holds a ``name`` and a ``variables`` list.
    """
    def __init__(self, name: str):
        self.name = name
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<SubGroup {self.name}>'
    def new(self, specific_db = None) -> str:
        """Insert this sub group and return its id (or a legacy doc's id).

        NOTE(review): the duplicate lookup below queries the ``Variables``
        collection while the insert targets ``SubGroup`` — this looks like a
        copy/paste slip (compare ``Group.new``); confirm the intended
        collection before changing it.
        """
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        document = {
            'name': self.name,
            'variables': []
        }
        legacy = db.Variables.find_one({'name': self.name})
        if not legacy:
            return str(db.SubGroup.insert(document))
        else:
            return str(legacy.get('_id'))
    @staticmethod
    def update_doc(locate: dict, update: dict, specific_db = None):
        """Apply a ``$set`` update to the first matching sub group."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        db.SubGroup.update_one(locate, {'$set': update})
    @staticmethod
    def delete_doc(locate: dict, specific_db = None):
        """Delete the first sub group document matching ``locate``."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        db.SubGroup.delete_one(locate)
    @staticmethod
    def search(locate: dict, specific_db = None) -> dict:
        """Return the first matching sub group document, or None."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        return db.SubGroup.find_one(locate)
class InterfaceNode:
    """A node of the interface tree persisted in ``InterfaceNode``.

    ``node`` is a dict with ``id``, ``text``, ``type`` and (optionally) a
    ``children`` list of nested nodes of the same shape.
    """
    def __init__(self, node: dict):
        self.node = node
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<InterfaceNode {self.node.get("id")} {self.node.get("text")}>'
    def get_id(self, depth: int = 0) -> tuple:
        """
        return child id and MongoDB query syntax based on depth

        Walks ``depth`` levels down the first-child chain and returns a
        (``children.children....id`` dotted query path, child id) tuple.
        Returns ``('', '')`` when depth is 0 or the chain is shorter than
        ``depth``.
        """
        if depth:
            node_id = ''
            query_base = ''
            try:
                node_dict = self.node
                while depth:
                    # always descend into the FIRST child at each level
                    node_dict = node_dict.get('children')[0]
                    node_id = node_dict.get('id')
                    query_base = f'children.{query_base}'
                    depth -= 1
                return f'{query_base}id', node_id
            except:
                # missing/empty 'children' along the way: signal "not found"
                return '', ''
        return '', ''
    def new(self, force: bool = False, depth: int = 0, specific_db = None) -> str:
        """
        normally when `new()` is called and a legacy data already exists, it
        does nothing but just return legacy data's serial number
        when option `force` is enabled, the `new()` method will try to update
        information to the data if legacy data already exists
        depth is used for controlling update depth, e.g. if I get following
        data with depth is 3:
            depth = 0     depth = 1     depth = 2     depth = 3
            {'children': [{'children': [{'children': [{'id': 'custom_page_207_0_5',
                                                       'text': 'Trust Name',
                                                       'type': 'variable'}],
                                         'id': 'client_52-2-0',
                                         'text': 'Trust Details',
                                         'type': 'root'}],
                           'id': 'client_52-2',
                           'text': 'FMD Trust SoA Wizard',
                           'type': 'root'}],
             'id': 'client_52',
             'text': 'FMD SoA Wizard',
             'type': 'root'}
        then I ONLY update/insert the content of the leaf node on depth = 3
        (when depth = 0 is the normal case)
        """
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        # TODO: FIX ME, too buggy
        legacy = db.InterfaceNode.find_one(
            {'id': self.node.get('id')})
        if not legacy:
            return str(db.InterfaceNode.insert(self.node))
        elif force and not depth:
            # children of self.node whose 'text' is absent from the legacy
            # document's children
            pushed_list = [x for x in self.node.get('children')
                           if x.get('text') not in
                           [l.get('text') for l in legacy.get('children')]
                           ]
            # `pushed_list` is a list of {child} which are not in legacy children
            # is `pushed_list` then append each {child} to legacy's children list
            if pushed_list:
                db.InterfaceNode.update_one(
                    {'id': self.node.get('id')},
                    {
                        '$set': {
                            'text': self.node.get('text'),
                            'type': self.node.get('type')},
                        '$push': {
                            'children': {'$each': pushed_list}}
                    })
            # if `pushed_list` is empty, just make current children to overwrite
            # legacy's children
            else:
                db.InterfaceNode.update_one(
                    {'id': self.node.get('id')},
                    {'$set': {
                        'text': self.node.get('text'),
                        'type': self.node.get('type'),
                        'children': self.node.get('children')
                    }})
        elif depth:
            import re
            query, child_node_id = self.get_id(depth)
            # child node is a root node
            if re.search('^client_[0-9]+', child_node_id):
                # NOTE(review): the '$set' payload below is empty, so this
                # update is currently a no-op — unfinished, see TODO above
                db.InterfaceNode.update_one(
                    {
                        'id': self.node.get('id'),
                        query: child_node_id
                    },
                    {'$set': {
                    }}
                )
            # child node is a leaf
            else:
                pass
        return str(legacy.get('_id'))
    @staticmethod
    def update_doc(locate: dict, update: dict, specific_db = None):
        """Apply a ``$set`` update to the first matching node document."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        db.InterfaceNode.update_one(locate, {'$set': update})
    @staticmethod
    def search(locate: dict, specific_db = None) -> dict:
        """Return the first matching node document, or None."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        return db.InterfaceNode.find_one(locate)
class InterfaceLeafPage:
    """A leaf page of the interface tree, stored in ``InterfaceLeafPage``."""
    def __init__(self, id: str, text: str, leaf_type: str, menu_path: str,
                 page: dict):
        self.id = id
        self.text = text
        self.leaf_type = leaf_type
        self.menu_path = menu_path
        self.page = page
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<InterfaceLeafPage {self.id}>'
    def new(self, force: bool = False, specific_db = None) -> str:
        """Insert this page, or (with ``force``) overwrite the legacy one.

        Always returns the relevant document id as a string.
        """
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        document = {
            'id': self.id,
            'text': self.text,
            'leaf_type': self.leaf_type,
            'menu_path': self.menu_path,
            'page': self.page
        }
        legacy = db.InterfaceLeafPage.find_one({'id': self.id})
        if not legacy:
            return str(db.InterfaceLeafPage.insert(document))
        elif force:
            db.InterfaceLeafPage.update_one(
                {'id': self.id},
                {'$set': {
                    'text': self.text,
                    'leaf_type': self.leaf_type,
                    'menu_path': self.menu_path,
                    'page': self.page
                }})
            return str(legacy.get('_id'))
        # legacy exists and force is off: return the existing id unchanged
        return str(legacy.get('_id'))
    @staticmethod
    def update_doc(locate: dict, update: dict, specific_db = None):
        """Apply a ``$set`` update to the first matching page document."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        db.InterfaceLeafPage.update_one(locate, {'$set': update})
    @staticmethod
    def search(locate: dict, specific_db = None) -> dict:
        """Return the first matching page document, or None."""
        if specific_db:
            db = specific_db
        else:
            db = current_user.db
        return db.InterfaceLeafPage.find_one(locate)
class AnonymousUser(AnonymousUserMixin):
    """Permission-aware anonymous (not logged in) session user.

    Templates call ``can()`` and ``is_administrator()`` on ``current_user``;
    an anonymous session holds no permission at all, so both answer False.
    """
    def can(self, permission):
        # an anonymous visitor never holds any permission bit
        return False
    def is_administrator(self):
        # admin status implies a permission, which anonymous users lack
        return False
class Permission:
    """Bit flags describing what a role is allowed to do."""
    ACCESS = 0x01  # ordinary site access
    ADMIN = 0x80   # administrative rights (the ADMIN role stores 0xff, all bits)
class Role:
    """Wrapper around a role document from the ``Role`` collection.

    A role document has a ``type`` name, a ``permission`` bitmask and a
    ``default`` flag marking the role assigned to new users.
    """
    def __init__(self, role: dict):
        self.type = role.get('type')
        self.permission = role.get('permission')
        self.default = role.get('default')
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<Role {self.type}>'
    def add_role(self):
        """Persist this role as a new document (no duplicate check)."""
        document = {
            'type': self.type,
            'permission': self.permission,
            'default': self.default
        }
        Meta.db.Role.insert(document)
    @staticmethod
    def insert_roles():
        """Seed the built-in ACCESS (default) and ADMIN roles if missing."""
        roles = [
            {'type': 'ACCESS',
             'permission': Permission.ACCESS,
             'default': True},
            {'type': 'ADMIN',
             'permission': 0xff,
             'default': False}]
        for role in roles:
            # idempotent: only insert a role type that does not exist yet
            if not Meta.db.Role.find_one({'type': role.get('type')}):
                Meta.db.Role.insert(role)
class User:
    """
    a base class for constructing user object

    Hashes the given password, picks the admin role when the email matches
    ``SITE_ADMIN`` (otherwise the default role), and derives an avatar hash
    from the email plus the current timestamp.
    """
    def __init__(self, email: str, username: str, password: str,
                 location: str):
        self.email = email
        self.username = username
        self.password = generate_password_hash(password)
        self.location = location
        self.avatar_hash = None
        if self.email == current_app.config['SITE_ADMIN']:
            # site admin gets the role holding the full permission mask
            self.role = Meta.db.Role.find_one(
                {'permission': 0xff}).get('type')
        else:
            self.role = Meta.db.Role.find_one(
                {'default': True}).get('type')
        if self.email is not None and self.avatar_hash is None:
            # salt the avatar hash with the current time so it is unique
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8') +
                str(datetime.utcnow()).encode('utf-8')).hexdigest()
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<User {self.username}>'
    def new(self) -> str:
        """Insert this user into the ``User`` collection, returning the id."""
        document = {
            'email': self.email,
            'username': self.username,
            'password': self.password,  # store hash result
            'is_confirmed': True,  # FIXME: for test
            'role': self.role,
            'name': '',
            'location': self.location,
            'about_me': '',
            'avatar_hash': self.avatar_hash,
            'member_since': datetime.utcnow(),
            'last_login': datetime.utcnow(),
            'company': ''
        }
        return str(Meta.db.User.insert(document))
    @staticmethod
    def update_doc(locate: dict, update: dict):
        """Apply a ``$set`` update to the first matching user document."""
        Meta.db.User.update_one(locate, {'$set': update})
    @staticmethod
    def search(locate: dict) -> dict:
        """Return the first matching user document, or None."""
        return Meta.db.User.find_one(locate)
    @staticmethod
    def save_misc(locate: dict, key: str, data: dict):
        """Store a misc sub-document; 'statistic' data is merged, not replaced."""
        # save non-statistic data
        if key != 'statistic':
            Meta.db.User.update_one(locate, {'$set': {key: data}})
        # save statistic data
        else:
            legacy = User.get_misc({'_id': ObjectId(current_user.id)},
                                   'statistic')
            # no legacy statistic data
            if legacy is None:
                Meta.db.User.update_one(locate, {'$set': {key: data}})
            # has existing statistic data
            else:
                # merge new search history into the stored history, and add
                # the new counters onto the stored totals
                new_search_history = merge_dict(legacy.get('search_history'),
                                                data.get('search_history'))
                Meta.db.User.update_one(
                    locate,
                    {
                        # '$push': {
                        #     'statistic.search_history': {
                        #         '$each': data.get('search_history')
                        #     }
                        # },
                        '$set': {
                            'statistic.search_history': new_search_history,
                            'statistic.search_count': data.get('search_count') + legacy.get('search_count'),
                            'statistic.judge_count': data.get('judge_count') + legacy.get('judge_count'),
                            'statistic.failed_count': data.get('failed_count') + legacy.get('failed_count'),
                            'statistic.success_count': data.get('success_count') + legacy.get('success_count')
                        }
                    }
                )
    @staticmethod
    def get_misc(locate: dict, key: str):
        """Return one field of the first matching user document."""
        return Meta.db.User.find_one(locate).get(key)
    @staticmethod
    def login(locate: dict, logged):
        """Record the user's logged-in state on the matching document."""
        Meta.db.User.update_one(locate, {'$set': {'logged': logged}})
class UserUtl(UserMixin):
    """
    a utility class based from UserMixin for Flask 'current_user', operating
    current logged user utilities on a global level
    """
    def __init__(self, user: dict):
        self.id = str(user.get('_id'))
        self.email = user.get('email')
        self.username = user.get('username')
        self.password = user.get('password')  # store hash result
        self.is_confirmed = user.get('is_confirmed')
        self.role = Role(Meta.db.Role.find_one({'type': user.get('role')}))
        self.name = user.get('name')
        self.location = user.get('location')
        self.about_me = user.get('about_me')
        self.avatar_hash = user.get('avatar_hash')
        self.last_login = user.get('last_login')
        self.member_since = user.get('member_since')
        self.company = user.get('company')
        # home company uses the shared Meta connection; any other company
        # gets its own Mongo connection
        self.db = Meta.db if self.company == MongoConfig.HOME else \
            mongo_connect(self.company)
        try:
            self.jison = Jison(file_name=user.get('company'))
        except:
            # if user is login into an empty company (no database)
            self.jison = Jison()
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<CurrentUser {self.username}>'
    def get_id(self):
        """
        used by Flask's login_user() to retrieve logged user id, for a global
        session usage. Callback function use id to load_user() for returning
        the global 'current_user'
        https://flask-login.readthedocs.io/en/latest/
        """
        return self.id
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL for this user's avatar hash."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        return '{u}/{h}?s={s}&d={d}&r={r}'.format(u=url, h=self.avatar_hash,
                                                  s=size, d=default,
                                                  r=rating)
    def can(self, permission):
        """True if this user's role covers every bit of ``permission``."""
        return self.role is not None and (self.role.permission & permission) \
            == permission
    def is_administrator(self):
        """True if the user's role includes the ADMIN permission bit."""
        return self.can(Permission.ADMIN)
    def ping(self):
        """Refresh the user's ``last_login`` timestamp."""
        Meta.db.User.update_one({'email': self.email},
                                {'$set': {
                                    'last_login': datetime.utcnow()}})
    def generate_token(self, expiration=3600):
        """Return a signed, expiring token embedding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'ID': self.id})
        # token == s.dumps(data)
        # data == s.loads(token)
    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token for confirming an email address change."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'ID': self.id, 'new_email': new_email})
class Snippet():
    """A code snippet scenario grouped under a snippet group.

    Groups live in ``SnippetGroup`` (name + list of scenario ids) and
    scenarios in ``SnippetScenario`` (name + group name + code).
    """
    def __init__(self, group: str, scenario: str, code: str):
        self.group = group
        self.scenario = scenario
        self.code = code
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<Snippet Scenario {self.scenario}>'
    def new(self) -> tuple:
        """Insert this scenario (creating its group if needed).

        Returns (group_id, scenario_id) as strings, or (None, None) when
        the same group/scenario name pair already exists.
        """
        # check duplication, OK if only group name or scenario name is same
        duplicated = False
        group_dict = Meta.db.SnippetGroup.find_one({'name': self.group})
        if group_dict:
            # group existing, check scenario name
            old_scenario_id_list = group_dict.get('scenarios')
            for id in old_scenario_id_list:
                if Meta.db.SnippetScenario.find_one(
                        {'_id': ObjectId(id)}).get('name') == self.scenario:
                    # both group and scenario are duplicated, you are in big
                    # trouble, skipped
                    duplicated = True
                    break
        if not duplicated:
            # insert new scenario
            document = {
                'name': self.scenario,
                'group': self.group,
                'code': self.code
            }
            scenario_id = str(Meta.db.SnippetScenario.insert(document))
            if group_dict:
                # update new scenario id into existing group
                group_id = str(group_dict.get('_id'))
                Meta.db.SnippetGroup.update_one({'name': self.group},
                                                {'$push': {
                                                    'scenarios': scenario_id}})
            else:
                # insert new group for the new scenario
                document = {
                    'name': self.group,
                    'scenarios': [scenario_id]
                }
                group_id = str(Meta.db.SnippetGroup.insert(document))
            return group_id, scenario_id
        else:
            return None, None
    @staticmethod
    def new_group(doc: dict) -> str:
        """Insert a raw group document and return its id."""
        return str(Meta.db.SnippetGroup.insert(doc))
    @staticmethod
    def get_group_cursor(locate: dict, s_condition: list = None):
        """Return a (optionally sorted) cursor over matching groups."""
        if s_condition and isinstance(s_condition, list):
            return Meta.db.SnippetGroup.find(locate).sort(s_condition)
        return Meta.db.SnippetGroup.find(locate)
    @staticmethod
    def get_scenario_cursor(locate: dict, s_condition: list = None):
        """Return a (optionally sorted) cursor over matching scenarios."""
        if s_condition and isinstance(s_condition, list):
            return Meta.db.SnippetScenario.find(locate).sort(s_condition)
        return Meta.db.SnippetScenario.find(locate)
    @staticmethod
    def update_doc_group(locate: dict, update: dict):
        """Apply a ``$set`` update to the first matching group."""
        Meta.db.SnippetGroup.update_one(locate, {'$set': update})
    @staticmethod
    def update_doc_scenario(locate: dict, update: dict):
        """Apply a ``$set`` update to the first matching scenario."""
        Meta.db.SnippetScenario.update_one(locate, {'$set': update})
    @staticmethod
    def delete_doc_group(locate: dict):
        """Delete the first matching group document."""
        Meta.db.SnippetGroup.delete_one(locate)
    @staticmethod
    def delete_doc_scenario(locate: dict):
        """Delete the first matching scenario document."""
        Meta.db.SnippetScenario.delete_one(locate)
    @staticmethod
    def search_group(locate: dict) -> dict:
        """Return the first matching group document, or None."""
        return Meta.db.SnippetGroup.find_one(locate)
    @staticmethod
    def search_scenario(locate: dict) -> dict:
        """Return the first matching scenario document, or None."""
        return Meta.db.SnippetScenario.find_one(locate)
class Ticket():
    """A user-submitted issue ticket persisted in the ``Ticket`` collection."""
    def __init__(self, ticket: str, description: str):
        self.ticket = ticket
        self.description = description
    def __repr__(self):
        return str(self)
    def __str__(self):
        return f'<Ticket {self.ticket}>'
    def new(self) -> str:
        """Insert this ticket as a new, unsolved document and return its id."""
        document = {
            'ticket': self.ticket,
            # bug fix: this previously stored ``self.ticket`` under
            # 'description', losing the actual description text
            'description': self.description,
            'timestamp': datetime.utcnow(),
            'solved_timestamp': None,
            'solved': False
        }
        return str(Meta.db.Ticket.insert(document))
login_manager.anonymous_user = AnonymousUser
"""
It is not mandator but there are permission test like is_administrator() and
can() in templates which are not defined to anonymous sessions (without login)
and Errors will be raised.
In class AnonymousUser(AnonymousUserMixin) these two permission methods are
defined and by setting login_manager.anonymous_user to class
AnonymousUser(AnonymousUserMixin) for making those operations available.
This can be tested by just commenting this line and check the Error message
"""
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: rebuild ``current_user`` from the session id.

    Looks the stored id up in the ``User`` collection and wraps the
    resulting document in a ``UserUtl`` instance.
    """
    user_doc = Meta.db.User.find_one({'_id': ObjectId(user_id)})
    return UserUtl(user_doc)
| 32.442225 | 110 | 0.538035 |
7e97164ad37af5d6a4a756996a2d164d752b44d4 | 437 | py | Python | dojo/rules/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | [
"BSD-3-Clause"
] | 1,772 | 2018-01-22T23:32:15.000Z | 2022-03-31T14:49:33.000Z | dojo/rules/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | [
"BSD-3-Clause"
] | 3,461 | 2018-01-20T19:12:28.000Z | 2022-03-31T17:14:39.000Z | dojo/rules/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | [
"BSD-3-Clause"
] | 1,173 | 2018-01-23T07:10:23.000Z | 2022-03-31T14:40:43.000Z | from django.conf.urls import url
from dojo.rules import views
# URL routes for the rules app; note the route names containing spaces
# ('Add Rule', etc.) are how these views are reversed elsewhere.
urlpatterns = [
    url(r'^rules', views.rules, name='rules'),
    url(r'^rule/add', views.new_rule, name='Add Rule'),
    url(r'^rule/(?P<pid>\d+)/edit$', views.edit_rule,
        name='Edit Rule'),
    url(r'^rule/(?P<pid>\d+)/add_child', views.add_child,
        name='Add Child'),
    url(r'^rule/(?P<tid>\d+)/delete$', views.delete_rule,
        name='Delete Rule'), ]
| 33.615385 | 57 | 0.606407 |
47dbe9513a9ab5d26c191ec7f2cb0c7972c78426 | 92 | py | Python | _2019/src/day_2/__init__.py | JHowell45/advent-of-code | c7bdd3881573f259af2cb826d0c77bbef0b3417e | [
"MIT"
] | null | null | null | _2019/src/day_2/__init__.py | JHowell45/advent-of-code | c7bdd3881573f259af2cb826d0c77bbef0b3417e | [
"MIT"
] | null | null | null | _2019/src/day_2/__init__.py | JHowell45/advent-of-code | c7bdd3881573f259af2cb826d0c77bbef0b3417e | [
"MIT"
] | null | null | null | from .puzzle_1 import day_2_puzzle_1_solution
from .puzzle_2 import day_2_puzzle_2_solution
| 30.666667 | 45 | 0.891304 |
6b5a99cbdcbd9e85cc95d39a8dcb8506132d69d6 | 1,713 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGSES_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGSES_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGSES_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoE_GSES(Base):
    """Traffic stack template for the fCoEGSES protocol header.

    ``_SDM_ATT_MAP`` maps the friendly field names exposed as properties to
    the underlying SDM attribute paths used by the REST API.
    """
    __slots__ = ()
    _SDM_NAME = 'fCoEGSES'
    _SDM_ATT_MAP = {
        'FCoE Header': 'fCoEGSES.header.fcoeHeader',
        'FC Header': 'fCoEGSES.header.fcHeader',
        'FC_CT': 'fCoEGSES.header.fcCT',
        'FCS': 'fCoEGSES.header.FCS',
        'FC CRC': 'fCoEGSES.header.fcCRC',
        'FC Trailer': 'fCoEGSES.header.fcTrailer',
    }
    def __init__(self, parent):
        super(FCoE_GSES, self).__init__(parent)
    @property
    def FCoE_Header(self):
        # Multivalue wrapper over the 'FCoE Header' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCoE Header']))
    @property
    def FC_Header(self):
        # Multivalue wrapper over the 'FC Header' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC Header']))
    @property
    def FC_CT(self):
        # Multivalue wrapper over the 'FC_CT' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC_CT']))
    @property
    def FCS(self):
        # Multivalue wrapper over the 'FCS' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCS']))
    @property
    def FC_CRC(self):
        # Multivalue wrapper over the 'FC CRC' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC CRC']))
    @property
    def FC_Trailer(self):
        # Multivalue wrapper over the 'FC Trailer' SDM attribute
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FC Trailer']))
    def add(self):
        """Create a new instance of this template (delegates to ``_create``)."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 32.942308 | 86 | 0.68885 |
0e2e20279ce963054de869e54b4b76e2f73abf4b | 1,578 | py | Python | wlts/config.py | fabianazioti/wlts | e534b3af5e42e715d027af446db79bba6662af5a | [
"MIT"
] | 1 | 2020-02-25T20:02:02.000Z | 2020-02-25T20:02:02.000Z | wlts/config.py | fabianazioti/wlts | e534b3af5e42e715d027af446db79bba6662af5a | [
"MIT"
] | 33 | 2019-12-24T12:58:38.000Z | 2022-03-18T18:03:41.000Z | wlts/config.py | fabianazioti/wlts | e534b3af5e42e715d027af446db79bba6662af5a | [
"MIT"
] | 4 | 2019-10-01T13:16:43.000Z | 2021-02-12T18:10:04.000Z | #
# This file is part of Web Land Trajectory Service.
# Copyright (C) 2019-2020 INPE.
#
# Web Land Trajectory Service is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Brazil Data Cube Configuration."""
import os
from packaging import version as _version
from .version import __version__
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def get_settings(env):
    """Look up the configuration instance registered under *env*.

    Returns None when *env* names no known configuration.
    """
    settings = CONFIG.get(env)
    return settings
class Config():
    """Base configuration with default flags."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = False
    WTF_CSRF_ENABLED = False
    SECRET_KEY = "APi-Users-123456"
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # service base URL; overridable via the WLTS_URL environment variable
    WLTS_URL = os.getenv('WLTS_URL', 'http://localhost:5000')
    # API version derived from the package version string
    WLTS_API_VERSION = _version.parse(__version__).base_version
    # BDC OAuth settings, all optional (default None when unset)
    BDC_AUTH_CLIENT_SECRET = os.getenv("BDC_AUTH_CLIENT_SECRET", None)
    BDC_AUTH_CLIENT_ID = os.getenv("BDC_AUTH_CLIENT_ID", None)
    BDC_AUTH_ACCESS_TOKEN_URL = os.getenv("BDC_AUTH_ACCESS_TOKEN_URL", None)
class ProductionConfig(Config):
    """Production mode: debugging disabled."""
    DEBUG = False
class DevelopmentConfig(Config):
    """Development mode: debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class TestingConfig(Config):
    """Testing Mode (Continous Integration)."""
    TESTING = True
    DEBUG = True
# convenience alias for the shared secret key
key = Config.SECRET_KEY
# registry mapping environment names to ready-made config instances,
# consumed by get_settings()
CONFIG = {
    "DevelopmentConfig": DevelopmentConfig(),
    "ProductionConfig": ProductionConfig(),
    "TestingConfig": TestingConfig()
}
| 23.205882 | 88 | 0.71673 |
96110a00c1b1b99aaa0f0352608a5eec1442b397 | 1,422 | py | Python | payments/migrations/0029_posteroption.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | payments/migrations/0029_posteroption.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | payments/migrations/0029_posteroption.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the PosterOption model and links order items to it."""
    dependencies = [
        ('payments', '0028_rate_sales_open'),
    ]
    operations = [
        migrations.CreateModel(
            name='PosterOption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300)),
                ('sort_order', models.IntegerField(help_text='This value is used to sort the display order of the poster presentation options')),
                ('workshop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Workshop')),
            ],
            options={
                'ordering': ('sort_order',),
            },
        ),
        # nullable so existing order items need no poster selection
        migrations.AddField(
            model_name='orderitem',
            name='poster',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='payments.PosterOption'),
        ),
    ]
bf48a6f62b8ebf06cc41634a520ba4ac59f6570c | 9,955 | py | Python | mistral/context.py | Abnerzhao/mistral | 55c8d70536c8e9de03376b523e8dfa3c3cf7070f | [
"Apache-2.0"
] | null | null | null | mistral/context.py | Abnerzhao/mistral | 55c8d70536c8e9de03376b523e8dfa3c3cf7070f | [
"Apache-2.0"
] | null | null | null | mistral/context.py | Abnerzhao/mistral | 55c8d70536c8e9de03376b523e8dfa3c3cf7070f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from mistral_lib.actions import context as lib_ctx
from mistral_lib import serialization
from oslo_config import cfg
from oslo_context import context as oslo_context
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from osprofiler import profiler
import pecan
from pecan import hooks
from mistral import auth
from mistral import exceptions as exc
from mistral import utils
CONF = cfg.CONF
_CTX_THREAD_LOCAL_NAME = "MISTRAL_APP_CTX_THREAD_LOCAL"
ALLOWED_WITHOUT_AUTH = ['/', '/v2/', '/workflowv2/', '/workflowv2/v2/']
class MistralContext(oslo_context.RequestContext):
    """Request context for Mistral, extending oslo's RequestContext with
    auth endpoint details, target-cloud fields and trust information."""
    def __init__(self, auth_uri=None, auth_cacert=None, insecure=False,
                 service_catalog=None, region_name=None, is_trust_scoped=False,
                 redelivered=False, expires_at=None, trust_id=None,
                 is_target=False, **kwargs):
        self.auth_uri = auth_uri
        self.auth_cacert = auth_cacert
        self.insecure = insecure
        self.service_catalog = service_catalog
        self.region_name = region_name
        self.is_trust_scoped = is_trust_scoped
        self.redelivered = redelivered
        self.expires_at = expires_at
        self.trust_id = trust_id
        # True when the context points at a different ("target") cloud
        self.is_target = is_target
        # We still use Mistral thread local variable. Maybe could consider
        # using the variable provided by oslo_context in future.
        super(MistralContext, self).__init__(overwrite=False, **kwargs)
    def to_dict(self):
        """Return a dictionary of context attributes."""
        ctx_dict = super(MistralContext, self).to_dict()
        ctx_dict.update(
            {
                'user_name': self.user_name,
                'project_name': self.project_name,
                'domain_name': self.domain_name,
                'user_domain_name': self.user_domain_name,
                'project_domain_name': self.project_domain_name,
                'auth_uri': self.auth_uri,
                'auth_cacert': self.auth_cacert,
                'insecure': self.insecure,
                'service_catalog': self.service_catalog,
                'region_name': self.region_name,
                'is_trust_scoped': self.is_trust_scoped,
                'redelivered': self.redelivered,
                'expires_at': self.expires_at,
                'trust_id': self.trust_id,
                'is_target': self.is_target,
            }
        )
        return ctx_dict
    @classmethod
    def from_dict(cls, values, **kwargs):
        """Construct a context object from a provided dictionary."""
        kwargs.setdefault('auth_uri', values.get('auth_uri'))
        kwargs.setdefault('auth_cacert', values.get('auth_cacert'))
        kwargs.setdefault('insecure', values.get('insecure', False))
        kwargs.setdefault('service_catalog', values.get('service_catalog'))
        kwargs.setdefault('region_name', values.get('region_name'))
        kwargs.setdefault(
            'is_trust_scoped', values.get('is_trust_scoped', False)
        )
        kwargs.setdefault('redelivered', values.get('redelivered', False))
        kwargs.setdefault('expires_at', values.get('expires_at'))
        kwargs.setdefault('trust_id', values.get('trust_id'))
        kwargs.setdefault('is_target', values.get('is_target', False))
        return super(MistralContext, cls).from_dict(values, **kwargs)
    @classmethod
    def from_environ(cls, headers, env):
        """Build a context from WSGI headers/environ of an incoming request.

        Mistral-specific auth parameters come from the headers; keystone
        token info (catalog, expiry) is read from ``keystone.token_info``
        unless the request targets another cloud.
        """
        kwargs = _extract_mistral_auth_params(headers)
        token_info = env.get('keystone.token_info', {})
        if not kwargs['is_target']:
            kwargs['service_catalog'] = token_info.get('token', {})
        kwargs['expires_at'] = (token_info['token']['expires_at']
                                if token_info else None)
        context = super(MistralContext, cls).from_environ(env, **kwargs)
        # admin status is derived from the presence of the 'admin' role
        context.is_admin = True if 'admin' in context.roles else False
        return context
def has_ctx():
    """Tell whether a Mistral context is bound to the current thread."""
    ctx_is_set = utils.has_thread_local(_CTX_THREAD_LOCAL_NAME)
    return ctx_is_set
def ctx():
    """Return the thread-local Mistral context, raising when none is set."""
    if has_ctx():
        return utils.get_thread_local(_CTX_THREAD_LOCAL_NAME)
    raise exc.ApplicationContextNotFoundException()
def set_ctx(new_ctx):
    # bind (or clear, when new_ctx is None) the context for this thread
    utils.set_thread_local(_CTX_THREAD_LOCAL_NAME, new_ctx)
def _extract_mistral_auth_params(headers):
    """Build MistralContext keyword args from request headers.

    When an ``X-Target-Auth-Uri`` header is present the request addresses a
    different ("target") OpenStack cloud and all auth data is taken from the
    ``X-Target-*`` headers; otherwise local keystone_authtoken settings are
    used. Raises MistralException on invalid/missing target headers.
    """
    service_catalog = None
    if headers.get("X-Target-Auth-Uri"):
        insecure_header = headers.get('X-Target-Insecure', 'False')
        # only the exact strings "True"/"False" are accepted
        if insecure_header == 'False':
            insecure = False
        elif insecure_header == 'True':
            insecure = True
        else:
            raise (exc.MistralException(
                'X-Target-Insecure must be either "True", "False" or not '
                'provided. The default is "False".'))
        params = {
            # TODO(akovi): Target cert not handled yet
            'auth_cacert': None,
            'insecure': insecure,
            'auth_token': headers.get('X-Target-Auth-Token'),
            'auth_uri': headers.get('X-Target-Auth-Uri'),
            'tenant': headers.get('X-Target-Project-Id'),
            'user': headers.get('X-Target-User-Id'),
            'user_name': headers.get('X-Target-User-Name'),
            'region_name': headers.get('X-Target-Region-Name'),
            'is_target': True
        }
        if not params['auth_token']:
            raise (exc.MistralException(
                'Target auth URI (X-Target-Auth-Uri) target auth token '
                '(X-Target-Auth-Token) must be present'))
        # It's possible that target service catalog is not provided, in this
        # case, Mistral needs to get target service catalog dynamically when
        # talking to target openstack deployment later on.
        service_catalog = _extract_service_catalog_from_headers(
            headers
        )
    else:
        params = {
            'auth_uri': CONF.keystone_authtoken.www_authenticate_uri,
            'auth_cacert': CONF.keystone_authtoken.cafile,
            'insecure': False,
            'region_name': headers.get('X-Region-Name'),
            'is_target': False
        }
    params['service_catalog'] = service_catalog
    return params
def _extract_service_catalog_from_headers(headers):
    """Decode the base64/JSON service catalog header, if supplied.

    :return: deserialized catalog, or None when the header is absent/empty.
    """
    encoded_catalog = headers.get('X-Target-Service-Catalog')
    if not encoded_catalog:
        return None
    decoded = base64.b64decode(encoded_catalog).decode()
    return jsonutils.loads(decoded)
class RpcContextSerializer(messaging.Serializer):
    """Serializer carrying MistralContext and osprofiler data over RPC."""

    def __init__(self, entity_serializer=None):
        # Fall back to the polymorphic serializer when none is supplied.
        if not entity_serializer:
            entity_serializer = serialization.get_polymorphic_serializer()
        self.entity_serializer = entity_serializer

    def serialize_entity(self, context, entity):
        if self.entity_serializer:
            return self.entity_serializer.serialize(entity)
        return entity

    def deserialize_entity(self, context, entity):
        if self.entity_serializer:
            return self.entity_serializer.deserialize(entity)
        return entity

    def serialize_context(self, context):
        serialized = context.to_dict()
        prof = profiler.get()
        if prof:
            # Propagate tracing info so the remote side can continue the trace.
            serialized['trace_info'] = {
                "hmac_key": prof.hmac_key,
                "base_id": prof.get_base_id(),
                "parent_id": prof.get_id()
            }
        return serialized

    def deserialize_context(self, context):
        trace_info = context.pop('trace_info', None)
        if trace_info:
            profiler.init(**trace_info)
        restored = MistralContext.from_dict(context)
        # Make the restored context current for this thread.
        set_ctx(restored)
        return restored
class AuthHook(hooks.PecanHook):
    """Pecan hook authenticating every request before it is dispatched."""

    def before(self, state):
        # Whitelisted paths bypass authentication entirely.
        if state.request.path in ALLOWED_WITHOUT_AUTH:
            return
        if not CONF.pecan.auth_enable:
            return
        try:
            handler = auth.get_auth_handler()
            handler.authenticate(state.request)
        except Exception as e:
            msg = "Failed to validate access token: %s" % str(e)
            pecan.abort(
                status_code=401,
                detail=msg,
                headers={'Server-Error-Message': msg,
                         "WWW-Authenticate": msg}
            )
class ContextHook(hooks.PecanHook):
    """Pecan hook installing a per-request MistralContext."""

    def before(self, state):
        request = state.request
        new_context = MistralContext.from_environ(
            request.headers, request.environ
        )
        set_ctx(new_context)

    def after(self, state):
        # Clear the context so it cannot leak into the next request.
        set_ctx(None)
def create_action_context(execution_ctx):
    """Assemble a mistral-lib ActionContext from the current context.

    :param execution_ctx: dict of ExecutionContext keyword arguments.
    :return: lib_ctx.ActionContext combining security and execution data.
    """
    current = ctx()
    # Copy the security-relevant attributes off the thread-local context.
    security_attr_names = (
        'auth_cacert', 'auth_token', 'auth_uri', 'expires_at', 'insecure',
        'is_target', 'is_trust_scoped', 'project_id', 'project_name',
        'user_name', 'redelivered', 'region_name', 'service_catalog',
        'trust_id',
    )
    security_kwargs = {name: getattr(current, name)
                       for name in security_attr_names}
    security_ctx = lib_ctx.SecurityContext(**security_kwargs)
    execution = lib_ctx.ExecutionContext(**execution_ctx)
    return lib_ctx.ActionContext(security_ctx, execution)
| 33.745763 | 79 | 0.641185 |
9a7fa71b251c7907482ed1b5fc772010bb16a349 | 3,861 | py | Python | .history/my_classes/FirstClassFunctions/reducing_functions_20210707182813.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707182813.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707182813.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ...,, aN-1
max(a, b) _> maximum of a and b
result =a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, an-1)
# max value in a0, a1, a2, ..., an-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
from msilib import sequence
from unittest import result
l = l[5, 8, 6, 10, 9] # result = 5
max_value = lambda a, b: a if a > b else b # result = max(5, 8) = 8
def max_sequence(sequence):
    """Return the largest element of a non-empty sequence.

    The original delegated each comparison to a module-level ``max_value``
    lambda defined on a syntactically broken line; the comparison is inlined
    here so the function is self-contained with identical semantics
    (``a if a > b else b``).

    :param sequence: non-empty indexable sequence of mutually comparable items.
    :return: the maximum element.
    :raises IndexError: if the sequence is empty (same as the original).
    """
    result = sequence[0]
    for element in sequence[1:]:
        if element > result:
            result = element
    return result
Notice the sequence of steps:
l = l[5, 8, 6, 10, 9] # result = 5
max_value = lambda a, b: a if a > b else b # result = max(5, 8) = 8
def max_sequence(sequence): # result = max(5, 6) = 8
result = sequence[0]
for e in sequence[1:]: # result = max(5, 10) = 10
result = max_value(result, e) # result = max(5, 10) = 10
return result # result -> 10
l = [5, 8, 6, 10, 9]
^ | | | |
| | |
5 | |
\ | |
max(5, 8) | | |
8 |
\ |
\ |
max(8, 6)
8 | |
\
max(8, 10)
10
\ |
max(10, 9)
10
result -> 10
To calculate the min: # I just need to change (max) to (min)
l = l[5, 8, 6, 10, 9] # result = 5
min_value = lambda a, b: a if a > b else b # result = max(5, 8) = 8
def min_sequence(sequence): # result = max(5, 6) = 8
result = sequence[0]
for e in sequence[1:]: # result = max(5, 10) = 10
result = min_value(result, e) # result = max(5, 10) = 10
return result # result -> 10
# I could just write:
def _reduce(fn, sequence):
result = sequence[0
for x in sequence[1:]]:
result = fn(result, x)
return result
_reduce(lambda a, b: a if a > b else b, l) # maximum
_reduce(lambda a, b: a if a < b else b, l) # minimum
# Adding all the elements to a list
add = lambda a, b: a+b
# result = 5
l = [5, 8, 6, 10, 9]
# result = add(5, 8) = 13
# result = add(13, 6) = 19
def _reduce(fn, sequence): # result = add(19, 10) = 29
result = sequence[0]
for x in sequence[1:]: # result = add(29. 9) = 38
result = fn(result, x)
return result # result = 38
_reduce(add. l)
""" The functools module
Pthon implements a reduce function that will handle any iterable, but works similarly to what I just saw.
"""
from functools import reduce
l = [5, 8, 6, 10, 9]
reduce(lambda a, b: a if a > b else b, l) # max -> 10
reduce(lambda a, b: a if a < b else b, l) # min -> 5
reduce(lambda a, b: a if a + b, l) # sum -> 38
# reduce works on any iterable
reduce(lambda a, b: a if a < b else b, {10, 5, 2, 4}) # 2
reduce(lambda a, b: a if a < b else b, 'python') # h
reduce(lambda a, b: a + ' ' + b else b, ('python', 'is', 'awesome')) # 'python is awesome'
"""Built-in Reducing Function"""
| 25.912752 | 105 | 0.480186 |
1954821f68c7f9c8e058c00408b92bd445e46ac8 | 1,338 | py | Python | test/devvyn/scrape/model/test_course_catalogue.py | devvyn/knowledge-mapper | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | 1 | 2019-11-21T17:48:52.000Z | 2019-11-21T17:48:52.000Z | test/devvyn/scrape/model/test_course_catalogue.py | devvyn/usask-scrape-course-prerequisites | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | 8 | 2019-10-07T05:31:42.000Z | 2019-11-29T01:31:02.000Z | test/devvyn/scrape/model/test_course_catalogue.py | devvyn/knowledge-mapper | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | null | null | null | """
Tests for course dictionary model.
The model must be able to:
- Find official course description online, given only a course code.
- Understand course codes in various formats:
- PSY 120
- PSY-120
- PSY 120.3
- PSY-120.3
- Parse course prerequisite descriptions, producing:
- list of course codes mentioned
- remaining content fragments that could not be parsed
"""
import pytest
from devvyn.model.course_catalogue import Code
class TestCourseCode:
    """Exercise Code parsing for each supported course-code format."""

    def test_course_parse_code_invalid(self):
        # A subject with no course number must be rejected.
        with pytest.raises(ValueError):
            assert str(Code(code='PSY')) != 'PSY'

    def test_course_parse_code_full_hyphen(self):
        parsed = Code(code='PSY-120.3')
        assert str(parsed) == 'PSY-120.3'
        assert str(parsed.subject) == 'PSY'
        assert int(parsed.number) == 120
        assert int(parsed.credit) == 3

    def test_course_parse_code_full_space(self):
        # Space-separated input normalizes to the hyphenated form.
        parsed = Code(code='PSY 120.3')
        assert str(parsed) == 'PSY-120.3'
        assert str(parsed.subject) == 'PSY'
        assert int(parsed.number) == 120
        assert int(parsed.credit) == 3

    def test_course_parse_code_partial(self):
        # Credit units are optional in a course code.
        parsed = Code(code='PSY-120')
        assert str(parsed) == 'PSY-120'
        assert str(parsed.subject) == 'PSY'
        assert int(parsed.number) == 120
        assert parsed.credit is None
9030d8d66a1dc015479633488578b970049be6c2 | 156 | py | Python | sender.py | pavlovajane/python-messenger | a6294b5390c764f4b3eb7efca20d0432af82317e | [
"MIT"
] | null | null | null | sender.py | pavlovajane/python-messenger | a6294b5390c764f4b3eb7efca20d0432af82317e | [
"MIT"
] | null | null | null | sender.py | pavlovajane/python-messenger | a6294b5390c764f4b3eb7efca20d0432af82317e | [
"MIT"
] | null | null | null | import requests
# Console client: relay each line typed on stdin to the local messenger
# server as user "admin". Runs until interrupted.
while True:
    message = input()
    requests.post(
        'http://127.0.0.1:5000/send',
        json={'text': message, 'name': 'admin'}
    )
| 17.333333 | 44 | 0.532051 |
d4cea12adc4a39582414a9304cffab96e394688e | 3,880 | py | Python | terraform/cloud-functions/alice-func/index.py | chaos-adept/iot-dipstick | 27a543b6673e8f0f04c2bdf0e31e5f3760f8b45c | [
"Apache-2.0"
] | 2 | 2021-02-19T21:08:36.000Z | 2021-04-02T09:38:10.000Z | terraform/cloud-functions/alice-func/index.py | chaos-adept/iot-dipstick | 27a543b6673e8f0f04c2bdf0e31e5f3760f8b45c | [
"Apache-2.0"
] | 35 | 2021-01-11T06:52:24.000Z | 2021-06-15T06:59:00.000Z | terraform/cloud-functions/alice-func/index.py | chaos-adept/iot-dipstick | 27a543b6673e8f0f04c2bdf0e31e5f3760f8b45c | [
"Apache-2.0"
] | null | null | null | import os
import logging
import datetime as dt
import json
import re
import paho.mqtt.publish as publish
import requests
from functools import reduce
logger = logging.getLogger()
logger.setLevel(logging.INFO)
lightCmdRegexp = re.compile("(свет)|(вкл)|(выкл)")
lightCMDTurnOffRegexp = re.compile("выкл")
weatherForecastHoursInterval = 3
def is_verbose_logging_enabled() -> bool:
    """Report whether the VERBOSE_LOG environment variable is exactly "True"."""
    flag = os.getenv('VERBOSE_LOG')
    return flag == "True"
METRICS_PUSH_URL = 'https://monitoring.api.cloud.yandex.net/monitoring/v2/data/read'
METRICS_SERVICE = 'custom'
if is_verbose_logging_enabled():
logger.info('Loading msgHandler function')
def getWheather(iamToken):
    """Fetch the latest temperature metric and format a Russian reply.

    Queries the custom "Temperature" metric over the last
    ``weatherForecastHoursInterval`` hours from the Yandex Monitoring API.

    NOTE(review): keeps the original "getWheather" spelling so the existing
    caller (msgHandler) keeps working.

    :param iamToken: Yandex Cloud IAM token used as the Bearer credential.
    :return: human-readable reply text (Russian).
    """
    if is_verbose_logging_enabled():
        logger.info('get weather metrics')
    folderId = os.getenv('METRICS_FOLDER_ID')
    minutesInterval = 60 * weatherForecastHoursInterval
    # Truncate to whole seconds for the ISO timestamps sent to the API.
    toTime = dt.datetime.utcnow().replace(microsecond=0)
    fromTime = toTime - dt.timedelta(minutes=minutesInterval)
    requestBody = {
        "query": "\"Temperature\"{service=\"custom\", device_id=\"*\"}",
        "fromTime": f"{fromTime.isoformat()}Z",
        "toTime": f"{toTime.isoformat()}Z",
        "downsampling": {
            "gridAggregation": "LAST",
            "gapFilling": "NONE",
            # minutes * 60 * 1000 -> presumably milliseconds; confirm API unit.
            "gridInterval": minutesInterval * 1000 * 60
        }
    }
    if is_verbose_logging_enabled():
        logger.info(f'Metrics request: {requestBody}')
    resp = requests.post(
        METRICS_PUSH_URL,
        json=requestBody,
        headers={"Authorization": "Bearer " + iamToken},
        params={"folderId": folderId, "service": METRICS_SERVICE}
    )
    if is_verbose_logging_enabled():
        logger.info(f'Metrics response: {resp}')
        logger.info(f'Metrics response.content: {resp.content}')
    metrics = json.loads(resp.content)["metrics"]
    temperatureValuesParitions = map(
        lambda item: item["timeseries"]["doubleValues"], metrics)
    # Bug fix: supply an initial value so an empty metrics list yields []
    # instead of raising TypeError("reduce() of empty iterable with no
    # initial value"); the "no data" reply below then triggers as intended.
    temperatureValues = reduce(
        lambda a, b: a + b, temperatureValuesParitions, [])
    if not temperatureValues:
        return f"нет данных о температуре за последние {weatherForecastHoursInterval} часа"
    temperature = temperatureValues[0]
    text = f"температура {temperature} градусов."
    if is_verbose_logging_enabled():
        logger.info(f'temperature: {temperature}')
    return text
def sentLightCmd(isOn):
    """Publish a light command to the IoT registry and return a status string.

    Publishes *isOn* on the registry's ``commands/light`` MQTT topic over
    TLS (port 8883, CA cert from the environment), then returns a Russian
    confirmation: "свет: включен" / "свет: выключен".

    :param isOn: truthy to turn the light on, falsy to turn it off.
    """
    ca_certs = os.getenv('CA_CERT')  # path to the CA certificate for TLS
    registryId = os.getenv('REGISTRY_ID')
    # Registry-level auth: the registry ID is the username.
    auth = {'username': registryId, 'password': os.getenv('REGISTRY_PASSWORD')}
    topic = f"$registries/{registryId}/commands/light"
    # NOTE(review): payload is a bool; paho treats it as an int (0/1) since
    # bool subclasses int — confirm that is what the device expects.
    publish.single(topic, payload=isOn, qos=0, retain=False, hostname="mqtt.cloud.yandex.net",
                   port=8883, client_id="alice-func", keepalive=2, will=None, auth=auth, tls={'ca_certs': ca_certs})
    state = "включен" if isOn else "выключен"
    return f"свет: {state}"
"""
Entry-point for Serverless Function.
:param event: IoT message payload.
:param context: information about current execution context.
:return: successful response, statusCode: 200
"""
def msgHandler(event, context):
    """Entry point for the Alice skill serverless function.

    Routes the utterance either to the light command handler (when any
    token matches the light/on/off regexp) or to the weather report.

    :param event: Alice request payload with 'request', 'session', 'version'.
    :param context: execution context; supplies the IAM access token.
    :return: Alice response dict echoing session/version with the reply text.
    """
    # NOTE(review): removed the original's unused ``statusCode = 500`` local
    # ("Error response by default") — it was dead code, never returned.
    iamToken = context.token["access_token"]
    verbose = is_verbose_logging_enabled()
    if verbose:
        logger.info(event)
        logger.info(context)
        logger.info(f"iamToken: {iamToken}")
    msg = event["request"]
    session = event["session"]
    version = event["version"]
    if verbose:
        logger.info(f'msg[0]: {msg}')
    tokens = msg["nlu"]["tokens"]
    if any(lightCmdRegexp.match(s) for s in tokens):
        # Any "выкл" (off) token means turn the light off; otherwise on.
        isOn = not any(lightCMDTurnOffRegexp.match(s) for s in tokens)
        text = sentLightCmd(isOn)
    else:
        text = getWheather(iamToken)
    return {
        'version': version,
        'session': session,
        'response': {
            'text': text,
            'end_session': False
        }
    }
1af4fc52d8c571682f8c54383827d508decf1962 | 150 | py | Python | ecom/cart/context_processors.py | archeski/ecom | fb09730146ceb51894cc67fa3411ce4cd1b1b9fb | [
"MIT"
] | null | null | null | ecom/cart/context_processors.py | archeski/ecom | fb09730146ceb51894cc67fa3411ce4cd1b1b9fb | [
"MIT"
] | 6 | 2020-06-05T18:35:05.000Z | 2022-03-11T23:26:37.000Z | ecom/cart/context_processors.py | arturkuchynski/ecom | fb09730146ceb51894cc67fa3411ce4cd1b1b9fb | [
"MIT"
] | null | null | null | from .cart import Cart
def cart(request):
    """Context processor exposing the session cart to every template.

    :param request: current HttpRequest, used to build the Cart.
    :return: dict mapping the ``cart`` template variable to a Cart instance.
    """
    return dict(cart=Cart(request))
| 16.666667 | 54 | 0.633333 |
14cfe094fa651ac68911a92906949d51dc340df7 | 24,623 | py | Python | ironic/common/exception.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | ironic/common/exception.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | ironic/common/exception.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
SHOULD include dedicated exception logging.
"""
import collections
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from ironic.common.i18n import _
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
def _ensure_exception_kwargs_serializable(exc_class_name, kwargs):
    """Ensure that kwargs are serializable

    Ensure that all kwargs passed to exception constructor can be passed over
    RPC, by trying to convert them to JSON, or, as a last resort, to string.
    If it is not possible, unserializable kwargs will be removed, letting the
    receiver to handle the exception string as it is configured to.

    :param exc_class_name: an IronicException class name.
    :param kwargs: a dictionary of keyword arguments passed to the exception
        constructor. NOTE: mutated in place — keys that cannot be serialized
        are deleted at the end of this function.
    :returns: a dictionary of serializable keyword arguments.
    """
    # Serializers are tried in order: JSON first, plain text as a fallback.
    serializers = [(jsonutils.dumps, _('when converting to JSON')),
                   (six.text_type, _('when converting to string'))]
    # Maps a kwarg key to the list of serialization failures seen for it.
    exceptions = collections.defaultdict(list)
    serializable_kwargs = {}
    for k, v in kwargs.items():
        for serializer, msg in serializers:
            try:
                serializable_kwargs[k] = serializer(v)
                # A serializer succeeded: forget failures recorded for this
                # key by the earlier (stricter) serializers.
                exceptions.pop(k, None)
                break
            except Exception as e:
                exceptions[k].append(
                    '(%(serializer_type)s) %(e_type)s: %(e_contents)s' %
                    {'serializer_type': msg, 'e_contents': e,
                     'e_type': e.__class__.__name__})
    if exceptions:
        # Only keys for which every serializer failed remain in `exceptions`.
        LOG.error("One or more arguments passed to the %(exc_class)s "
                  "constructor as kwargs can not be serialized. The "
                  "serialized arguments: %(serialized)s. These "
                  "unserialized kwargs were dropped because of the "
                  "exceptions encountered during their "
                  "serialization:\n%(errors)s",
                  dict(errors=';\n'.join("%s: %s" % (k, '; '.join(v))
                                         for k, v in exceptions.items()),
                       exc_class=exc_class_name,
                       serialized=serializable_kwargs))
        # We might be able to actually put the following keys' values into
        # format string, but there is no guarantee, drop it just in case.
        for k in exceptions:
            # Deliberately mutates the caller-supplied dict (see docstring).
            del kwargs[k]
    return serializable_kwargs
class IronicException(Exception):
    """Base Ironic Exception

    To correctly use this class, inherit from it and define
    a '_msg_fmt' property. That message will get printf'd
    with the keyword arguments provided to the constructor.

    If you need to access the message from an exception you should use
    six.text_type(exc)
    """
    # Default message; subclasses override with their own printf template.
    _msg_fmt = _("An unknown exception occurred.")
    # HTTP status code associated with the exception (subclasses override).
    code = http_client.INTERNAL_SERVER_ERROR
    # NOTE(review): presumably marks whether the message may be exposed to
    # API users unmodified — confirm against the API/WSGI layer.
    safe = False
    def __init__(self, message=None, **kwargs):
        # Drop kwargs that cannot survive RPC serialization (see helper).
        self.kwargs = _ensure_exception_kwargs_serializable(
            self.__class__.__name__, kwargs)
        if 'code' not in self.kwargs:
            try:
                # Propagate the class-level HTTP code into kwargs.
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        else:
            # An explicit 'code' kwarg overrides the class attribute.
            self.code = int(kwargs['code'])
        if not message:
            try:
                message = self._msg_fmt % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in self._msg_fmt
                # log the issue and the kwargs
                prs = ', '.join('%s: %s' % pair for pair in kwargs.items())
                LOG.exception('Exception in string format operation '
                              '(arguments %s)', prs)
                if CONF.fatal_exception_format_errors:
                    # Operators may opt in to failing hard on bad templates.
                    raise e
                else:
                    # at least get the core self._msg_fmt out if something
                    # happened
                    message = self._msg_fmt
        super(IronicException, self).__init__(message)
    def __str__(self):
        """Encode to utf-8 then wsme api can consume it as well."""
        if not six.PY3:
            # Python 2: str() must return bytes, hence the utf-8 encode.
            return six.text_type(self.args[0]).encode('utf-8')
        return self.args[0]
    def __unicode__(self):
        """Return a unicode representation of the exception message."""
        return six.text_type(self.args[0])
# HTTP-mapped base exceptions. The `code` attribute is picked up by
# IronicException.__init__, which copies it into kwargs for the API layer.
class NotAuthorized(IronicException):
    # 403 Forbidden: the caller may be authenticated but is not allowed.
    _msg_fmt = _("Not authorized.")
    code = http_client.FORBIDDEN
class OperationNotPermitted(NotAuthorized):
    # Inherits 403 from NotAuthorized.
    _msg_fmt = _("Operation not permitted.")
class Invalid(IronicException):
    # 400 Bad Request: malformed or unacceptable parameters.
    _msg_fmt = _("Unacceptable parameters.")
    code = http_client.BAD_REQUEST
class Conflict(IronicException):
    # 409 Conflict: request clashes with current resource state.
    _msg_fmt = _('Conflict.')
    code = http_client.CONFLICT
class TemporaryFailure(IronicException):
    # 503 Service Unavailable: transient condition, client may retry.
    _msg_fmt = _("Resource temporarily unavailable, please retry.")
    code = http_client.SERVICE_UNAVAILABLE
class NotAcceptable(IronicException):
    # TODO(deva): We need to set response headers in the API for this exception
    # 406 Not Acceptable.
    _msg_fmt = _("Request not acceptable.")
    code = http_client.NOT_ACCEPTABLE
class InvalidState(Conflict):
    # Inherits 409 from Conflict.
    _msg_fmt = _("Invalid resource state.")
class NodeAlreadyExists(Conflict):
_msg_fmt = _("A node with UUID %(uuid)s already exists.")
class MACAlreadyExists(Conflict):
_msg_fmt = _("A port with MAC address %(mac)s already exists.")
class ChassisAlreadyExists(Conflict):
_msg_fmt = _("A chassis with UUID %(uuid)s already exists.")
class PortAlreadyExists(Conflict):
_msg_fmt = _("A port with UUID %(uuid)s already exists.")
class PortgroupAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with UUID %(uuid)s already exists.")
class PortgroupDuplicateName(Conflict):
_msg_fmt = _("A portgroup with name %(name)s already exists.")
class PortgroupMACAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with MAC address %(mac)s already exists.")
class InstanceAssociated(Conflict):
_msg_fmt = _("Instance %(instance_uuid)s is already associated with a "
"node, it cannot be associated with this other node %(node)s")
class DuplicateName(Conflict):
_msg_fmt = _("A node with name %(name)s already exists.")
class VolumeConnectorAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with UUID %(uuid)s already exists.")
class VolumeConnectorTypeAndIdAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with type %(type)s and connector ID "
"%(connector_id)s already exists.")
class VolumeTargetAlreadyExists(Conflict):
_msg_fmt = _("A volume target with UUID %(uuid)s already exists.")
class VolumeTargetBootIndexAlreadyExists(Conflict):
_msg_fmt = _("A volume target with boot index '%(boot_index)s' "
"for the same node already exists.")
class VifAlreadyAttached(Conflict):
_msg_fmt = _("Unable to attach VIF because VIF %(vif)s is already "
"attached to Ironic %(object_type)s %(object_uuid)s")
class NoFreePhysicalPorts(Invalid):
_msg_fmt = _("Unable to attach VIF %(vif)s, not "
"enough free physical ports.")
class VifNotAttached(Invalid):
_msg_fmt = _("Unable to detach VIF %(vif)s from node %(node)s "
"because it is not attached to it.")
class InvalidUUID(Invalid):
_msg_fmt = _("Expected a UUID but received %(uuid)s.")
class InvalidUuidOrName(Invalid):
_msg_fmt = _("Expected a logical name or UUID but received %(name)s.")
class InvalidName(Invalid):
_msg_fmt = _("Expected a logical name but received %(name)s.")
class InvalidConductorGroup(Invalid):
_msg_fmt = _("Expected a conductor group but received %(group)s.")
class InvalidIdentity(Invalid):
_msg_fmt = _("Expected a UUID or int but received %(identity)s.")
class InvalidMAC(Invalid):
_msg_fmt = _("Expected a MAC address but received %(mac)s.")
class InvalidSwitchID(Invalid):
_msg_fmt = _("Expected a MAC address or OpenFlow datapath ID but "
"received %(switch_id)s.")
class InvalidDatapathID(Invalid):
_msg_fmt = _("Expected an OpenFlow datapath ID but received "
"%(datapath_id)s.")
class InvalidStateRequested(Invalid):
_msg_fmt = _('The requested action "%(action)s" can not be performed '
'on node "%(node)s" while it is in state "%(state)s".')
class PatchError(Invalid):
_msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
class InstanceDeployFailure(IronicException):
_msg_fmt = _("Failed to deploy instance: %(reason)s")
class ImageUnacceptable(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageConvertFailed(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
_msg_fmt = "%(err)s"
class MissingParameterValue(InvalidParameterValue):
_msg_fmt = "%(err)s"
class Duplicate(IronicException):
_msg_fmt = _("Resource already exists.")
class NotFound(IronicException):
_msg_fmt = _("Resource could not be found.")
code = http_client.NOT_FOUND
class DHCPLoadError(IronicException):
_msg_fmt = _("Failed to load DHCP provider %(dhcp_provider_name)s, "
"reason: %(reason)s")
# TODO(dtantsur): word "driver" is overused in class names here, and generally
# means stevedore driver, not ironic driver. Rename them in the future.
class DriverNotFound(NotFound):
_msg_fmt = _("Could not find the following driver(s) or hardware type(s): "
"%(driver_name)s.")
class DriverNotFoundInEntrypoint(DriverNotFound):
_msg_fmt = _("Could not find the following items in the "
"'%(entrypoint)s' entrypoint: %(names)s.")
class InterfaceNotFoundInEntrypoint(InvalidParameterValue):
_msg_fmt = _("Could not find the following interface in the "
"'%(entrypoint)s' entrypoint: %(iface)s. Valid interfaces "
"are %(valid)s.")
class IncompatibleInterface(InvalidParameterValue):
_msg_fmt = _("%(interface_type)s interface implementation "
"'%(interface_impl)s' is not supported by hardware type "
"%(hardware_type)s.")
class NoValidDefaultForInterface(InvalidParameterValue):
# NOTE(rloo): in the line below, there is no blank space after 'For'
# because node_info could be an empty string. If node_info
# is not empty, it should start with a space.
_msg_fmt = _("For%(node_info)s hardware type '%(driver)s', no default "
"value found for %(interface_type)s interface.")
class ImageNotFound(NotFound):
_msg_fmt = _("Image %(image_id)s could not be found.")
class NoValidHost(NotFound):
_msg_fmt = _("No valid host was found. Reason: %(reason)s")
class InstanceNotFound(NotFound):
_msg_fmt = _("Instance %(instance)s could not be found.")
class InputFileError(IronicException):
_msg_fmt = _("Error with file %(file_name)s. Reason: %(reason)s")
class NodeNotFound(NotFound):
_msg_fmt = _("Node %(node)s could not be found.")
class PortgroupNotFound(NotFound):
_msg_fmt = _("Portgroup %(portgroup)s could not be found.")
class PortgroupNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because portgroup "
"%(portgroup)s contains ports.")
class NodeAssociated(InvalidState):
_msg_fmt = _("Node %(node)s is associated with instance %(instance)s.")
class PortNotFound(NotFound):
_msg_fmt = _("Port %(port)s could not be found.")
class FailedToUpdateDHCPOptOnPort(IronicException):
_msg_fmt = _("Update DHCP options on port: %(port_id)s failed.")
class FailedToCleanDHCPOpts(IronicException):
_msg_fmt = _("Clean up DHCP options on node: %(node)s failed.")
class FailedToGetIPAddressOnPort(IronicException):
_msg_fmt = _("Retrieve IP address on port: %(port_id)s failed.")
class InvalidIPv4Address(IronicException):
_msg_fmt = _("Invalid IPv4 address %(ip_address)s.")
class FailedToUpdateMacOnPort(IronicException):
_msg_fmt = _("Update MAC address on port: %(port_id)s failed.")
class ChassisNotFound(NotFound):
_msg_fmt = _("Chassis %(chassis)s could not be found.")
class VolumeConnectorNotFound(NotFound):
_msg_fmt = _("Volume connector %(connector)s could not be found.")
class VolumeTargetNotFound(NotFound):
_msg_fmt = _("Volume target %(target)s could not be found.")
class NoDriversLoaded(IronicException):
_msg_fmt = _("Conductor %(conductor)s cannot be started "
"because no hardware types were loaded.")
class ConductorNotFound(NotFound):
_msg_fmt = _("Conductor %(conductor)s could not be found.")
class ConductorAlreadyRegistered(IronicException):
_msg_fmt = _("Conductor %(conductor)s already registered.")
class ConductorHardwareInterfacesAlreadyRegistered(IronicException):
_msg_fmt = _("At least one of these (hardware type %(hardware_type)s, "
"interface type %(interface_type)s, interfaces "
"%(interfaces)s) combinations are already registered for "
"this conductor.")
class PowerStateFailure(InvalidState):
_msg_fmt = _("Failed to set node power state to %(pstate)s.")
class ExclusiveLockRequired(NotAuthorized):
_msg_fmt = _("An exclusive lock is required, "
"but the current context has a shared lock.")
class NodeMaintenanceFailure(Invalid):
_msg_fmt = _("Failed to toggle maintenance-mode flag "
"for node %(node)s: %(reason)s")
class NodeConsoleNotEnabled(Invalid):
_msg_fmt = _("Console access is not enabled on node %(node)s")
class NodeInMaintenance(Invalid):
_msg_fmt = _("The %(op)s operation can't be performed on node "
"%(node)s because it's in maintenance mode.")
class ChassisNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because chassis "
"%(chassis)s contains nodes.")
class IPMIFailure(IronicException):
_msg_fmt = _("IPMI call failed: %(cmd)s.")
class UnsupportedDriverExtension(Invalid):
_msg_fmt = _('Driver %(driver)s does not support %(extension)s '
'(disabled or not implemented).')
class GlanceConnectionFailed(IronicException):
_msg_fmt = _("Connection to glance endpoint %(endpoint)s failed: "
"%(reason)s")
class ImageNotAuthorized(NotAuthorized):
_msg_fmt = _("Not authorized for image %(image_id)s.")
class InvalidImageRef(Invalid):
_msg_fmt = _("Invalid image href %(image_href)s.")
class ImageRefValidationFailed(IronicException):
_msg_fmt = _("Validation of image href %(image_href)s failed, "
"reason: %(reason)s")
class ImageDownloadFailed(IronicException):
_msg_fmt = _("Failed to download image %(image_href)s, reason: %(reason)s")
class KeystoneUnauthorized(IronicException):
_msg_fmt = _("Not authorized in Keystone.")
class KeystoneFailure(IronicException):
pass
class CatalogNotFound(IronicException):
_msg_fmt = _("Service type %(service_type)s with endpoint type "
"%(endpoint_type)s not found in keystone service catalog.")
class ServiceUnavailable(IronicException):
_msg_fmt = _("Connection failed")
class Forbidden(IronicException):
_msg_fmt = _("Requested OpenStack Images API is forbidden")
class BadRequest(IronicException):
pass
class InvalidEndpoint(IronicException):
_msg_fmt = _("The provided endpoint is invalid")
class CommunicationError(IronicException):
_msg_fmt = _("Unable to communicate with the server.")
class HTTPForbidden(NotAuthorized):
_msg_fmt = _("Access was denied to the following resource: %(resource)s")
class Unauthorized(IronicException):
pass
class HTTPNotFound(NotFound):
pass
class ConfigNotFound(IronicException):
_msg_fmt = _("Could not find config at %(path)s")
class NodeLocked(Conflict):
_msg_fmt = _("Node %(node)s is locked by host %(host)s, please retry "
"after the current operation is completed.")
class NodeNotLocked(Invalid):
_msg_fmt = _("Node %(node)s found not to be locked on release")
class NoFreeConductorWorker(TemporaryFailure):
_msg_fmt = _('Requested action cannot be performed due to lack of free '
'conductor workers.')
code = http_client.SERVICE_UNAVAILABLE
class VendorPassthruException(IronicException):
pass
class ConfigInvalid(IronicException):
_msg_fmt = _("Invalid configuration file. %(error_msg)s")
class DriverLoadError(IronicException):
_msg_fmt = _("Driver, hardware type or interface %(driver)s could not be "
"loaded. Reason: %(reason)s.")
class DriverOperationError(IronicException):
_msg_fmt = _("Runtime driver %(driver)s failure. Reason: %(reason)s.")
class ConsoleError(IronicException):
pass
class NoConsolePid(ConsoleError):
_msg_fmt = _("Could not find pid in pid file %(pid_path)s")
class ConsoleSubprocessFailed(ConsoleError):
_msg_fmt = _("Console subprocess failed to start. %(error)s")
class PasswordFileFailedToCreate(IronicException):
_msg_fmt = _("Failed to create the password file. %(error)s")
class IloOperationError(DriverOperationError):
_msg_fmt = _("%(operation)s failed, error: %(error)s")
class IloOperationNotSupported(DriverOperationError):
_msg_fmt = _("%(operation)s not supported. error: %(error)s")
class DracOperationError(DriverOperationError):
_msg_fmt = _('DRAC operation failed. Reason: %(error)s')
class FailedToGetSensorData(IronicException):
_msg_fmt = _("Failed to get sensor data for node %(node)s. "
"Error: %(error)s")
# Concrete exception types. Each subclass only overrides ``_msg_fmt``, a
# translatable (``_(...)``) template whose dict-style ``%(key)s`` placeholders
# are filled from keyword arguments at raise time by the externally defined
# base classes (IronicException, DriverOperationError, ...).

# -- node sensor / storage / image errors -------------------------------------
class FailedToParseSensorData(IronicException):
    _msg_fmt = _("Failed to parse sensor data for node %(node)s. "
                 "Error: %(error)s")
class InsufficientDiskSpace(IronicException):
    _msg_fmt = _("Disk volume where '%(path)s' is located doesn't have "
                 "enough disk space. Required %(required)d MiB, "
                 "only %(actual)d MiB available space present.")
class ImageCreationFailed(IronicException):
    _msg_fmt = _('Creating %(image_type)s image failed: %(error)s')
# -- Swift object-store errors ------------------------------------------------
class SwiftOperationError(IronicException):
    _msg_fmt = _("Swift operation '%(operation)s' failed: %(error)s")
class SwiftObjectNotFoundError(SwiftOperationError):
    _msg_fmt = _("Swift object %(obj)s from container %(container)s "
                 "not found. Operation '%(operation)s' failed.")
class SNMPFailure(DriverOperationError):
    _msg_fmt = _("SNMP operation '%(operation)s' failed: %(error)s")
class FileSystemNotSupported(IronicException):
    _msg_fmt = _("Failed to create a file system. "
                 "File system %(fs)s is not supported.")
# -- Fujitsu iRMC driver errors -----------------------------------------------
class IRMCOperationError(DriverOperationError):
    _msg_fmt = _('iRMC %(operation)s failed. Reason: %(error)s')
class IRMCSharedFileSystemNotMounted(DriverOperationError):
    _msg_fmt = _("iRMC shared file system '%(share)s' is not mounted.")
class HardwareInspectionFailure(IronicException):
    _msg_fmt = _("Failed to inspect hardware. Reason: %(error)s")
class NodeCleaningFailure(IronicException):
    _msg_fmt = _("Failed to clean node %(node)s: %(reason)s")
# -- local filesystem errors --------------------------------------------------
class PathNotFound(IronicException):
    _msg_fmt = _("Path %(dir)s does not exist.")
class DirectoryNotWritable(IronicException):
    _msg_fmt = _("Directory %(dir)s is not writable.")
# -- Cisco UCS / IMC driver errors --------------------------------------------
class UcsOperationError(DriverOperationError):
    _msg_fmt = _("Cisco UCS client: operation %(operation)s failed for node"
                 " %(node)s. Reason: %(error)s")
class UcsConnectionError(IronicException):
    _msg_fmt = _("Cisco UCS client: connection failed for node "
                 "%(node)s. Reason: %(error)s")
class ImageUploadFailed(IronicException):
    _msg_fmt = _("Failed to upload %(image_name)s image to web server "
                 "%(web_server)s, reason: %(reason)s")
class CIMCException(DriverOperationError):
    _msg_fmt = _("Cisco IMC exception occurred for node %(node)s: %(error)s")
class NodeTagNotFound(IronicException):
    _msg_fmt = _("Node %(node_id)s doesn't have a tag '%(tag)s'")
class NetworkError(IronicException):
    _msg_fmt = _("Network operation failure.")
# Raised for API lookups that supplied neither selector parameter.
class IncompleteLookup(Invalid):
    _msg_fmt = _("At least one of 'addresses' and 'node_uuid' parameters "
                 "is required")
# -- notification subsystem errors: raised while building/serializing
# -- notification payloads from versioned objects ------------------------------
class NotificationSchemaObjectError(IronicException):
    _msg_fmt = _("Expected object %(obj)s when populating notification payload"
                 " but got object %(source)s")
class NotificationSchemaKeyError(IronicException):
    _msg_fmt = _("Object %(obj)s doesn't have the field \"%(field)s\" "
                 "required for populating notification schema key "
                 "\"%(key)s\"")
class NotificationPayloadError(IronicException):
    _msg_fmt = _("Payload not populated when trying to send notification "
                 "\"%(class_name)s\"")
class StorageError(IronicException):
    _msg_fmt = _("Storage operation failure.")
# -- Redfish driver errors ----------------------------------------------------
class RedfishError(DriverOperationError):
    _msg_fmt = _("Redfish exception occurred. Error: %(error)s")
class RedfishConnectionError(RedfishError):
    _msg_fmt = _("Redfish connection failed for node %(node)s: %(error)s")
class PortgroupPhysnetInconsistent(IronicException):
    _msg_fmt = _("Port group %(portgroup)s has member ports with inconsistent "
                 "physical networks (%(physical_networks)s). All ports in a "
                 "port group must have the same physical network.")
# Conflict-type error (HTTP 409 semantics per the Conflict base name).
class VifInvalidForAttach(Conflict):
    _msg_fmt = _("Unable to attach VIF %(vif)s to node %(node)s. Reason: "
                 "%(reason)s")
class AgentAPIError(IronicException):
    _msg_fmt = _('Agent API for node %(node)s returned HTTP status code '
                 '%(status)s with error: %(error)s')
class NodeTraitNotFound(NotFound):
    _msg_fmt = _("Node %(node_id)s doesn't have a trait '%(trait)s'")
# -- instance rescue / unrescue errors ----------------------------------------
class InstanceRescueFailure(IronicException):
    _msg_fmt = _('Failed to rescue instance %(instance)s for node '
                 '%(node)s: %(reason)s')
class InstanceUnrescueFailure(IronicException):
    _msg_fmt = _('Failed to unrescue instance %(instance)s for node '
                 '%(node)s: %(reason)s')
class XClarityError(IronicException):
    _msg_fmt = _("XClarity exception occurred. Error: %(error)s")
# -- BIOS setting errors ------------------------------------------------------
class BIOSSettingAlreadyExists(Conflict):
    _msg_fmt = _('A BIOS setting %(name)s for node %(node)s already exists.')
class BIOSSettingNotFound(NotFound):
    _msg_fmt = _("Node %(node)s doesn't have a BIOS setting '%(name)s'")
class BIOSSettingListNotFound(NotFound):
    _msg_fmt = _("Node %(node)s doesn't have BIOS settings '%(names)s'")
class DatabaseVersionTooOld(IronicException):
    _msg_fmt = _("Database version is too old")
class AgentConnectionFailed(IronicException):
    _msg_fmt = _("Connection to agent failed: %(reason)s")
class NodeProtected(HTTPForbidden):
    _msg_fmt = _("Node %(node)s is protected and cannot be undeployed, "
                 "rebuilt or deleted")
# -- allocation API errors ----------------------------------------------------
class AllocationNotFound(NotFound):
    _msg_fmt = _("Allocation %(allocation)s could not be found.")
class AllocationDuplicateName(Conflict):
    _msg_fmt = _("An allocation with name %(name)s already exists.")
class AllocationAlreadyExists(Conflict):
    _msg_fmt = _("An allocation with UUID %(uuid)s already exists.")
class AllocationFailed(IronicException):
    _msg_fmt = _("Failed to process allocation %(uuid)s: %(error)s.")
| 30.398765 | 79 | 0.680055 |
d1caa6c5e37005067a08f162ca6e1b91ee2b2b30 | 2,556 | py | Python | webapp/candysorter/ext/google/cloud/ml/training.py | https-giithub-com/findyoucandy | 869b4a696e7cb2ce11d003b383cced694f0bc5ec | [
"Apache-2.0"
] | null | null | null | webapp/candysorter/ext/google/cloud/ml/training.py | https-giithub-com/findyoucandy | 869b4a696e7cb2ce11d003b383cced694f0bc5ec | [
"Apache-2.0"
] | 3 | 2021-04-07T23:19:04.000Z | 2021-09-23T23:24:22.000Z | webapp/candysorter/ext/google/cloud/ml/training.py | https-giithub-com/findyoucandy | 869b4a696e7cb2ce11d003b383cced694f0bc5ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
class ScaleTier(object):
    """Cloud ML Engine scale-tier identifiers (machine configuration presets)."""

    BASIC = 'BASIC'
    STANDARD_1 = 'STANDARD_1'
    PREMIUM_1 = 'PREMIUM_1'
    BASIC_GPU = 'BASIC_GPU'
    CUSTOM = 'CUSTOM'


class TrainingInput(object):
    """Input parameters for a Cloud ML Engine training job.

    Mirrors the ``trainingInput`` resource of the Cloud ML v1 REST API and
    converts to and from its JSON (API) representation.
    """

    def __init__(self, package_uris, python_module, scale_tier=ScaleTier.BASIC,
                 region='us-central1', runtime_version='1.0'):
        """Store the job parameters.

        :param package_uris: GCS URIs of the Python packages to install.
        :param python_module: dotted name of the module to execute.
        :param scale_tier: one of the ``ScaleTier`` constants.
        :param region: Cloud region to run in.
        :param runtime_version: Cloud ML runtime version string.
        """
        self.package_uris = package_uris
        self.python_module = python_module
        self.scale_tier = scale_tier
        self.region = region
        self.runtime_version = runtime_version
        # holds optional fields (currently only 'args')
        self._properties = {}

    @classmethod
    def from_api_repr(cls, resource):
        """Build a TrainingInput from its API (dict) representation."""
        training_input = cls(package_uris=resource.get('packageUris'),
                             python_module=resource.get('pythonModule'),
                             scale_tier=resource.get('scaleTier'),
                             region=resource.get('region'),
                             runtime_version=resource.get('runtimeVersion'))
        if 'args' in resource:
            training_input._properties['args'] = resource['args']
        return training_input

    def to_api_repr(self):
        """Return the API (dict) representation of this TrainingInput."""
        resource = {
            'scaleTier': self.scale_tier,
            'packageUris': self.package_uris,
            'pythonModule': self.python_module,
            'region': self.region,
            'runtimeVersion': self.runtime_version,
        }
        _args = self._properties.get('args')
        if _args is not None:
            resource['args'] = _args
        return resource

    @property
    def args(self):
        # BUG FIX: the getter previously declared a spurious ``value``
        # parameter, so reading ``obj.args`` raised TypeError. A property
        # getter takes only ``self``.
        """Command-line arguments passed to the training module (or None)."""
        return self._properties.get('args')

    @args.setter
    def args(self, value):
        self._properties['args'] = value

    def with_args(self, *args):
        """Append *args* to the argument list and return self (fluent style)."""
        _args = self._properties.setdefault('args', [])
        _args.extend(args)
        # Return self so calls can be chained, matching the builder-style
        # ``with_`` naming; previously the method implicitly returned None.
        return self
| 35.5 | 82 | 0.622066 |
300b475c0ef21ee4735e1b6a34b4ce504676dea4 | 5,485 | py | Python | examples/rllib/multiagent_exps/multiagent_stabilizing_the_ring.py | remusionita/flow_rl | f26cd13bdcc89c00fe980cd8d7873c88af8e2744 | [
"MIT"
] | 1 | 2020-06-09T14:40:54.000Z | 2020-06-09T14:40:54.000Z | examples/rllib/multiagent_exps/multiagent_stabilizing_the_ring.py | remusionita/flow_rl | f26cd13bdcc89c00fe980cd8d7873c88af8e2744 | [
"MIT"
] | 1 | 2019-12-05T09:04:05.000Z | 2019-12-05T21:23:49.000Z | examples/rllib/multiagent_exps/multiagent_stabilizing_the_ring.py | remusionita/flow_rl | f26cd13bdcc89c00fe980cd8d7873c88af8e2744 | [
"MIT"
] | 3 | 2019-12-07T11:36:21.000Z | 2020-01-04T16:29:57.000Z | """Ring road example.
Creates a set of stabilizing the ring experiments to test if
more agents -> fewer needed batches
"""
import json
import ray
try:
from ray.rllib.agents.agent import get_agent_class
except ImportError:
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray import tune
from ray.tune.registry import register_env
from ray.tune import run_experiments
from flow.envs.multiagent import MultiWaveAttenuationPOEnv
from flow.networks import MultiRingNetwork
from flow.controllers import ContinuousRouter
from flow.controllers import IDMController
from flow.controllers import RLController
from flow.core.params import EnvParams
from flow.core.params import InitialConfig
from flow.core.params import NetParams
from flow.core.params import SumoParams
from flow.core.params import VehicleParams
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder
# make sure (sample_batch_size * num_workers ~= train_batch_size)
# time horizon of a single rollout
HORIZON = 3000
# Number of rings
NUM_RINGS = 1
# number of rollouts per training iteration
N_ROLLOUTS = 20  # int(20/NUM_RINGS)
# number of parallel workers
N_CPUS = 2  # int(20/NUM_RINGS)
# We place one autonomous vehicle and 21 human-driven vehicles in the network
# (per ring; vehicle ids carry the ring index as a suffix)
vehicles = VehicleParams()
for i in range(NUM_RINGS):
    # 21 IDM-controlled ("human") vehicles with acceleration noise
    vehicles.add(
        veh_id='human_{}'.format(i),
        acceleration_controller=(IDMController, {
            'noise': 0.2
        }),
        routing_controller=(ContinuousRouter, {}),
        num_vehicles=21)
    # one RL-controlled (autonomous) vehicle
    vehicles.add(
        veh_id='rl_{}'.format(i),
        acceleration_controller=(RLController, {}),
        routing_controller=(ContinuousRouter, {}),
        num_vehicles=1)
flow_params = dict(
    # name of the experiment
    exp_tag='lord_of_numrings{}'.format(NUM_RINGS),
    # name of the flow environment the experiment is running on
    env_name=MultiWaveAttenuationPOEnv,
    # name of the network class the experiment is running on
    network=MultiRingNetwork,
    # simulator that is used by the experiment
    simulator='traci',
    # sumo-related parameters (see flow.core.params.SumoParams)
    sim=SumoParams(
        sim_step=0.1,
        render=False,
    ),
    # environment related parameters (see flow.core.params.EnvParams)
    env=EnvParams(
        horizon=HORIZON,
        warmup_steps=750,
        additional_params={
            'max_accel': 1,
            'max_decel': 1,
            'ring_length': [230, 230],
            'target_velocity': 4
        },
    ),
    # network-related parameters (see flow.core.params.NetParams and the
    # network's documentation or ADDITIONAL_NET_PARAMS component)
    net=NetParams(
        additional_params={
            'length': 230,
            'lanes': 1,
            'speed_limit': 30,
            'resolution': 40,
            'num_rings': NUM_RINGS
        }, ),
    # vehicles to be placed in the network at the start of a rollout (see
    # flow.core.params.VehicleParams)
    veh=vehicles,
    # parameters specifying the positioning of vehicles upon initialization/
    # reset (see flow.core.params.InitialConfig)
    initial=InitialConfig(bunching=20.0, spacing='custom'),
)
def setup_exps():
    """Assemble the pieces of an RLlib experiment.

    Returns
    -------
    str
        name of the training algorithm
    str
        name of the gym environment to be trained
    dict
        training configuration parameters
    """
    alg_run = 'PPO'

    # Start from the trainer's default configuration and overwrite the
    # experiment-specific entries.
    config = get_agent_class(alg_run)._default_config.copy()
    config.update({
        'num_workers': N_CPUS,
        'train_batch_size': HORIZON * N_ROLLOUTS,
        'simple_optimizer': True,
        'gamma': 0.999,  # discount rate
        'lr': tune.grid_search([1e-5]),
        'horizon': HORIZON,
        'clip_actions': False,  # FIXME(ev) temporary ray bug
        'observation_filter': 'NoFilter',
    })
    # mutate the nested model dict in place rather than replacing it
    config['model'].update({'fcnet_hiddens': [32, 32]})

    # Serialize the flow params into the config so the rollout can be
    # replayed later.
    flow_json = json.dumps(
        flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
    config['env_config']['flow_params'] = flow_json
    config['env_config']['run'] = alg_run

    create_env, env_name = make_create_env(params=flow_params, version=0)

    # Register as rllib env
    register_env(env_name, create_env)

    # Probe the environment once to recover its observation/action spaces.
    probe_env = create_env()
    obs_space = probe_env.observation_space
    act_space = probe_env.action_space

    # One shared policy ("av") controls every autonomous vehicle.
    policy_graphs = {'av': (PPOTFPolicy, obs_space, act_space, {})}

    def map_agent_to_policy(_):
        return 'av'

    config['multiagent'] = {
        'policies': policy_graphs,
        'policy_mapping_fn': tune.function(map_agent_to_policy),
        'policies_to_train': ['av'],
    }

    return alg_run, env_name, config
if __name__ == '__main__':
    alg_run, env_name, config = setup_exps()
    # reserve one CPU beyond the rollout-worker count
    ray.init(num_cpus=N_CPUS + 1)
    # single training iteration with a checkpoint after each iteration
    run_experiments({
        flow_params['exp_tag']: {
            'run': alg_run,
            'env': env_name,
            'checkpoint_freq': 1,
            'stop': {
                'training_iteration': 1
            },
            'config': config,
            # 'upload_dir': 's3://<BUCKET NAME>'
        },
    })
| 29.489247 | 77 | 0.668551 |
75e4aab1a5be601e94a90b8ac1c56fa30bd6a5db | 122 | py | Python | surfstat/python/need_not_convert/SurfStatWriteVol1.py | rudimeier/BrainStat | a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1 | [
"BSD-3-Clause"
] | null | null | null | surfstat/python/need_not_convert/SurfStatWriteVol1.py | rudimeier/BrainStat | a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1 | [
"BSD-3-Clause"
] | null | null | null | surfstat/python/need_not_convert/SurfStatWriteVol1.py | rudimeier/BrainStat | a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def py_SurfStatWriteVol(d, Z, T):
    """Placeholder for the MATLAB ``SurfStatWriteVol1`` port (not implemented).

    Always aborts with an explanatory message.

    BUG FIX: the original called ``sys.exit(...)`` but the file never imports
    ``sys`` (only numpy), so it raised ``NameError`` instead of exiting.
    ``raise SystemExit(msg)`` is exactly what ``sys.exit(msg)`` does and needs
    no import.
    """
    raise SystemExit("Function py_SurfStatWriteVol is not implemented yet")
| 24.4 | 67 | 0.762295 |
c4de5896b377698f9748507b9d91862852ada050 | 702 | py | Python | PytoTests/Tests/python/test_lib.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 701 | 2018-10-22T11:54:09.000Z | 2022-03-31T14:39:30.000Z | PytoTests/Tests/python/test_lib.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 229 | 2018-10-24T09:15:31.000Z | 2021-12-24T16:51:37.000Z | PytoTests/Tests/python/test_lib.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 131 | 2018-11-25T18:33:03.000Z | 2022-03-24T03:18:07.000Z | from pip import main as pip
from console import clear
import sys
import os
import numpy
# echo the imported numpy module (shows its install path) for debugging
print(numpy)
# first CLI argument: path of the test tree to hand to pytest
tests = sys.argv[1]
# install pytest/hypothesis only once per process, flagged via an env var
if not "PYTEST_INSTALLED" in os.environ:
    os.environ["PYTEST_INSTALLED"] = "1"
    try:
        pip(["install", "pytest"])
        pip(["install", "hypothesis"])
    except SystemExit:
        # pip's CLI entry point exits via SystemExit; swallow it so this
        # script can keep running
        pass
    clear()
from pytest import main as pytest
# test names to deselect; combined into a single pytest -k expression below
excluded = [
    "test_import_lazy_import",
    "test_numpy_namespace",
    "test_full_reimport",
    "test_api_importable",
    "test_all_modules_are_expected",
    "test_all_modules_are_expected_2",
    "test_numpy_reloading",
    "test_pep338",
]
excluded = " and not ".join(excluded)
# final expression reads "not a and not b and ..."
pytest([tests, "-k", "not "+excluded])
| 19.5 | 40 | 0.673789 |
54976ab720038b07f640869441e0a84d7faf23cc | 6,248 | py | Python | manage.py | isabella232/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 35 | 2015-11-14T18:32:45.000Z | 2022-01-23T15:15:05.000Z | manage.py | codeforamerica/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 119 | 2015-11-20T22:45:34.000Z | 2022-02-10T23:02:36.000Z | manage.py | isabella232/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 19 | 2015-11-20T20:41:52.000Z | 2022-01-26T04:12:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import datetime
from copy import deepcopy
from dateutil.relativedelta import relativedelta
from random import randint
from flask_script import Manager, Shell, Server, prompt_pass, prompt_bool
from flask_migrate import MigrateCommand, upgrade
from comport.content.defaults import ChartBlockDefaults
from comport.app import create_app
from comport.user.models import User, Role
from comport.department.models import Department, Extractor
from comport.settings import DevConfig, ProdConfig
from comport.database import db
from comport.data.models import DenominatorValue, DemographicValue, IncidentsUpdated
from testclient.JSON_test_client import JSONTestClient
import importlib
# Set environment variables from the .env file (one KEY=VALUE per line).
if os.path.exists('.env'):
    # use a context manager so the file handle is closed deterministically
    with open('.env') as env_file:
        for line in env_file:
            # BUG FIX: split on the *first* '=' only. Values such as URLs,
            # tokens or base64 secrets legitimately contain '=' themselves
            # and were previously dropped by the unbounded split.
            var = [item.strip() for item in line.strip().split('=', 1)]
            if len(var) == 2:
                os.environ[var[0]] = var[1]

# pick a configuration object based on the COMPORT_ENV environment variable
if os.environ.get("COMPORT_ENV") == 'prod':
    config_object = ProdConfig
else:
    # No Slack webhook URL for testing
    if 'SLACK_WEBHOOK_URL' in os.environ:
        del(os.environ['SLACK_WEBHOOK_URL'])
    config_object = DevConfig

# create the app
app = create_app(config_object)

# absolute paths to this file's directory and the test suite
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')

manager = Manager(app)
def _make_context():
    """Build the name -> object mapping exposed to a shell session."""
    # (class prefix, department short names); each combination names a
    # concrete incident model class in comport.data.models.
    class_lookup = [
        ("UseOfForceIncident", ["IMPD", "BPD", "LMPD"]),
        ("CitizenComplaint", ["IMPD", "BPD"]),
        ("OfficerInvolvedShooting", ["IMPD", "BPD"]),
        ("AssaultOnOfficer", ["IMPD"]),
    ]
    models_module = importlib.import_module("comport.data.models")
    incident_classes = {
        prefix + name: getattr(models_module, prefix + name)
        for prefix, departments in class_lookup
        for name in departments
    }

    context = {
        'app': app,
        'db': db,
        'User': User,
        'Department': Department,
        'Extractor': Extractor,
        'IncidentsUpdated': IncidentsUpdated,
        'JSONTestClient': JSONTestClient,
    }
    # expose the incident classes alongside the core objects
    context.update(incident_classes)
    return context
@manager.command
def test():
    """Run the test suite with pytest and return its exit code."""
    import pytest
    return pytest.main([TEST_PATH, '-x', '--verbose'])
@manager.command
def make_admin_user():
    """Interactively create an 'admin' account carrying the admin role."""
    password = prompt_pass("Password")
    # create the role first, then attach it to the freshly created user
    admin_role = Role(name='admin')
    admin_role.save()
    user = User.create(username="admin", email="email@example.com", password=password, active=True)
    user.roles.append(admin_role)
    user.save()
@manager.command
def delete_everything():
    """Drop every table in the database and rebuild the schema via migrations."""
    db.reflect()  # load existing table metadata so drop_all() covers all tables
    db.drop_all()
    upgrade()  # flask-migrate: re-apply migrations up to the latest revision
@manager.command
def add_new_blocks():
    """Attach any missing default chart blocks to every department."""
    for department in Department.query.all():
        # track the slugs already present, updating as we append
        present_slugs = {existing.slug for existing in department.chart_blocks}
        for default_block in ChartBlockDefaults.defaults:
            if default_block.slug in present_slugs:
                continue
            print("adding {} to {}".format(default_block.slug, department.name))
            # Saving a shared block instance had detached identical chart
            # blocks from previously processed departments; a deep copy
            # keeps every department's blocks independent.
            department.chart_blocks.append(deepcopy(default_block))
            present_slugs.add(default_block.slug)
            department.save()

    db.session.commit()
@manager.command
def test_client():
    ''' Erase the database and load in a full suite of test data
    '''
    if not prompt_bool("Are you sure you want to destroy and recreate Comport's database?"):
        return
    delete_everything()
    # create a fake PD and admin user
    department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=True)
    user = User.create(username="user", email="user@example.com", password="password", active=True, is_admin=True)
    user.departments.append(department)
    user.save()
    # create some fake officer out on service data: one DenominatorValue per
    # month for the last 30 months, ~100,000 with +/-23,000 random jitter
    date_now = datetime.datetime.now()
    date_step = date_now - relativedelta(months=30)
    while date_step.year < date_now.year or date_step.month < date_now.month:
        DenominatorValue.create(
            department_id=department.id,
            month=date_step.month,
            year=date_step.year,
            officers_out_on_service=(100000 + (randint(0, 46000) - 23000))
        )
        date_step = date_step + relativedelta(months=1)
    # create some fake demographic data; city_factor/dept_factor are per-race
    # proportions applied to the city and department populations below
    demo_template = [
        dict(race="Asian", city_factor=0.0194, dept_factor=0.0013),
        dict(race="Black", city_factor=0.2452, dept_factor=0.1402),
        dict(race="Hispanic", city_factor=0.0861, dept_factor=0.0253),
        dict(race="Other", city_factor=0.0699, dept_factor=0.0101),
        dict(race="White", city_factor=0.5794, dept_factor=0.8231)
    ]
    # for the city: population of 100,000 with up to +/-8% random jitter
    city_population = 100000 + round(100000 * ((randint(0, 16) / 100) - .08))
    for value in demo_template:
        DemographicValue.create(
            department_id=department.id,
            race=value["race"],
            count=round(city_population * value["city_factor"]),
            department_value=False
        )
    # for the department: 1,500 members with up to +/-8% random jitter
    dept_population = 1500 + round(1500 * ((randint(0, 16) / 100) - .08))
    for value in demo_template:
        DemographicValue.create(
            department_id=department.id,
            race=value["race"],
            count=round(dept_population * value["dept_factor"]),
            department_value=True
        )
    # create a JSON test client and run it (no mutators enabled by default;
    # uncomment entries below to exercise malformed/edge-case payloads)
    test_client = JSONTestClient()
    mutations = []
    # mutations.append(MissingDataMutator())
    # mutations.append(FuzzedDataMutator())
    # mutations.append(KnownBadDataMutator())
    # mutations.append(EmptyDataMutator())
    # mutations.append(CasingMutator())
    # mutations.append(CondenisngDateMutator())
    # mutations.append(GapDateMutator())
    test_client.run(department, mutations)
# wire up the CLI sub-commands: a dev `server`, an interactive `shell`
# seeded with _make_context(), and the flask-migrate `db` commands
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| 34.711111 | 172 | 0.678777 |
25949c4e0ed933665b8be0577884c8bcd1c8ba74 | 2,327 | py | Python | liualgotrader/fincalcs/vwap.py | ajmal017/LiuAlgoTrader | 85924391c139fdca84d6bd98494385405d3b799f | [
"MIT"
] | 1 | 2020-12-01T02:39:52.000Z | 2020-12-01T02:39:52.000Z | liualgotrader/fincalcs/vwap.py | ajmal017/LiuAlgoTrader | 85924391c139fdca84d6bd98494385405d3b799f | [
"MIT"
] | null | null | null | liualgotrader/fincalcs/vwap.py | ajmal017/LiuAlgoTrader | 85924391c139fdca84d6bd98494385405d3b799f | [
"MIT"
] | null | null | null | from datetime import datetime
import pandas as pd
from pandas import DataFrame as df
from pandas import Timestamp as ts
from tabulate import tabulate
from liualgotrader.common import config
from liualgotrader.common.tlog import tlog
def add_daily_vwap(minute_data: df, debug=False) -> bool:
    """Add intraday VWAP columns to *minute_data* in place.

    Anchors the cumulative calculation at the minute bar nearest to
    ``config.market_open`` and appends the helper columns ``pv``, ``apv``,
    ``av``, ``average`` (the running VWAP) and ``vwap`` (per-bar typical
    price) to the frame.

    Returns True on success, False when the anchor timestamp cannot be
    located in the frame's index.
    """
    back_time = ts(config.market_open)

    # BUG FIX: a stray unconditional `print("before vwap", minute_data)`
    # used to dump the entire DataFrame to stdout on every call; debug
    # output now goes through tlog and only when requested.
    if debug:
        tlog(f"add_daily_vwap() input:\n{minute_data}")

    try:
        # NOTE(review): Index.get_loc(..., method="nearest") was removed in
        # pandas 2.0; under new pandas this raises and the function returns
        # False -- confirm the pinned pandas version.
        back_time_index = minute_data["close"].index.get_loc(
            back_time, method="nearest"
        )
    except Exception as e:
        if debug:
            tlog(
                f"IndexError exception {e} in add_daily_vwap for {minute_data}"
            )
        return False

    # price * volume per bar, using the typical price (close+high+low)/3
    minute_data["pv"] = minute_data.apply(
        lambda x: (x["close"] + x["high"] + x["low"]) / 3 * x["volume"], axis=1
    )
    # cumulative price*volume and volume, anchored at the market-open bar
    minute_data["apv"] = minute_data["pv"][back_time_index:].cumsum()
    minute_data["av"] = minute_data["volume"][back_time_index:].cumsum()
    # running volume-weighted average price
    minute_data["average"] = minute_data["apv"] / minute_data["av"]
    # per-bar typical price (not volume weighted)
    minute_data["vwap"] = minute_data.apply(
        lambda x: (x["close"] + x["high"] + x["low"]) / 3, axis=1
    )

    if debug:
        tlog(
            f"\n{tabulate(minute_data[-110:-100], headers='keys', tablefmt='psql')}"
        )
        tlog(
            f"\n{tabulate(minute_data[-10:], headers='keys', tablefmt='psql')}"
        )

    return True
def anchored_vwap(
    ohlc_data: df, start_time: datetime, debug=False
) -> pd.Series:
    """Return the running VWAP of *ohlc_data* anchored at *start_time*.

    The anchor row is the bar whose index is nearest to *start_time*; an
    empty Series is returned when the anchor cannot be resolved. The input
    frame is not modified (the calculation works on a copy).
    """
    try:
        anchor_idx = ohlc_data["close"].index.get_loc(
            start_time, method="nearest"
        )
    except Exception as e:
        if debug:
            tlog(f"IndexError exception {e} in anchored_vwap for {ohlc_data}")
        return pd.Series()

    frame = ohlc_data.copy()
    # typical price * volume, computed column-wise instead of row-by-row
    frame["pv"] = (
        (frame["close"] + frame["high"] + frame["low"]) / 3 * frame["volume"]
    )
    # cumulative sums from the anchor row onward
    frame["apv"] = frame["pv"][anchor_idx:].cumsum()
    frame["av"] = frame["volume"][anchor_idx:].cumsum()
    frame["average"] = frame["apv"] / frame["av"]

    if debug:
        tlog(
            f"\n{tabulate(frame.average[anchor_idx:][-15:], headers='keys', tablefmt='psql')}"
        )
        tlog(
            f"\n{tabulate(frame.average[anchor_idx:][:15], headers='keys', tablefmt='psql')}"
        )

    return frame["average"][anchor_idx:]
| 29.455696 | 97 | 0.59089 |
c0cde2eecbce7918e2ea619e2f25f6c89a01fb5f | 664 | py | Python | qlang/tutorial/maxprime/maxprime.py | redshiftkeying/slowflow | 561d2ec9c83a712bad1239a292897e367fc4c97c | [
"MIT"
] | 271 | 2015-09-18T14:14:07.000Z | 2022-02-06T04:21:32.000Z | tutorial/maxprime/maxprime.py | aaple/qlang | dd8656b55eaf19f3813ca4e98f829e67df472c96 | [
"MIT"
] | 3 | 2015-09-20T04:27:06.000Z | 2017-05-28T14:14:40.000Z | tutorial/maxprime/maxprime.py | aaple/qlang | dd8656b55eaf19f3813ca4e98f829e67df472c96 | [
"MIT"
] | 77 | 2015-09-23T14:56:50.000Z | 2022-03-15T02:34:52.000Z | import sys
# Shared sieve state used by all functions below. (This is a Python 2
# script -- note the bare `print` statements in the driver section.)
primes = [2, 3]   # primes found so far, in increasing order
n = 1             # how many leading entries of `primes` are used as trial divisors
limit = 9         # primes[n] ** 2: candidates below this only need those divisors
def isPrime(v):
    # Trial-divide v by the first n known primes. Only valid when those n
    # primes include every prime <= sqrt(v) (the callers arrange this).
    i = 0
    while i < n:
        if v % primes[i] == 0:
            return False
        i += 1
    return True
def listPrimes(max):
    # Grow the global `primes` list with odd primes until the last prime
    # found satisfies v * v >= max, i.e. until primes cover sqrt(max).
    # Mutates the globals n and limit as the trial-divisor window expands.
    global n, limit
    v = 5
    while True:
        # test odd candidates while the current divisor set is sufficient
        while v < limit:
            if isPrime(v):
                primes.append(v)
                if v * v >= max:
                    return
            v += 2
        # v == limit here; limit is primes[n]**2, always composite, so it
        # is safe to skip it and widen the divisor window by one prime
        v += 2
        n += 1
        limit = primes[n] * primes[n]
def maxPrimeOf(max):
    # Return the largest prime <= max: make max odd, ensure `primes`
    # covers sqrt(max), then walk downward in steps of 2 trial-dividing
    # each candidate against the full `primes` list.
    global n
    if max % 2 == 0:
        max -= 1
    listPrimes(max)
    # use every known prime as a trial divisor from here on
    n = len(primes)
    while True:
        if isPrime(max):
            return max
        max -= 2
# Command-line driver (Python 2 syntax: bare `print` statements).
if len(sys.argv) < 2:
    print 'Usage: maxprime <Value>'
    sys.exit(1)
max = int(sys.argv[1])
if max < 8:
    # inputs below 8 are rejected with exit status 2
    sys.exit(2)
max -= 1
# largest prime strictly below the given value (max was decremented above)
v = maxPrimeOf(max)
print v
| 12.769231 | 32 | 0.567771 |
1e0dcc87ed4a453ffdffef9a4ad32f8da766fd62 | 27,950 | py | Python | Lib/random.py | laomaiweng/cpython | bcd3a1a18d841338f57c39f6a7de8cf14d0c3e03 | [
"PSF-2.0"
] | 2 | 2018-05-23T05:18:49.000Z | 2021-08-01T08:43:23.000Z | Lib/random.py | laomaiweng/cpython | bcd3a1a18d841338f57c39f6a7de8cf14d0c3e03 | [
"PSF-2.0"
] | 1 | 2019-03-19T04:19:09.000Z | 2019-03-19T04:19:09.000Z | Lib/random.py | laomaiweng/cpython | bcd3a1a18d841338f57c39f6a7de8cf14d0c3e03 | [
"PSF-2.0"
] | 1 | 2018-10-16T15:14:17.000Z | 2018-10-16T15:14:17.000Z | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
pick weighted random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
import itertools as _itertools
import bisect as _bisect
import os as _os
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits", "choices",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
    def __init_subclass__(cls, **kwargs):
        """Control how subclasses generate random integers.

        The algorithm a subclass can use depends on the random() and/or
        getrandbits() implementation available to it and determines
        whether it can generate random integers from arbitrarily large
        ranges.
        """
        # Walk the MRO so the *most derived* override decides the strategy;
        # the order of the three checks below is significant.
        for c in cls.__mro__:
            if '_randbelow' in c.__dict__:
                # just inherit it
                break
            if 'getrandbits' in c.__dict__:
                cls._randbelow = cls._randbelow_with_getrandbits
                break
            if 'random' in c.__dict__:
                cls._randbelow = cls._randbelow_without_getrandbits
                break
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If *a* is an int, all bits are used.

        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray.  For version 1 (provided for reproducing random
        sequences from older versions of Python), the algorithm for str and
        bytes generates a narrower range of seeds.
        """
        if version == 1 and isinstance(a, (str, bytes)):
            # Version-1 path: re-implements the historical string-hash
            # algorithm (the 1000003 multiplier) so sequences seeded under
            # older Pythons can be reproduced, masked to 64 bits.
            a = a.decode('latin-1') if isinstance(a, bytes) else a
            x = ord(a[0]) << 7 if a else 0
            for c in map(ord, a):
                x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF
            x ^= len(a)
            # -1 is remapped to -2, mirroring hash() which never returns -1
            a = -2 if x == -1 else x

        if version == 2 and isinstance(a, (str, bytes, bytearray)):
            if isinstance(a, str):
                a = a.encode()
            # append a SHA-512 digest so all bits of the input influence
            # the resulting integer seed
            a += _sha512(a).digest()
            a = int.from_bytes(a, 'big')

        super().seed(a)
        self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            #   inconsistencies between 32/64-bit systems. The state is
            #   really unsigned 32-bit ints, so we convert negative ints from
            #   version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                # non-integer entries: surface as TypeError, chained for context
                raise TypeError from e
            super().setstate(internalstate)
        else:
            # any other version marker is unsupported
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
    # Issue 17489: Since __reduce__ was defined to fix #759889 this is no
    # longer called; we leave it here because it has been here since random was
    # rewritten back in 2001 and why risk breaking something.
    def __getstate__(self):  # for pickle
        return self.getstate()

    def __setstate__(self, state):  # for pickle
        self.setstate(state)

    def __reduce__(self):
        # pickle as (class, no ctor args, state) so subclasses round-trip too
        return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).

        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        # (_int is bound as a default argument for a faster local lookup.)
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            # one-argument form: randrange(stop) -> [0, start)
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")

        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            # fast path: unit step, non-empty range
            return istart + self._randbelow(width)
        if step == 1:
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))

        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        # n = number of elements in range(istart, istop, istep)
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")

        if n <= 0:
            raise ValueError("empty range for randrange()")

        return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow_with_getrandbits(self, n):
"Return a random int in the range [0,n). Raises ValueError if n==0."
getrandbits = self.getrandbits
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
    def _randbelow_without_getrandbits(self, n, int=int, maxsize=1<<BPF):
        """Return a random int in the range [0,n).  Raises ValueError if n==0.

        The implementation does not use getrandbits, but only random.
        """
        random = self.random
        if n >= maxsize:
            # random() carries only BPF (53) bits of randomness, so ranges
            # this large cannot be covered exactly; warn and fall back.
            _warn("Underlying random() generator does not supply \n"
                  "enough bits to choose from a population range this large.\n"
                  "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        if n == 0:
            raise ValueError("Boundary cannot be zero")
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        # rejection sampling: discard draws from the trailing partial block
        # so the modulo result below is unbiased
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n

    # default strategy; __init_subclass__ may rebind this per subclass
    _randbelow = _randbelow_with_getrandbits
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence') from None
return seq[i]
def shuffle(self, x, random=None):
    """Shuffle list x in place, and return None.

    Optional argument random is a 0-argument function returning a
    random float in [0.0, 1.0); if it is the default None, the
    standard random.random will be used.
    """
    # Fisher-Yates: walk backwards, swapping each slot with a uniformly
    # chosen slot at or below it.
    if random is None:
        pick = self._randbelow
        for hi in reversed(range(1, len(x))):
            lo = pick(hi + 1)
            x[hi], x[lo] = x[lo], x[hi]
    else:
        to_int = int
        for hi in reversed(range(1, len(x))):
            lo = to_int(random() * (hi + 1))
            x[hi], x[lo] = x[lo], x[hi]
def sample(self, population, k):
    """Chooses k unique random elements from a population sequence or set.

    Returns a new list containing elements from the population while
    leaving the original population unchanged.  The resulting list is
    in selection order so that all sub-slices will also be valid random
    samples.  This allows raffle winners (the sample) to be partitioned
    into grand prize and second place winners (the subslices).

    Members of the population need not be hashable or unique.  If the
    population contains repeats, then each occurrence is a possible
    selection in the sample.

    To choose a sample in a range of integers, use range as an argument.
    This is especially fast and space efficient for sampling from a
    large population:   sample(range(10000000), 60)
    """

    # Sampling without replacement entails tracking either potential
    # selections (the pool) in a list or previous selections in a set.

    # When the number of selections is small compared to the
    # population, then tracking selections is efficient, requiring
    # only a small set and an occasional reselection.  For
    # a larger number of selections, the pool tracking method is
    # preferred since the list takes less space than the
    # set and it doesn't suffer from frequent reselections.

    if isinstance(population, _Set):
        # Sets cannot be indexed; snapshot into a tuple first.
        population = tuple(population)
    if not isinstance(population, _Sequence):
        raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
    randbelow = self._randbelow
    n = len(population)
    if not 0 <= k <= n:
        raise ValueError("Sample larger than population or is negative")
    result = [None] * k
    setsize = 21        # size of a small set minus size of an empty list
    if k > 5:
        setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
    if n <= setsize:
        # An n-length list is smaller than a k-length set
        pool = list(population)
        for i in range(k):         # invariant:  non-selected at [0,n-i)
            j = randbelow(n-i)
            result[i] = pool[j]
            pool[j] = pool[n-i-1]   # move non-selected item into vacancy
    else:
        # Track chosen indices in a set and simply re-draw on collision;
        # cheap while k is small relative to n.
        selected = set()
        selected_add = selected.add
        for i in range(k):
            j = randbelow(n)
            while j in selected:
                j = randbelow(n)
            selected_add(j)
            result[i] = population[j]
    return result
def choices(self, population, weights=None, *, cum_weights=None, k=1):
    """Return a k sized list of population elements chosen with replacement.

    If the relative weights or cumulative weights are not specified,
    the selections are made with equal probability.

    Raises TypeError if both *weights* and *cum_weights* are given, and
    ValueError if the number of weights does not match the population.
    """
    random = self.random
    if cum_weights is None:
        if weights is None:
            # Fast path: equal probability needs no bisection at all.
            _int = int
            total = len(population)
            return [population[_int(random() * total)] for i in range(k)]
        cum_weights = list(_itertools.accumulate(weights))
    elif weights is not None:
        raise TypeError('Cannot specify both weights and cumulative weights')
    if len(cum_weights) != len(population):
        raise ValueError('The number of weights does not match the population')
    bisect = _bisect.bisect
    total = cum_weights[-1]
    # Bug fix: clamp the bisect search to len(population) - 1.  Floating
    # point rounding can make random() * total compare >= cum_weights[-1],
    # and an unbounded bisect would then return len(population) and raise
    # IndexError (this matches the clamp later added upstream in CPython).
    hi = len(cum_weights) - 1
    return [population[bisect(cum_weights, random() * total, 0, hi)]
            for i in range(k)]
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
    "Get a random number in the range [a, b) or [a, b] depending on rounding."
    # Scale a unit draw onto the interval; whether b itself can appear
    # depends on floating-point rounding of the multiply-add.
    span = b - a
    return a + span * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
    """Triangular distribution.

    Continuous distribution bounded by given lower and upper limits,
    and having a given mode value in-between.

    http://en.wikipedia.org/wiki/Triangular_distribution
    """
    u = self.random()
    try:
        # Fraction of the interval at which the peak sits; 0.5 when the
        # caller supplied no mode (symmetric triangle).
        pivot = 0.5 if mode is None else (mode - low) / (high - low)
    except ZeroDivisionError:
        # Degenerate interval (low == high): every draw is that value.
        return low
    if u > pivot:
        # Reflect the draw into the left branch of the inverse CDF and
        # swap the endpoints so one formula serves both sides.
        u = 1.0 - u
        pivot = 1.0 - pivot
        low, high = high, low
    return low + (high - low) * _sqrt(u * pivot)
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
    """Normal distribution.

    mu is the mean, and sigma is the standard deviation.
    """
    # Kinderman & Monahan ratio-of-uniforms method.  Reference:
    # Kinderman, A.J. and Monahan, J.F., "Computer generation of random
    # variables using the ratio of uniform deviates", ACM Trans Math
    # Software, 3, (1977), pp257-260.
    random = self.random
    while True:
        u1 = random()
        u2 = 1.0 - random()          # 1-random() keeps u2 strictly positive
        z = NV_MAGICCONST * (u1 - 0.5) / u2
        # Accept the candidate once the log test passes.
        if z * z / 4.0 <= -_log(u2):
            return mu + z * sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
    """Log normal distribution.

    If you take the natural logarithm of this distribution, you'll get a
    normal distribution with mean mu and standard deviation sigma.
    mu can have any value, and sigma must be greater than zero.
    """
    # By definition: exponentiate a normal deviate.
    normal_draw = self.normalvariate(mu, sigma)
    return _exp(normal_draw)
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
    """Exponential distribution.

    lambd is 1.0 divided by the desired mean.  It should be
    nonzero.  (The parameter would be called "lambda", but that is
    a reserved word in Python.)  Returned values range from 0 to
    positive infinity if lambd is positive, and from negative
    infinity to 0 if lambd is negative.
    """
    # Inverse-CDF sampling.  1.0 - random() rather than random() keeps
    # the argument of log() strictly positive.
    u = 1.0 - self.random()
    return -_log(u) / lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
    """Circular data distribution.

    mu is the mean angle, expressed in radians between 0 and 2*pi, and
    kappa is the concentration parameter, which must be greater than or
    equal to zero.  If kappa is equal to zero, this distribution reduces
    to a uniform random angle over the range 0 to 2*pi.
    """
    # mu:    mean angle (in radians between 0 and 2*pi)
    # kappa: concentration parameter kappa (>= 0)
    # if kappa = 0 generate uniform random angle

    # Based upon an algorithm published in: Fisher, N.I.,
    # "Statistical Analysis of Circular Data", Cambridge
    # University Press, 1993.

    # Thanks to Magnus Kessler for a correction to the
    # implementation of step 4.

    random = self.random
    if kappa <= 1e-6:
        # Effectively no concentration: fall back to a uniform angle.
        return TWOPI * random()

    s = 0.5 / kappa
    r = s + _sqrt(1.0 + s * s)

    # Rejection loop: draw candidates until the acceptance test passes.
    while 1:
        u1 = random()
        z = _cos(_pi * u1)

        d = z / (r + z)
        u2 = random()
        if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
            break

    q = 1.0 / r
    f = (q + z) / (1.0 + q * z)
    u3 = random()
    # u3 decides which side of the mean the deviate falls on; the modulo
    # wraps the result back into [0, 2*pi).
    if u3 > 0.5:
        theta = (mu + _acos(f)) % TWOPI
    else:
        theta = (mu - _acos(f)) % TWOPI

    return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
    """Gamma distribution.  Not the gamma function!

    Conditions on the parameters are alpha > 0 and beta > 0.

    The probability distribution function is:

                x ** (alpha - 1) * math.exp(-x / beta)
      pdf(x) =  --------------------------------------
                  math.gamma(alpha) * beta ** alpha

    """
    # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

    # Warning: a few older sources define the gamma distribution in terms
    # of alpha > -1.0
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError('gammavariate: alpha and beta must be > 0.0')

    random = self.random
    if alpha > 1.0:

        # Uses R.C.H. Cheng, "The generation of Gamma
        # variables with non-integral shape parameters",
        # Applied Statistics, (1977), 26, No. 1, p71-74

        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv

        while 1:
            u1 = random()
            if not 1e-7 < u1 < .9999999:
                # Discard extreme draws that would blow up the log/exp below.
                continue
            u2 = 1.0 - random()
            v = _log(u1/(1.0-u1))/ainv
            x = alpha*_exp(v)
            z = u1*u1*u2
            r = bbb+ccc*v-x
            # Cheap squeeze test first; exact log test as the fallback.
            if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                return x * beta

    elif alpha == 1.0:
        # expovariate(1/beta) -- re-draw to keep the log argument nonzero.
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u) * beta

    else:   # alpha is between 0 and 1 (exclusive)

        # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

        while 1:
            u = random()
            b = (_e + alpha)/_e
            p = b*u
            if p <= 1.0:
                x = p ** (1.0/alpha)
            else:
                x = -_log((b-p)/alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x ** (alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
    """Gaussian distribution.

    mu is the mean, and sigma is the standard deviation.  This is
    slightly faster than the normalvariate() function.

    Not thread-safe without a lock around calls.
    """
    # Each (angle, radius) pair yields two independent standard-normal
    # deviates via cos/sin; the second one is cached in self.gauss_next
    # for the following call.  Two threads calling simultaneously can
    # observe the same cached value, hence the thread-safety caveat --
    # callers needing safety must serialize calls themselves.
    random = self.random
    z = self.gauss_next
    self.gauss_next = None
    if z is None:
        angle = random() * TWOPI
        radius = _sqrt(-2.0 * _log(1.0 - random()))
        z = _cos(angle) * radius
        self.gauss_next = _sin(angle) * radius
    return mu + z * sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
    """Beta distribution.

    Conditions on the parameters are alpha > 0 and beta > 0.
    Returned values range between 0 and 1.
    """
    # Janne Sinkkonen's construction, matching the standard texts
    # (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"): the
    # normalized ratio of two gamma deviates with shapes alpha and
    # beta is Beta(alpha, beta) distributed.
    y = self.gammavariate(alpha, 1.0)
    if not y:
        return 0.0
    return y / (y + self.gammavariate(beta, 1.0))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
    """Pareto distribution.  alpha is the shape parameter."""
    # Inverse-CDF sampling (Jain, pg. 495).  Using 1 - random() keeps the
    # base strictly positive, so the power never divides by zero.
    tail = 1.0 - self.random()
    return 1.0 / tail ** (1.0 / alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
    """Weibull distribution.

    alpha is the scale parameter and beta is the shape parameter.
    """
    # Inverse-CDF sampling (Jain, pg. 499; bug fix courtesy Bill Arms).
    # 1 - random() keeps the log() argument strictly positive.
    u = 1.0 - self.random()
    return alpha * (-_log(u)) ** (1.0 / beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

     Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Pull 7 bytes (56 bits) of entropy and drop the low 3 bits so
        # exactly the mantissa-width bits remain before scaling to [0, 1).
        raw = int.from_bytes(_urandom(7), 'big')
        return (raw >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8                   # bits / 8 and rounded up
        value = int.from_bytes(_urandom(nbytes), 'big')
        return value >> (nbytes * 8 - k)        # trim excess bits

    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    # OS entropy has no reproducible state, so both state accessors raise.
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
    """Call func(*args) n times and print timing plus summary statistics
    (average, standard deviation, min, max) of the returned values."""
    import time
    print(n, 'times', func.__name__)
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    # perf_counter() is the correct benchmarking clock: monotonic and
    # high-resolution, unlike time.time() which can jump with wall-clock
    # adjustments.
    t0 = time.perf_counter()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.perf_counter()
    print(round(t1-t0, 3), 'sec,', end=' ')
    avg = total/n
    # Clamp the variance at zero: floating-point cancellation can push
    # sqsum/n - avg*avg slightly negative for near-constant samples, which
    # would make _sqrt() raise a ValueError.
    stddev = _sqrt(max(0.0, sqsum/n - avg*avg))
    print('avg %g, stddev %g, min %g, max %g\n' % \
          (avg, stddev, smallest, largest))
def _test(N=2000):
    """Exercise each distribution N times and print its summary stats."""
    # Table-driven: each entry is (distribution function, argument tuple),
    # run in the same order as before.
    cases = [
        (random, ()),
        (normalvariate, (0.0, 1.0)),
        (lognormvariate, (0.0, 1.0)),
        (vonmisesvariate, (0.0, 1.0)),
        (gammavariate, (0.01, 1.0)),
        (gammavariate, (0.1, 1.0)),
        (gammavariate, (0.1, 2.0)),
        (gammavariate, (0.5, 1.0)),
        (gammavariate, (0.9, 1.0)),
        (gammavariate, (1.0, 1.0)),
        (gammavariate, (2.0, 1.0)),
        (gammavariate, (20.0, 1.0)),
        (gammavariate, (200.0, 1.0)),
        (gauss, (0.0, 1.0)),
        (betavariate, (3.0, 3.0)),
        (triangular, (0.0, 1.0, 1.0/3.0)),
    ]
    for func, args in cases:
        _test_generator(N, func, args)
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.

_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
choices = _inst.choices
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits

if hasattr(_os, "fork"):
    # Reseed the child process after fork() so parent and child do not
    # go on to produce identical random streams.
    _os.register_at_fork(after_in_child=_inst.seed)

if __name__ == '__main__':
    _test()
| 35.025063 | 96 | 0.566655 |
f75d593cc638bc8959e81ee779359c2bb78579de | 682 | py | Python | instaclone/email.py | morrisonGithinji/morris-Instagram | a0145452c8ea05b0d2818989c36aa409c23b3ad2 | [
"MIT"
] | null | null | null | instaclone/email.py | morrisonGithinji/morris-Instagram | a0145452c8ea05b0d2818989c36aa409c23b3ad2 | [
"MIT"
] | null | null | null | instaclone/email.py | morrisonGithinji/morris-Instagram | a0145452c8ea05b0d2818989c36aa409c23b3ad2 | [
"MIT"
] | null | null | null | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_welcome_email(name, receiver):
    """Send the Morrisinsta welcome email to ``receiver``.

    Renders a plain-text and an HTML body from the ``email/`` templates
    (each receives ``name`` as context) and sends them together as a
    multipart/alternative message.
    """
    # Fixed typos in the user-facing subject line ("allyour" -> "all your").
    subject = ('Welcome to Morrisinsta, for all your photo uploads and '
               'also get to view your friends photos')
    sender = 'morrison.githinji@student.moringaschool.com'
    # Render both alternative bodies with the same template context.
    text_content = render_to_string('email/instaemail.txt', {"name": name})
    html_content = render_to_string('email/instaemail.html', {"name": name})
    msg = EmailMultiAlternatives(subject, text_content, sender, [receiver])
    msg.attach_alternative(html_content, 'text/html')
    msg.send()
0275fc10dc6753351261742ac71fc32e76576e11 | 30,028 | py | Python | src/syft/lib/python/__init__.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | null | null | null | src/syft/lib/python/__init__.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | null | null | null | src/syft/lib/python/__init__.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | null | null | null | # stdlib
from typing import Optional
# syft relative
from . import collections
from ...ast import add_classes
from ...ast import add_methods
from ...ast import add_modules
from ...ast.globals import Globals
from ...core.node.abstract.node import AbstractNodeClient
from ..misc.union import UnionGenerator
from .bool import Bool
from .complex import Complex
from .dict import Dict
from .dict import DictWrapper
from .float import Float
from .int import Int
from .iterator import Iterator
from .list import List
from .namedtuple import ValuesIndices
from .namedtuple import ValuesIndicesWrapper
from .none import SyNone
from .none import _SyNone
from .primitive_container import Any
from .primitive_interface import PyPrimitive
from .protobuf import GenerateProtobufWrapper # noqa: 401
from .set import Set
from .string import String
from .tuple import Tuple
# Rebrand each serializable primitive wrapper so it reports this package
# as its home module.  NOTE(review): presumably so reprs and serialized
# type paths read "syft.lib.python.X" instead of the private defining
# submodule -- confirm against the serde layer.
for syft_type in [
    Bool,
    Complex,
    Dict,
    DictWrapper,
    Float,
    Int,
    SyNone,
    _SyNone,
    Any,
    PyPrimitive,
    String,
    Tuple,
    ValuesIndices,
    ValuesIndicesWrapper,
]:
    syft_type.__module__ = __name__
def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals:
ast = Globals(client)
modules = ["syft", "syft.lib", "syft.lib.python", "syft.lib.python.collections"]
classes = [
("syft.lib.python.Bool", "syft.lib.python.Bool", Bool),
("syft.lib.python.Complex", "syft.lib.python.Complex", Complex),
("syft.lib.python.Dict", "syft.lib.python.Dict", Dict),
("syft.lib.python.Float", "syft.lib.python.Float", Float),
("syft.lib.python.Int", "syft.lib.python.Int", Int),
("syft.lib.python.List", "syft.lib.python.List", List),
("syft.lib.python.String", "syft.lib.python.String", String),
("syft.lib.python._SyNone", "syft.lib.python._SyNone", _SyNone),
("syft.lib.python.PyPrimitive", "syft.lib.python.PyPrimitive", PyPrimitive),
("syft.lib.python.Any", "syft.lib.python.Any", Any),
("syft.lib.python.Tuple", "syft.lib.python.Tuple", Tuple),
("syft.lib.python.Iterator", "syft.lib.python.Iterator", Iterator),
("syft.lib.python.Set", "syft.lib.python.Set", Set),
(
"syft.lib.python.collections.OrderedDict",
"syft.lib.python.collections.OrderedDict",
collections.OrderedDict,
),
(
"syft.lib.python.ValuesIndices",
"syft.lib.python.ValuesIndices",
ValuesIndices,
),
]
methods = [
# List methods - quite there
("syft.lib.python.List.__len__", "syft.lib.python.Int"),
("syft.lib.python.List.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.List.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.List.__add__", "syft.lib.python.List"),
("syft.lib.python.List.append", "syft.lib.python._SyNone"),
("syft.lib.python.List.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.List.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.List.__le__", "syft.lib.python.Bool"),
("syft.lib.python.List.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.List.__iadd__", "syft.lib.python.List"),
("syft.lib.python.List.__imul__", "syft.lib.python.List"),
("syft.lib.python.List.__iadd__", "syft.lib.python.List"),
("syft.lib.python.List.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.List.__delattr__", "syft.lib.python.None"),
("syft.lib.python.List.__delitem__", "syft.lib.python.None"),
("syft.lib.python.List.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.List.__mul__", "syft.lib.python.List"),
("syft.lib.python.List.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.List.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.List.__len__", "syft.lib.python.Int"),
("syft.lib.python.List.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.List.__setitem__", "syft.lib.python._SyNone"),
("syft.lib.python.List.__rmul__", "syft.lib.python.List"),
("syft.lib.python.List.copy", "syft.lib.python.List"),
("syft.lib.python.List.count", "syft.lib.python.Int"),
("syft.lib.python.List.sort", "syft.lib.python._SyNone"),
("syft.lib.python.List.reverse", "syft.lib.python._SyNone"),
("syft.lib.python.List.remove", "syft.lib.python._SyNone"),
("syft.lib.python.List.pop", "syft.lib.python.Any"),
("syft.lib.python.List.index", "syft.lib.python.Any"),
("syft.lib.python.List.insert", "syft.lib.python._SyNone"),
("syft.lib.python.List.clear", "syft.lib.python._SyNone"),
("syft.lib.python.List.extend", "syft.lib.python._SyNone"),
("syft.lib.python.List.__reversed__", "syft.lib.python.Iterator"),
("syft.lib.python.List.__delitem__", "syft.lib.python._SyNone"),
# Bool methods - quite there
("syft.lib.python.Bool.__abs__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__add__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__and__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__ceil__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Bool.__floor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__float__", "syft.lib.python.Float"),
("syft.lib.python.Bool.__floordiv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__invert__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__lshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__mod__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__mul__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__neg__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__or__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__pos__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__pow__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__radd__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rand__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rdivmod__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rfloordiv__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rlshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rmod__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rmul__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ror__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__round__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rpow__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rrshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rsub__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rtruediv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rxor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__sub__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__truediv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__trunc__", "syft.lib.python.Int"),
("syft.lib.python.Bool.conjugate", "syft.lib.python.Int"),
("syft.lib.python.Bool.bit_length", "syft.lib.python.Int"),
("syft.lib.python.Bool.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Bool.numerator", "syft.lib.python.Int"),
("syft.lib.python.Bool.real", "syft.lib.python.Int"),
("syft.lib.python.Bool.imag", "syft.lib.python.Int"),
("syft.lib.python.Bool.denominator", "syft.lib.python.Int"),
# Float methods - subject to further change due
("syft.lib.python.Float.__add__", "syft.lib.python.Float"),
("syft.lib.python.Float.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__divmod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__add__", "syft.lib.python.Float"),
("syft.lib.python.Float.__abs__", "syft.lib.python.Float"),
("syft.lib.python.Float.__bool__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__sub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rsub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__mul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rmul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Float.__int__", "syft.lib.python.Int"),
("syft.lib.python.Float.__neg__", "syft.lib.python.Float"),
("syft.lib.python.Float.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__floordiv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__mod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rmod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rdivmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Float.__rfloordiv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__round__", "syft.lib.python.Int"),
("syft.lib.python.Float.__rtruediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.Float.__trunc__", "syft.lib.python.Int"),
("syft.lib.python.Float.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Float.is_integer", "syft.lib.python.Bool"),
("syft.lib.python.Float.__pow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rpow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__iadd__", "syft.lib.python.Float"),
("syft.lib.python.Float.__isub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__imul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__imod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__ipow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__pos__", "syft.lib.python.Float"),
("syft.lib.python.Float.conjugate", "syft.lib.python.Float"),
("syft.lib.python.Float.imag", "syft.lib.python.Int"),
("syft.lib.python.Float.real", "syft.lib.python.Float"),
# String Methods
("syft.lib.python.String.__add__", "syft.lib.python.String"),
("syft.lib.python.String.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.String.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.String.__float__", "syft.lib.python.Float"),
("syft.lib.python.String.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.String.__getitem__", "syft.lib.python.String"),
("syft.lib.python.String.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.String.__int__", "syft.lib.python.Int"),
("syft.lib.python.String.__iter__", "syft.lib.python.Any"),
("syft.lib.python.String.__le__", "syft.lib.python.Bool"),
("syft.lib.python.String.__len__", "syft.lib.python.Int"),
("syft.lib.python.String.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.String.__mod__", "syft.lib.python.String"),
("syft.lib.python.String.__mul__", "syft.lib.python.String"),
("syft.lib.python.String.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.String.__reversed__", "syft.lib.python.String"),
("syft.lib.python.String.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.String.__str__", "syft.lib.python.String"),
("syft.lib.python.String.capitalize", "syft.lib.python.String"),
("syft.lib.python.String.casefold", "syft.lib.python.String"),
("syft.lib.python.String.center", "syft.lib.python.String"),
("syft.lib.python.String.count", "syft.lib.python.Int"),
("syft.lib.python.String.encode", "syft.lib.python.String"),
("syft.lib.python.String.expandtabs", "syft.lib.python.String"),
("syft.lib.python.String.find", "syft.lib.python.Int"),
("syft.lib.python.String.format", "syft.lib.python.String"),
("syft.lib.python.String.format_map", "syft.lib.python.String"),
("syft.lib.python.String.index", "syft.lib.python.Int"),
("syft.lib.python.String.isalnum", "syft.lib.python.Bool"),
("syft.lib.python.String.isalpha", "syft.lib.python.Bool"),
("syft.lib.python.String.isdecimal", "syft.lib.python.Bool"),
("syft.lib.python.String.isdigit", "syft.lib.python.Bool"),
("syft.lib.python.String.isidentifier", "syft.lib.python.Bool"),
("syft.lib.python.String.islower", "syft.lib.python.Bool"),
("syft.lib.python.String.isnumeric", "syft.lib.python.Bool"),
("syft.lib.python.String.isprintable", "syft.lib.python.Bool"),
("syft.lib.python.String.isspace", "syft.lib.python.Bool"),
("syft.lib.python.String.isupper", "syft.lib.python.Bool"),
("syft.lib.python.String.join", "syft.lib.python.String"),
("syft.lib.python.String.ljust", "syft.lib.python.String"),
("syft.lib.python.String.lower", "syft.lib.python.String"),
("syft.lib.python.String.lstrip", "syft.lib.python.String"),
("syft.lib.python.String.partition", "syft.lib.python.Tuple"),
("syft.lib.python.String.replace", "syft.lib.python.String"),
("syft.lib.python.String.rfind", "syft.lib.python.Int"),
("syft.lib.python.String.rindex", "syft.lib.python.Int"),
("syft.lib.python.String.rjust", "syft.lib.python.String"),
("syft.lib.python.String.rpartition", "syft.lib.python.Tuple"),
("syft.lib.python.String.rsplit", "syft.lib.python.List"),
("syft.lib.python.String.rstrip", "syft.lib.python.String"),
("syft.lib.python.String.split", "syft.lib.python.List"),
("syft.lib.python.String.splitlines", "syft.lib.python.List"),
("syft.lib.python.String.startswith", "syft.lib.python.Bool"),
("syft.lib.python.String.strip", "syft.lib.python.String"),
("syft.lib.python.String.swapcase", "syft.lib.python.String"),
("syft.lib.python.String.title", "syft.lib.python.String"),
("syft.lib.python.String.translate", "syft.lib.python.String"),
("syft.lib.python.String.upper", "syft.lib.python.String"),
("syft.lib.python.String.zfill", "syft.lib.python.String"),
("syft.lib.python.String.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.String.__rmul__", "syft.lib.python.String"),
("syft.lib.python.String.endswith", "syft.lib.python.Bool"),
("syft.lib.python.String.isascii", "syft.lib.python.Bool"),
("syft.lib.python.String.istitle", "syft.lib.python.Bool"),
# Dict methods
("syft.lib.python.Dict.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__format__", "syft.lib.python.String"),
("syft.lib.python.Dict.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.Dict.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__iter__", "syft.lib.python.Any"),
("syft.lib.python.Dict.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__len__", "syft.lib.python.Int"),
("syft.lib.python.Dict.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.Dict.__str__", "syft.lib.python.String"),
("syft.lib.python.Dict.copy", "syft.lib.python.Dict"),
("syft.lib.python.Dict.fromkeys", "syft.lib.python.Dict"),
# TODO: name conflict with syft.get()
# ("syft.lib.python.Dict.get", "syft.lib.python.Any"),
("syft.lib.python.Dict.items", "syft.lib.python.List"),
("syft.lib.python.Dict.keys", "syft.lib.python.List"),
("syft.lib.python.Dict.pop", "syft.lib.python.Any"),
("syft.lib.python.Dict.popitem", "syft.lib.python.Tuple"),
("syft.lib.python.Dict.setdefault", "syft.lib.python.Any"),
("syft.lib.python.Dict.values", "syft.lib.python.List"),
# Int methods - subject to further change
("syft.lib.python.Int.__add__", "syft.lib.python.Int"),
("syft.lib.python.Int.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__divmod__", "syft.lib.python.Float"),
("syft.lib.python.Int.__floordiv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__invert__", "syft.lib.python.Int"),
("syft.lib.python.Int.__abs__", "syft.lib.python.Int"),
("syft.lib.python.Int.__bool__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Int.__rdivmod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__radd__", "syft.lib.python.Int"),
("syft.lib.python.Int.__sub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rsub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rtruediv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__mul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rmul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ceil__", "syft.lib.python.Int"),
("syft.lib.python.Int.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__float__", "syft.lib.python.Float"),
("syft.lib.python.Int.__floor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__floordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rfloordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__mod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rmod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__pow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rpow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__lshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rlshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__round__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rrshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__and__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rand__", "syft.lib.python.Int"),
("syft.lib.python.Int.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rxor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__or__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ror__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__iadd__", "syft.lib.python.Int"),
("syft.lib.python.Int.__isub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__imul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ifloordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__itruediv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__imod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ipow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__neg__", "syft.lib.python.Int"),
("syft.lib.python.Int.__pos__", "syft.lib.python.Int"),
("syft.lib.python.Int.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Int.bit_length", "syft.lib.python.Int"),
("syft.lib.python.Int.denominator", "syft.lib.python.Int"),
("syft.lib.python.Int.from_bytes", "syft.lib.python.Int"),
("syft.lib.python.Int.real", "syft.lib.python.Int"),
("syft.lib.python.Int.imag", "syft.lib.python.Int"),
("syft.lib.python.Int.numerator", "syft.lib.python.Int"),
("syft.lib.python.Int.conjugate", "syft.lib.python.Int"),
("syft.lib.python.Int.__trunc__", "syft.lib.python.Int"),
# Tuple
("syft.lib.python.Tuple.__add__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__mul__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__rmul__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__len__", "syft.lib.python.Int"),
("syft.lib.python.Tuple.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.Tuple.count", "syft.lib.python.Int"),
("syft.lib.python.Tuple.index", "syft.lib.python.Int"),
("syft.lib.python.Tuple.__iter__", "syft.lib.python.Any"),
# PyContainer - quite there
("syft.lib.python.Any.__add__", "syft.lib.python.Any"),
("syft.lib.python.Any.__iter__", "syft.lib.python.Any"),
("syft.lib.python.Any.__next__", "syft.lib.python.Any"),
("syft.lib.python.Any.__radd__", "syft.lib.python.Any"),
("syft.lib.python.Any.__truediv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rtruediv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__floordiv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rfloordiv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__mul__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rmul__", "syft.lib.python.Any"),
("syft.lib.python.Any.__sub__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rsub__", "syft.lib.python.Any"),
(
"syft.lib.python.Iterator.__next__",
UnionGenerator[
"syft.lib.python.Int",
"syft.lib.python.Float",
"syft.lib.python.String",
"torch.nn.Parameter",
"torch.Tensor",
],
), # temp until casting
("syft.lib.python.Iterator.__iter__", "syft.lib.python.Any"),
("syft.lib.python.Set.__and__", "syft.lib.python.Set"),
("syft.lib.python.Set.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__iand__", "syft.lib.python.Set"),
("syft.lib.python.Set.__ior__", "syft.lib.python.Set"),
("syft.lib.python.Set.__isub__", "syft.lib.python.Set"),
("syft.lib.python.Set.__ixor__", "syft.lib.python.Set"),
("syft.lib.python.Set.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__len__", "syft.lib.python.Int"),
("syft.lib.python.Set.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__or__", "syft.lib.python.Set"),
("syft.lib.python.Set.__sub__", "syft.lib.python.Set"),
("syft.lib.python.Set.__xor__", "syft.lib.python.Set"),
("syft.lib.python.Set.add", "syft.lib.python._SyNone"),
("syft.lib.python.Set.clear", "syft.lib.python._SyNone"),
("syft.lib.python.Set.difference", "syft.lib.python.Set"),
("syft.lib.python.Set.difference_update", "syft.lib.python._SyNone"),
("syft.lib.python.Set.discard", "syft.lib.python._SyNone"),
("syft.lib.python.Set.intersection", "syft.lib.python.Set"),
("syft.lib.python.Set.intersection_update", "syft.lib.python._SyNone"),
("syft.lib.python.Set.isdisjoint", "syft.lib.python.Bool"),
("syft.lib.python.Set.issuperset", "syft.lib.python.Bool"),
("syft.lib.python.Set.pop", "syft.lib.python._SyNone"),
("syft.lib.python.Set.remove", "syft.lib.python._SyNone"),
(
"syft.lib.python.Set.symmetric_difference_update",
"syft.lib.python._SyNone",
),
("syft.lib.python.Set.symmetric_difference", "syft.lib.python.Set"),
("syft.lib.python.Set.union", "syft.lib.python.Set"),
("syft.lib.python.Set.update", "syft.lib.python._SyNone"),
# ValueIndicies
(
"syft.lib.python.ValuesIndices.values",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.indices",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.eigenvalues",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.eigenvectors",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.solution",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.QR",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.sign",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.logabsdet",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.Q",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.R",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.LU",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.cloned_coefficient",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.U",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.S",
"torch.Tensor",
),
(
"syft.lib.python.ValuesIndices.V",
"torch.Tensor",
),
(
"syft.lib.python.collections.OrderedDict.__contains__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__delitem__",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.__eq__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__ge__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__getitem__",
"syft.lib.python.Any",
),
(
"syft.lib.python.collections.OrderedDict.__gt__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__le__",
"syft.lib.python.Bool",
),
("syft.lib.python.collections.OrderedDict.__len__", "syft.lib.python.Int"),
(
"syft.lib.python.collections.OrderedDict.__lt__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__ne__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__setitem__",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.clear",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.copy",
"syft.lib.python.collections.OrderedDict",
),
(
"syft.lib.python.collections.OrderedDict.fromkeys",
"syft.lib.python.collections.OrderedDict",
),
("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.List"),
("syft.lib.python.collections.OrderedDict.keys", "syft.lib.python.List"),
(
"syft.lib.python.collections.OrderedDict.move_to_end",
"syft.lib.python._SyNone",
),
("syft.lib.python.collections.OrderedDict.pop", "syft.lib.python.Any"),
("syft.lib.python.collections.OrderedDict.popitem", "syft.lib.python.Any"),
(
"syft.lib.python.collections.OrderedDict.setdefault",
"syft.lib.python.Any",
),
(
"syft.lib.python.collections.OrderedDict.update",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.values",
"syft.lib.python.List",
),
("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.List"),
(
"syft.lib.python.collections.OrderedDict.dict_get",
"syft.lib.python.Any",
),
]
add_modules(ast, modules)
add_classes(ast, classes)
add_methods(ast, methods)
for klass in ast.classes:
klass.create_pointer_class()
klass.create_send_method()
klass.create_serialization_methods()
klass.create_storable_object_attr_convenience_methods()
return ast
| 51.329915 | 84 | 0.610963 |
1fb372f8b978dc1514e999757ceec34d0522893b | 1,508 | py | Python | experiments/ashvin/vae/fixed/pointmass/test.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/vae/fixed/pointmass/test.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/vae/fixed/pointmass/test.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | from rlkit.envs.multitask.point2d import MultitaskImagePoint2DEnv
from rlkit.envs.multitask.pusher2d import FullPusher2DEnv
from rlkit.launchers.arglauncher import run_variants
import rlkit.misc.hyperparameter as hyp
from rlkit.torch.vae.vae_experiment import experiment
if __name__ == "__main__":
    # noinspection PyTypeChecker
    # Pretrained VAE checkpoints, keyed by string rdim ("2" matches the
    # rdim value swept in search_space below).
    vae_paths = {
        "2": "ashvin/vae/new-point2d/run0/id0/params.pkl",
    }
    # Default configuration shared by every launched variant.
    variant = dict(
        algo_kwargs=dict(
            num_epochs=100,
            num_steps_per_epoch=1000,
            num_steps_per_eval=1000,
            tau=1e-2,
            batch_size=128,
            max_path_length=100,
            discount=0.99,
            # qf_learning_rate=1e-3,
            # policy_learning_rate=1e-4,
        ),
        env_kwargs=dict(
            render_onscreen=False,
            render_size=84,
            ignore_multitask_goal=True,
            ball_radius=1,
        ),
        algorithm='TD3',
        normalize=False,
        rdim=4,
        render=False,
        env=MultitaskImagePoint2DEnv,
        use_env_goals=False,
        vae_paths=vae_paths,
    )
    n_seeds = 3
    # Sweep dimensions crossed by the sweeper; each listed value presumably
    # overrides the matching key in `variant` (dotted keys reach into
    # nested dicts) -- confirm against DeterministicHyperparameterSweeper.
    search_space = {
        'exploration_type': [
            'ou',
        ],
        'algo_kwargs.reward_scale': [1e-6],
        'rdim': [2],
        'seedid': range(n_seeds),
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # Launch one run per hyperparameter combination.
    run_variants(experiment, sweeper.iterate_hyperparameters(), run_id=0)
d1cb360e9917617ad5c904374b3eb8dd6957d1be | 4,865 | py | Python | personal_blog/books/routes.py | JulianHysi/personal-blog | 42cf296cce7791cca76da5653dd3570deb48849a | [
"MIT"
] | null | null | null | personal_blog/books/routes.py | JulianHysi/personal-blog | 42cf296cce7791cca76da5653dd3570deb48849a | [
"MIT"
] | 3 | 2021-09-08T03:13:42.000Z | 2022-03-12T00:56:51.000Z | personal_blog/books/routes.py | JulianHysi/personal_blog | 42cf296cce7791cca76da5653dd3570deb48849a | [
"MIT"
] | null | null | null | """Module containing route functions for the books blueprint.
---
Functions
---------
add_book(): return http response
the route for adding books
all_books(): return http response
the route for displaying all books
book(book_id): return http response
the route for displaying a single book
update_book(book_id): return http response
the route for updating a book
delete_book(book_id): return http response
the route for deleting a book
"""
from flask import Blueprint, render_template, url_for, flash, redirect, abort,\
request
from flask_login import current_user, login_required
from personal_blog import db
from personal_blog.models import Book
from personal_blog.books.forms import BookForm
books = Blueprint('books', __name__)
@books.route("/book/new", methods=['GET', 'POST'])
@login_required
def add_book():
"""The route function for adding books.
If the user isn't admin, don't do anything.
If the form validates, add book to db.
Flash the message, and redirect to all books.
Else, just load the form (render the template).
---
Returns
-------
http response
"""
if not current_user.is_admin: # only the admin adds books
abort(403)
form = BookForm()
if form.validate_on_submit():
book = Book(title=form.title.data, authors=form.authors.data,
link=form.link.data, edition=form.edition.data,
description=form.description.data)
db.session.add(book)
db.session.commit()
flash('Book has been added!', 'success')
return redirect(url_for('books.all_books'))
return render_template('books/add_book.html', title='New Book',
form=form, legend='Add Book', hide_sidebar=True)
@books.route("/all_books")
def all_books():
    """Render the page listing every book in the database.
    ---
    Returns
    -------
    http response
    """
    # Local renamed so it does not shadow the module-level blueprint.
    every_book = Book.query.all()
    return render_template('books/all_books.html', books=every_book)
@books.route("/book/<int:book_id>")
def book(book_id):
    """Render the detail page for a single book, or 404.
    ---
    Parameters
    ----------
    book_id: int
        primary key of the book to display
    Returns
    -------
    http response
    """
    record = Book.query.get_or_404(book_id)
    return render_template('books/book.html', title=record.title,
                           book=record)
@books.route("/book/<int:book_id>/update", methods=['GET', 'POST'])
@login_required
def update_book(book_id):
    """Edit an existing book (admin only).

    A valid POST applies the submitted values and redirects to the
    book page; a GET pre-fills the form with the stored values.
    ---
    Parameters
    ----------
    book_id: int
        primary key of the book to edit
    Returns
    -------
    http response
    """
    record = Book.query.get_or_404(book_id)
    # Only the admin account may update books.
    if not current_user.is_admin:
        abort(403)
    form = BookForm()
    form.submit.label.text = 'Update'
    if form.validate_on_submit():
        record.title = form.title.data
        record.authors = form.authors.data
        record.edition = form.edition.data
        if form.link.data:
            # An empty link field leaves the stored link untouched.
            record.link = form.link.data
        record.description = form.description.data
        db.session.commit()
        flash('Book has been updated!', 'success')
        return redirect(url_for('books.book', book_id=record.id))
    if request.method == 'GET':
        # Pre-populate the form with the current values.
        form.title.data = record.title
        form.authors.data = record.authors
        form.edition.data = record.edition
        form.link.data = record.link
        form.description.data = record.description
    return render_template('books/add_book.html', title='Update Book',
                           form=form, legend='Update Book',
                           hide_sidebar=True)
@books.route("/book/<int:book_id>/delete", methods=['POST'])
@login_required
def delete_book(book_id):
    """Delete a book (admin only) and return to the listing.
    ---
    Parameters
    ----------
    book_id: int
        primary key of the book to delete
    Returns
    -------
    http response
    """
    record = Book.query.get_or_404(book_id)
    # Only the admin account may delete books.
    if not current_user.is_admin:
        abort(403)
    db.session.delete(record)
    db.session.commit()
    flash('Book has been deleted!', 'success')
    return redirect(url_for('books.all_books'))
84de8898171181653bbafd0fe5c756213daefbf9 | 908 | py | Python | var/spack/repos/builtin.mock/packages/printing-package/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin.mock/packages/printing-package/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin.mock/packages/printing-package/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PrintingPackage(Package):
    """This package prints some output from its install method.
    We use this to test whether that output is properly logged.
    """
    # Placeholder metadata: the URL string itself says the unit test is
    # expected to substitute a real fetch URL.
    homepage = "http://www.example.com/printing_package"
    url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
    version('1.0', '0123456789abcdef0123456789abcdef')
    def install(self, spec, prefix):
        # The BEFORE/AFTER markers bracket the build commands so tests can
        # check that install output is captured in the build log.
        print("BEFORE INSTALL")
        configure('--prefix=%s' % prefix)
        make()
        make('install')
        print("AFTER INSTALL")
    def test(self):
        # Same marker pattern for the test phase.
        print("BEFORE TEST")
        self.run_test('true')  # run /bin/true
        print("AFTER TEST")
| 28.375 | 88 | 0.662996 |
ab4eb60bfc9f45133235346b5a9cbe8cf7091602 | 573 | py | Python | event/eventapp/migrations/0010_auto_20190506_0611.py | easywaytostudy123/eventhandler | 07d36c5e83c49016b50f901455a4088b8548da3c | [
"Apache-2.0"
] | null | null | null | event/eventapp/migrations/0010_auto_20190506_0611.py | easywaytostudy123/eventhandler | 07d36c5e83c49016b50f901455a4088b8548da3c | [
"Apache-2.0"
] | 8 | 2020-02-12T00:25:27.000Z | 2022-02-10T11:21:11.000Z | event/eventapp/migrations/0010_auto_20190506_0611.py | PushpinderSingh21/eventhandler | 4e7aafedf6bb171c9cd3fb6d44d9072e9fa537d6 | [
"Apache-2.0"
] | 1 | 2020-03-04T06:58:05.000Z | 2020-03-04T06:58:05.000Z | # Generated by Django 2.2 on 2019-05-06 06:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous auto-generated migration.
    dependencies = [
        ('eventapp', '0009_auto_20190506_0609'),
    ]
    operations = [
        # Make 'date' a nullable CharField of length 100.
        migrations.AlterField(
            model_name='createevent',
            name='date',
            field=models.CharField(max_length=100, null=True),
        ),
        # Make 'time1' nullable.
        # NOTE(review): max_length has no effect on a DateTimeField and looks
        # like a leftover from a CharField definition -- confirm.
        migrations.AlterField(
            model_name='createevent',
            name='time1',
            field=models.DateTimeField(max_length=100, null=True),
        ),
    ]
| 23.875 | 66 | 0.589878 |
eb6fbd224b9abd6354d76e8bb0a6280eebb7ad1f | 701 | py | Python | novaclient/v2/contrib/metadata_extensions.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | null | null | null | novaclient/v2/contrib/metadata_extensions.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | null | null | null | novaclient/v2/contrib/metadata_extensions.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | 1 | 2019-01-11T16:15:52.000Z | 2019-01-11T16:15:52.000Z | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.v2 import contrib
# Module-level side effect: warn() runs whenever this module is imported.
# NOTE(review): presumably emits a deprecation notice for this contrib
# extension, with alternative=False meaning no replacement is named --
# confirm against novaclient.v2.contrib.
contrib.warn(alternative=False)
| 35.05 | 78 | 0.738944 |
12655df6ba00c529411bb7a97828de8f3d6e2d34 | 1,530 | py | Python | tests/test_rename_custom_annotations.py | opengenomebrowser/opengenomebrowser-tools | b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e | [
"MIT"
] | null | null | null | tests/test_rename_custom_annotations.py | opengenomebrowser/opengenomebrowser-tools | b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e | [
"MIT"
] | null | null | null | tests/test_rename_custom_annotations.py | opengenomebrowser/opengenomebrowser-tools | b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e | [
"MIT"
] | null | null | null | from unittest import TestCase
import os
from opengenomebrowser_tools.rename_custom_annotations import *
# Repository root: two directory levels above this test file.
ROOT = os.path.dirname(os.path.dirname(__file__))
# Scratch output path written by the rename test and removed by cleanup().
TMPFILE = '/tmp/renamed_custom_annotations.KG'
# Input fixtures; the tests below expect their locus tags to use the
# 'tmp_' prefix.
custom_files = [
    f'{ROOT}/test-data/prokka-bad/custom_annotations.KG',
]
def cleanup():
    """Remove the temporary output file if a previous run left it behind."""
    leftover = os.path.isfile(TMPFILE)
    if leftover:
        os.remove(TMPFILE)
class Test(TestCase):
    """Exercises CustomAnnotationFile against the fixture files."""
    def test_detect_locus_tag_prefix(self):
        for path in custom_files:
            detected = CustomAnnotationFile(path).detect_locus_tag_prefix()
            self.assertEqual(detected, 'tmp_')
    def test_validate_locus_tags(self):
        for path in custom_files:
            # No prefix given and the correct prefix must both pass.
            CustomAnnotationFile(path).validate_locus_tags(
                locus_tag_prefix=None)
            CustomAnnotationFile(path).validate_locus_tags(
                locus_tag_prefix='tmp_')
            # A wrong prefix must be rejected.
            with self.assertRaises(AssertionError):
                CustomAnnotationFile(path).validate_locus_tags(
                    locus_tag_prefix='xxx_')
    def test_rename_custom_annotations(self):
        for path in custom_files:
            cleanup()
            CustomAnnotationFile(path).rename(
                new_locus_tag_prefix='YOLO_', out=TMPFILE, validate=True)
            with open(TMPFILE) as handle:
                renamed = handle.read()
            # Every old-prefix tag must be gone and all three replaced.
            self.assertNotIn(member='tmp', container=renamed)
            self.assertEqual(renamed.count('YOLO_'), 3)
    @classmethod
    def tearDownClass(cls) -> None:
        cleanup()
| 34 | 110 | 0.696732 |
2b4df20f0bb072b8c8f72b6e3ebce1e997d800cb | 29,180 | py | Python | codes_src/fairseq/sequence_generator.py | ZhenYangIACAS/WeTS | bba33ad64e10efd7d3d95b5a0b6ad125216542cf | [
"Unlicense"
] | 50 | 2021-11-15T02:34:43.000Z | 2021-11-18T07:24:46.000Z | codes_src/fairseq/sequence_generator.py | yujun531/WeTS | bba33ad64e10efd7d3d95b5a0b6ad125216542cf | [
"Unlicense"
] | null | null | null | codes_src/fairseq/sequence_generator.py | yujun531/WeTS | bba33ad64e10efd7d3d95b5a0b6ad125216542cf | [
"Unlicense"
] | 26 | 2021-11-15T02:35:14.000Z | 2021-11-15T08:25:42.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
    def __init__(
        self,
        tgt_dict,
        beam_size=1,
        max_len_a=0,
        max_len_b=200,
        min_len=1,
        stop_early=True,
        normalize_scores=True,
        len_penalty=1.,
        unk_penalty=0.,
        retain_dropout=False,
        sampling=False,
        sampling_topk=-1,
        sampling_topp=-1.0,
        temperature=1.,
        diverse_beam_groups=-1,
        diverse_beam_strength=0.5,
        match_source_len=False,
        no_repeat_ngram_size=0,
        constraints = False,
        prefix_allowed_tokens_fn=None,
    ):
        """Generates translations of a given source sentence.
        Args:
            tgt_dict (~fairseq.data.Dictionary): target dictionary
            beam_size (int, optional): beam width (default: 1)
            max_len_a/b (int, optional): generate sequences of maximum length
                ax + b, where x is the source length
            min_len (int, optional): the minimum length of the generated output
                (not including end-of-sentence)
            stop_early (bool, optional): stop generation immediately after we
                finalize beam_size hypotheses, even though longer hypotheses
                might have better normalized scores (default: True)
            normalize_scores (bool, optional): normalize scores by the length
                of the output (default: True)
            len_penalty (float, optional): length penalty, where <1.0 favors
                shorter, >1.0 favors longer sentences (default: 1.0)
            unk_penalty (float, optional): unknown word penalty, where <0
                produces more unks, >0 produces fewer (default: 0.0)
            retain_dropout (bool, optional): use dropout when generating
                (default: False)
            sampling (bool, optional): sample outputs instead of beam search
                (default: False)
            sampling_topk (int, optional): only sample among the top-k choices
                at each step (default: -1)
            sampling_topp (float, optional): only sample among the smallest set
                of words whose cumulative probability mass exceeds p
                at each step (default: -1.0)
            temperature (float, optional): temperature, where values
                >1.0 produce more uniform samples and values <1.0 produce
                sharper samples (default: 1.0)
            diverse_beam_groups/strength (float, optional): parameters for
                Diverse Beam Search sampling
            match_source_len (bool, optional): outputs should match the source
                length (default: False)
            no_repeat_ngram_size (int, optional): block repeated n-grams of
                this size during generation; 0 disables (default: 0)
            constraints: if truthy, selects LexicallyConstrainedBeamSearch
                (default: False)
            prefix_allowed_tokens_fn (callable, optional): if given, selects
                PrefixConstrainedBeamSearch using this function
                (default: None)
        """
        # Cache special-token ids and vocabulary size from the dictionary.
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos()
        self.vocab_size = len(tgt_dict)
        self.beam_size = beam_size
        # the max beam size is the dictionary size - 1, since we never select pad
        # NOTE(review): the assignment above is immediately overwritten here
        # and is therefore redundant.
        self.beam_size = min(beam_size, self.vocab_size - 1)
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        self.min_len = min_len
        self.stop_early = stop_early
        self.normalize_scores = normalize_scores
        self.len_penalty = len_penalty
        self.unk_penalty = unk_penalty
        self.retain_dropout = retain_dropout
        self.temperature = temperature
        self.match_source_len = match_source_len
        self.no_repeat_ngram_size = no_repeat_ngram_size
        # NOTE(review): attribute name is misspelled ('constriants'); renaming
        # it would change the public attribute, so it is only flagged here.
        self.constriants = constraints
        self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
        # Sanity-check mutually dependent CLI options.
        assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
        assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
        assert temperature > 0, '--temperature must be greater than 0'
        # Select the search strategy; the options are checked in priority
        # order, so only the first matching one takes effect.
        if sampling:
            self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)
        elif diverse_beam_groups > 0:
            self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
        elif match_source_len:
            # Forces output length == source length (min = max = 1*x + 0).
            self.search = search.LengthConstrainedBeamSearch(
                tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
            )
        elif constraints:
            self.search = search.LexicallyConstrainedBeamSearch(
                tgt_dict, constraints
            )
        elif prefix_allowed_tokens_fn:
            self.search = search.PrefixConstrainedBeamSearch(
                tgt_dict, prefix_allowed_tokens_fn
            )
        else:
            # Plain beam search is the default.
            self.search = search.BeamSearch(tgt_dict)
@torch.no_grad()
def generate(
self,
models,
sample,
prefix_tokens=None,
constraints=None,
bos_token=None,
**kwargs
):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
"""
model = EnsembleModel(models)
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support"
)
self.search.init_constraints(constraints, beam_size)
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.data.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn, attn_buf = None, None
nonpad_idxs = None
if prefix_tokens is not None:
partial_prefix_mask_buf = torch.zeros_like(src_lengths).byte()
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
if self.stop_early or step == max_len or unfinalized_scores is None:
return True
# stop if the best unfinalized score is worse than the worst
# finalized one
best_unfinalized_score = unfinalized_scores[unfin_idx].max()
if self.normalize_scores:
best_unfinalized_score /= max_len ** self.len_penalty
if worst_finalized[sent]['score'] >= best_unfinalized_score:
return True
return False
        def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
            """
            Finalize the given hypotheses at this step, while keeping the total
            number of finalized hypotheses per sentence <= beam_size.

            Note: the input must be in the desired finalization order, so that
            hypotheses that appear earlier in the input are preferred to those
            that appear later.

            Args:
                step: current time step
                bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                    indicating which hypotheses to finalize
                eos_scores: A vector of the same size as bbsz_idx containing
                    scores for each hypothesis
                unfinalized_scores: A vector containing scores for all
                    unfinalized hypotheses

            Returns:
                The list of batch indices (within the still-active batch) of
                sentences that became fully finished at this step.
            """
            assert bbsz_idx.numel() == eos_scores.numel()

            # clone relevant token and attention tensors
            tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is EOS
            tokens_clone[:, step] = self.eos
            attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None

            # compute scores per token position
            pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
            pos_scores[:, step] = eos_scores
            # convert from cumulative to per-position scores
            pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]

            # normalize sentence-level scores
            if self.normalize_scores:
                eos_scores /= (step + 1) ** self.len_penalty

            # cum_unfin[i] = number of sentences already finished before the
            # i-th still-unfinished sentence; used below to map indices within
            # the shrinking active batch back to original sentence positions.
            cum_unfin = []
            prev = 0
            for f in finished:
                if f:
                    prev += 1
                else:
                    cum_unfin.append(prev)

            sents_seen = set()
            for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
                unfin_idx = idx // beam_size  # index within the active (unfinished) batch
                sent = unfin_idx + cum_unfin[unfin_idx]  # index within the original batch

                sents_seen.add((sent, unfin_idx))

                # hypotheses longer than their source are rejected outright
                if self.match_source_len and step > src_lengths[unfin_idx]:
                    score = -math.inf

                def get_hypo():
                    # Build the hypothesis dict for row i of the finalized
                    # batch (closes over i, score, tokens_clone, pos_scores).
                    if attn_clone is not None:
                        # remove padding tokens from attn scores
                        hypo_attn = attn_clone[i][nonpad_idxs[sent]]
                        _, alignment = hypo_attn.max(dim=0)
                    else:
                        hypo_attn = None
                        alignment = None

                    return {
                        'tokens': tokens_clone[i],
                        'score': score,
                        'attention': hypo_attn,  # src_len x tgt_len
                        'alignment': alignment,
                        'positional_scores': pos_scores[i],
                    }

                if len(finalized[sent]) < beam_size:
                    finalized[sent].append(get_hypo())
                elif not self.stop_early and score > worst_finalized[sent]['score']:
                    # replace worst hypo for this sentence with new/better one
                    worst_idx = worst_finalized[sent]['idx']
                    if worst_idx is not None:
                        finalized[sent][worst_idx] = get_hypo()

                    # find new worst finalized hypo for this sentence
                    idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
                    worst_finalized[sent] = {
                        'score': s['score'],
                        'idx': idx,
                    }

            newly_finished = []
            for sent, unfin_idx in sents_seen:
                # check termination conditions for this sentence
                if not finished[sent] and is_finished(sent, step, unfin_idx, unfinalized_scores):
                    finished[sent] = True
                    newly_finished.append(unfin_idx)
            return newly_finished
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if step < max_len:
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
if prefix_tokens is not None and step < prefix_tokens.size(1):
assert isinstance(self.search, search.BeamSearch) or bsz == 1, \
"currently only BeamSearch supports decoding with prefix_tokens"
probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1)
).view(-1, 1).repeat(1, cand_size)
if step > 0:
# save cumulative scores for each hypothesis
cand_scores.add_(scores[:, step - 1].view(bsz, beam_size).repeat(1, 2))
cand_indices = prefix_tokens[:, step].view(-1, 1).repeat(1, cand_size)
cand_beams = torch.zeros_like(cand_indices)
# handle prefixes of different lengths
# when step == prefix_tokens.size(1), we'll have new free-decoding batches
if prefix_tokens is not None and step <= prefix_tokens.size(1):
if step < prefix_tokens.size(1):
partial_prefix_mask = prefix_tokens[:, step].eq(self.pad)
else: # all prefixes finished force-decoding
partial_prefix_mask = torch.ones(bsz).to(prefix_tokens).byte()
if partial_prefix_mask.any():
# track new free-decoding batches, at whose very first step
# only use the first beam to eliminate repeats
prefix_step0_mask = partial_prefix_mask ^ partial_prefix_mask_buf
lprobs.view(bsz, beam_size, -1)[prefix_step0_mask, 1:] = -math.inf
partial_scores, partial_indices, partial_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
cand_scores[partial_prefix_mask] = partial_scores[partial_prefix_mask]
cand_indices[partial_prefix_mask] = partial_indices[partial_prefix_mask]
cand_beams[partial_prefix_mask] = partial_beams[partial_prefix_mask]
partial_prefix_mask_buf = partial_prefix_mask
else:
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, step - 1].unsqueeze(-1))
# finalize all active hypotheses once we hit max_len
# pick the hypothesis with the highest prob of EOS right now
torch.sort(
lprobs[:, self.eos],
descending=True,
out=(eos_scores, eos_bbsz_idx),
)
num_remaining_sent -= len(finalize_hypos(step, eos_bbsz_idx, eos_scores))
assert num_remaining_sent == 0
break
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
eos_mask = cand_indices.eq(self.eos)
finalized_sents = set()
if step >= self.min_len:
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores, cand_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
partial_prefix_mask_buf = partial_prefix_mask_buf[batch_idxs]
src_lengths = src_lengths[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
active_mask = buffer('active_mask')
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(_ignore, active_hypos)
)
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# update constraints based on which candidates were selected for the enxt beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
class EnsembleModel(torch.nn.Module):
    """A wrapper around an ensemble of models.

    Exposes a single encode/decode interface: encoder outputs are computed
    per model, and per-step decoder log-probabilities are averaged across the
    ensemble (logsumexp - log(N), i.e. a mean in probability space).
    """

    def __init__(self, models):
        super().__init__()
        self.models = torch.nn.ModuleList(models)
        # Per-model incremental decoding caches; only enabled when *every*
        # member has an incremental decoder, otherwise left as None.
        self.incremental_states = None
        if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
            self.incremental_states = {m: {} for m in models}

    def has_encoder(self):
        # Assumes a homogeneous ensemble: checking the first model suffices.
        return hasattr(self.models[0], 'encoder')

    def max_decoder_positions(self):
        # The ensemble can only decode as far as its most restrictive member.
        return min(m.max_decoder_positions() for m in self.models)

    @torch.no_grad()
    def forward_encoder(self, encoder_input):
        """Run every member encoder; returns None for decoder-only models."""
        if not self.has_encoder():
            return None
        return [model.encoder(**encoder_input) for model in self.models]

    @torch.no_grad()
    def forward_decoder(self, tokens, encoder_outs, temperature=1.):
        """Decode one step; returns (avg_log_probs, avg_attn or None)."""
        if len(self.models) == 1:
            # Fast path: no averaging needed for a single model.
            return self._decode_one(
                tokens,
                self.models[0],
                encoder_outs[0] if self.has_encoder() else None,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )

        log_probs = []
        avg_attn = None
        for model, encoder_out in zip(self.models, encoder_outs):
            probs, attn = self._decode_one(
                tokens,
                model,
                encoder_out,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
            log_probs.append(probs)
            if attn is not None:
                # accumulate attention in-place; divided by N below
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        # Average probabilities in log space: log(mean_i p_i).
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
        if avg_attn is not None:
            avg_attn.div_(len(self.models))
        return avg_probs, avg_attn

    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        """Run a single member decoder for the last position only.

        Returns (probs, attn) where probs is the (log-)probability row for
        the final time step and attn is that step's attention (or None).
        """
        if self.incremental_states is not None:
            decoder_out = list(model.decoder(tokens, encoder_out, incremental_state=self.incremental_states[model]))
        else:
            decoder_out = list(model.decoder(tokens, encoder_out))
        # keep only the last time step; note div_ below mutates this slice
        # in place (and therefore the underlying decoder output tensor)
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        return probs, attn

    def reorder_encoder_out(self, encoder_outs, new_order):
        """Reorder each member's encoder output to follow beam reordering."""
        if not self.has_encoder():
            return
        return [
            model.encoder.reorder_encoder_out(encoder_out, new_order)
            for model, encoder_out in zip(self.models, encoder_outs)
        ]

    def reorder_incremental_state(self, new_order):
        """Reorder each member's incremental cache to follow beam reordering."""
        if self.incremental_states is None:
            return
        for model in self.models:
            model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
| 43.813814 | 117 | 0.566587 |
ca9b496e1efdce87aedfc759c8eb1eac29adc572 | 407 | py | Python | CommonChecklist/asgi.py | pandeyarjun242/submission | 811fb8ed5854531848aa430c5c998b8e6c463137 | [
"MIT"
] | null | null | null | CommonChecklist/asgi.py | pandeyarjun242/submission | 811fb8ed5854531848aa430c5c998b8e6c463137 | [
"MIT"
] | null | null | null | CommonChecklist/asgi.py | pandeyarjun242/submission | 811fb8ed5854531848aa430c5c998b8e6c463137 | [
"MIT"
] | null | null | null | """
ASGI config for CommonChecklist project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CommonChecklist.settings')
application = get_asgi_application()
| 23.941176 | 78 | 0.793612 |
4470afe5bb7e9fc7c1099e2c187baf09a3868362 | 6,203 | py | Python | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/managed_database_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/managed_database_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/managed_database_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource_py3 import TrackedResource
class ManagedDatabase(TrackedResource):
    """A managed database resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param collation: Collation of the managed database.
    :type collation: str
    :ivar status: Status for the database. Possible values include: 'Online',
     'Offline', 'Shutdown', 'Creating', 'Inaccessible'
    :vartype status: str or ~azure.mgmt.sql.models.ManagedDatabaseStatus
    :ivar creation_date: Creation date of the database.
    :vartype creation_date: datetime
    :ivar earliest_restore_point: Earliest restore point in time for point in
     time restore.
    :vartype earliest_restore_point: datetime
    :param restore_point_in_time: Conditional. If createMode is
     PointInTimeRestore, this value is required. Specifies the point in time
     (ISO8601 format) of the source database that will be restored to create
     the new database.
    :type restore_point_in_time: datetime
    :ivar default_secondary_location: Geo paired region.
    :vartype default_secondary_location: str
    :param catalog_collation: Collation of the metadata catalog. Possible
     values include: 'DATABASE_DEFAULT', 'SQL_Latin1_General_CP1_CI_AS'
    :type catalog_collation: str or
     ~azure.mgmt.sql.models.CatalogCollationType
    :param create_mode: Managed database create mode. PointInTimeRestore:
     Create a database by restoring a point in time backup of an existing
     database. SourceDatabaseName, SourceManagedInstanceName and PointInTime
     must be specified. RestoreExternalBackup: Create a database by restoring
     from external backup files. Collation, StorageContainerUri and
     StorageContainerSasToken must be specified. Possible values include:
     'Default', 'RestoreExternalBackup', 'PointInTimeRestore'
    :type create_mode: str or ~azure.mgmt.sql.models.ManagedDatabaseCreateMode
    :param storage_container_uri: Conditional. If createMode is
     RestoreExternalBackup, this value is required. Specifies the uri of the
     storage container where backups for this restore are stored.
    :type storage_container_uri: str
    :param source_database_id: The resource identifier of the source database
     associated with create operation of this database.
    :type source_database_id: str
    :param storage_container_sas_token: Conditional. If createMode is
     RestoreExternalBackup, this value is required. Specifies the storage
     container sas token.
    :type storage_container_sas_token: str
    :ivar failover_group_id: Instance Failover Group resource identifier that
     this managed database belongs to.
    :vartype failover_group_id: str
    """

    # Serialization constraints: read-only properties are never sent to the
    # service; 'location' is the only required property on requests.
    # NOTE(review): generated by AutoRest (see file header) — do not edit the
    # keys by hand; they must match the service wire format.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'status': {'readonly': True},
        'creation_date': {'readonly': True},
        'earliest_restore_point': {'readonly': True},
        'default_secondary_location': {'readonly': True},
        'failover_group_id': {'readonly': True},
    }

    # Maps Python attribute names to their (possibly nested) JSON keys and
    # serialization types, consumed by the SDK's (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'collation': {'key': 'properties.collation', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
        'earliest_restore_point': {'key': 'properties.earliestRestorePoint', 'type': 'iso-8601'},
        'restore_point_in_time': {'key': 'properties.restorePointInTime', 'type': 'iso-8601'},
        'default_secondary_location': {'key': 'properties.defaultSecondaryLocation', 'type': 'str'},
        'catalog_collation': {'key': 'properties.catalogCollation', 'type': 'str'},
        'create_mode': {'key': 'properties.createMode', 'type': 'str'},
        'storage_container_uri': {'key': 'properties.storageContainerUri', 'type': 'str'},
        'source_database_id': {'key': 'properties.sourceDatabaseId', 'type': 'str'},
        'storage_container_sas_token': {'key': 'properties.storageContainerSasToken', 'type': 'str'},
        'failover_group_id': {'key': 'properties.failoverGroupId', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags=None, collation: str=None, restore_point_in_time=None, catalog_collation=None, create_mode=None, storage_container_uri: str=None, source_database_id: str=None, storage_container_sas_token: str=None, **kwargs) -> None:
        super(ManagedDatabase, self).__init__(location=location, tags=tags, **kwargs)
        self.collation = collation
        # Read-only (server-populated) properties start as None and are only
        # filled in when deserializing a service response.
        self.status = None
        self.creation_date = None
        self.earliest_restore_point = None
        self.restore_point_in_time = restore_point_in_time
        self.default_secondary_location = None
        self.catalog_collation = catalog_collation
        self.create_mode = create_mode
        self.storage_container_uri = storage_container_uri
        self.source_database_id = source_database_id
        self.storage_container_sas_token = storage_container_sas_token
        self.failover_group_id = None
dbf8977714d81f9e4c7ab447488de3c4e10b0795 | 21,146 | py | Python | src/rubrix/client/api.py | leireropl/rubrix | c1e14f79b0284300a27be482f6edcc6d4c9a220d | [
"Apache-2.0"
] | null | null | null | src/rubrix/client/api.py | leireropl/rubrix | c1e14f79b0284300a27be482f6edcc6d4c9a220d | [
"Apache-2.0"
] | null | null | null | src/rubrix/client/api.py | leireropl/rubrix | c1e14f79b0284300a27be482f6edcc6d4c9a220d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import re
from asyncio import Future
from functools import wraps
from inspect import signature
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import pandas
from tqdm.auto import tqdm
from rubrix._constants import (
DATASET_NAME_REGEX_PATTERN,
DEFAULT_API_KEY,
RUBRIX_WORKSPACE_HEADER_NAME,
)
from rubrix.client.datasets import (
Dataset,
DatasetForText2Text,
DatasetForTextClassification,
DatasetForTokenClassification,
)
from rubrix.client.metrics.models import MetricResults
from rubrix.client.models import (
BulkResponse,
Record,
Text2TextRecord,
TextClassificationRecord,
TokenClassificationRecord,
)
from rubrix.client.sdk.client import AuthenticatedClient
from rubrix.client.sdk.commons.api import async_bulk, bulk
from rubrix.client.sdk.commons.errors import RubrixClientError
from rubrix.client.sdk.datasets import api as datasets_api
from rubrix.client.sdk.datasets.models import CopyDatasetRequest, TaskType
from rubrix.client.sdk.metrics import api as metrics_api
from rubrix.client.sdk.metrics.models import MetricInfo
from rubrix.client.sdk.text2text import api as text2text_api
from rubrix.client.sdk.text2text.models import (
CreationText2TextRecord,
Text2TextBulkData,
Text2TextQuery,
)
from rubrix.client.sdk.text_classification import api as text_classification_api
from rubrix.client.sdk.text_classification.models import (
CreationTextClassificationRecord,
LabelingRule,
LabelingRuleMetricsSummary,
TextClassificationBulkData,
TextClassificationQuery,
)
from rubrix.client.sdk.token_classification import api as token_classification_api
from rubrix.client.sdk.token_classification.models import (
CreationTokenClassificationRecord,
TokenClassificationBulkData,
TokenClassificationQuery,
)
from rubrix.client.sdk.users.api import whoami
from rubrix.client.sdk.users.models import User
from rubrix.utils import setup_loop_in_thread
# Module-level logger for the client API.
_LOGGER = logging.getLogger(__name__)

# One-shot guard so the `as_pandas` deprecation warning emitted in `Api.load`
# is only printed once per process.
_WARNED_ABOUT_AS_PANDAS = False
class _RubrixLogAgent:
    """Dispatches ``Api.log_async`` calls onto a dedicated background event loop.

    The event loop runs forever in its own thread (created by
    ``setup_loop_in_thread``), so ``log`` can be called from synchronous code
    and returns a future that resolves once the records have been sent.
    """

    def __init__(self, api: "Api"):
        self.__api__ = api
        self.__loop__, self.__thread__ = setup_loop_in_thread()

    @staticmethod
    async def __log_internal__(api: "Api", *args, **kwargs):
        # Thin wrapper so failures inside the background loop are logged with
        # their arguments before being propagated through the caller's future.
        try:
            return await api.log_async(*args, **kwargs)
        except Exception as ex:
            _LOGGER.error(
                f"Cannot log data {args, kwargs}\n"
                f"Error of type {type(ex)}\n: {ex}. ({ex.args})"
            )
            raise ex

    def log(self, *args, **kwargs) -> Future:
        """Schedule an async log call on the background loop.

        Returns:
            A future; call ``.result()`` on it to block until the
            ``BulkResponse`` is available (or the logged exception re-raises).
        """
        return asyncio.run_coroutine_threadsafe(
            self.__log_internal__(self.__api__, *args, **kwargs), self.__loop__
        )

    def __del__(self):
        # The loop runs in a different thread and `BaseEventLoop.stop()` is
        # not thread-safe, so the stop request must be handed to the loop via
        # `call_soon_threadsafe` instead of being called directly.
        self.__loop__.call_soon_threadsafe(self.__loop__.stop)
        del self.__loop__
        del self.__thread__
class Api:
# Larger sizes will trigger a warning
_MAX_CHUNK_SIZE = 5000
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
workspace: Optional[str] = None,
timeout: int = 60,
):
"""Init the Python client.
We will automatically init a default client for you when calling other client methods.
The arguments provided here will overwrite your corresponding environment variables.
Args:
api_url: Address of the REST API. If `None` (default) and the env variable ``RUBRIX_API_URL`` is not set,
it will default to `http://localhost:6900`.
api_key: Authentification key for the REST API. If `None` (default) and the env variable ``RUBRIX_API_KEY``
is not set, it will default to `rubrix.apikey`.
workspace: The workspace to which records will be logged/loaded. If `None` (default) and the
env variable ``RUBRIX_WORKSPACE`` is not set, it will default to the private user workspace.
timeout: Wait `timeout` seconds for the connection to timeout. Default: 60.
Examples:
>>> import rubrix as rb
>>> rb.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y")
"""
api_url = api_url or os.getenv("RUBRIX_API_URL", "http://localhost:6900")
# Checking that the api_url does not end in '/'
api_url = re.sub(r"\/$", "", api_url)
api_key = api_key or os.getenv("RUBRIX_API_KEY", DEFAULT_API_KEY)
workspace = workspace or os.getenv("RUBRIX_WORKSPACE")
self._client: AuthenticatedClient = AuthenticatedClient(
base_url=api_url, token=api_key, timeout=timeout
)
self._user: User = whoami(client=self._client)
if workspace is not None:
self.set_workspace(workspace)
self._agent = _RubrixLogAgent(self)
    @property
    def client(self):
        """The underlying authenticated HTTP client used for all API calls."""
        return self._client
def set_workspace(self, workspace: str):
"""Sets the active workspace.
Args:
workspace: The new workspace
"""
if workspace is None:
raise Exception("Must provide a workspace")
if workspace != self.get_workspace():
if workspace == self._user.username:
self._client.headers.pop(RUBRIX_WORKSPACE_HEADER_NAME, workspace)
elif (
self._user.workspaces is not None
and workspace not in self._user.workspaces
):
raise Exception(f"Wrong provided workspace {workspace}")
self._client.headers[RUBRIX_WORKSPACE_HEADER_NAME] = workspace
def get_workspace(self) -> str:
"""Returns the name of the active workspace.
Returns:
The name of the active workspace as a string.
"""
return self._client.headers.get(
RUBRIX_WORKSPACE_HEADER_NAME, self._user.username
)
def copy(self, dataset: str, name_of_copy: str, workspace: str = None):
"""Creates a copy of a dataset including its tags and metadata
Args:
dataset: Name of the source dataset
name_of_copy: Name of the copied dataset
workspace: If provided, dataset will be copied to that workspace
Examples:
>>> import rubrix as rb
>>> rb.copy("my_dataset", name_of_copy="new_dataset")
>>> rb.load("new_dataset")
"""
response = datasets_api.copy_dataset(
client=self._client,
name=dataset,
json_body=CopyDatasetRequest(name=name_of_copy, target_workspace=workspace),
)
if response.status_code == 409:
raise RuntimeError(f"A dataset with name '{name_of_copy}' already exists.")
def delete(self, name: str) -> None:
"""Deletes a dataset.
Args:
name: The dataset name.
Examples:
>>> import rubrix as rb
>>> rb.delete(name="example-dataset")
"""
datasets_api.delete_dataset(client=self._client, name=name)
def log(
self,
records: Union[Record, Iterable[Record], Dataset],
name: str,
tags: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Any]] = None,
chunk_size: int = 500,
verbose: bool = True,
background: bool = False,
) -> Union[BulkResponse, Future]:
"""Logs Records to Rubrix.
The logging happens asynchronously in a background thread.
Args:
records: The record, an iterable of records, or a dataset to log.
name: The dataset name.
tags: A dictionary of tags related to the dataset.
metadata: A dictionary of extra info for the dataset.
chunk_size: The chunk size for a data bulk.
verbose: If True, shows a progress bar and prints out a quick summary at the end.
background: If True, we will NOT wait for the logging process to finish and return an ``asyncio.Future``
object. You probably want to set ``verbose`` to False in that case.
Returns:
Summary of the response from the REST API.
If the ``background`` argument is set to True, an ``asyncio.Future`` will be returned instead.
Examples:
>>> import rubrix as rb
>>> record = rb.TextClassificationRecord(
... text="my first rubrix example",
... prediction=[('spam', 0.8), ('ham', 0.2)]
... )
>>> rb.log(record, name="example-dataset")
1 records logged to http://localhost:6900/datasets/rubrix/example-dataset
BulkResponse(dataset='example-dataset', processed=1, failed=0)
>>>
>>> # Logging records in the background
>>> rb.log(record, name="example-dataset", background=True, verbose=False)
<Future at 0x7f675a1fffa0 state=pending>
"""
future = self._agent.log(
records=records,
name=name,
tags=tags,
metadata=metadata,
chunk_size=chunk_size,
verbose=verbose,
)
if background:
return future
return future.result()
async def log_async(
self,
records: Union[Record, Iterable[Record], Dataset],
name: str,
tags: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Any]] = None,
chunk_size: int = 500,
verbose: bool = True,
) -> BulkResponse:
"""Logs Records to Rubrix with asyncio.
Args:
records: The record, an iterable of records, or a dataset to log.
name: The dataset name.
tags: A dictionary of tags related to the dataset.
metadata: A dictionary of extra info for the dataset.
chunk_size: The chunk size for a data bulk.
verbose: If True, shows a progress bar and prints out a quick summary at the end.
Returns:
Summary of the response from the REST API
Examples:
>>> # Log asynchronously from your notebook
>>> import asyncio
>>> import rubrix as rb
>>> from rubrix.utils import setup_loop_in_thread
>>> loop, _ = setup_loop_in_thread()
>>> future_response = asyncio.run_coroutine_threadsafe(
... rb.log_async(my_records, dataset_name), loop
... )
"""
tags = tags or {}
metadata = metadata or {}
if not name:
raise InputValueError("Empty dataset name has been passed as argument.")
if not re.match(DATASET_NAME_REGEX_PATTERN, name):
raise InputValueError(
f"Provided dataset name {name} does not match the pattern {DATASET_NAME_REGEX_PATTERN}. "
"Please, use a valid name for your dataset"
)
if chunk_size > self._MAX_CHUNK_SIZE:
_LOGGER.warning(
"""The introduced chunk size is noticeably large, timeout errors may occur.
Consider a chunk size smaller than %s""",
self._MAX_CHUNK_SIZE,
)
if isinstance(records, Record.__args__):
records = [records]
records = list(records)
try:
record_type = type(records[0])
except IndexError:
raise InputValueError("Empty record list has been passed as argument.")
if record_type is TextClassificationRecord:
bulk_class = TextClassificationBulkData
creation_class = CreationTextClassificationRecord
elif record_type is TokenClassificationRecord:
bulk_class = TokenClassificationBulkData
creation_class = CreationTokenClassificationRecord
elif record_type is Text2TextRecord:
bulk_class = Text2TextBulkData
creation_class = CreationText2TextRecord
else:
raise InputValueError(
f"Unknown record type {record_type}. Available values are {Record.__args__}"
)
processed, failed = 0, 0
progress_bar = tqdm(total=len(records), disable=not verbose)
for i in range(0, len(records), chunk_size):
chunk = records[i : i + chunk_size]
response = await async_bulk(
client=self._client,
name=name,
json_body=bulk_class(
tags=tags,
metadata=metadata,
records=[creation_class.from_client(r) for r in chunk],
),
)
processed += response.parsed.processed
failed += response.parsed.failed
progress_bar.update(len(chunk))
progress_bar.close()
# TODO: improve logging policy in library
if verbose:
_LOGGER.info(
f"Processed {processed} records in dataset {name}. Failed: {failed}"
)
workspace = self.get_workspace()
if (
not workspace
): # Just for backward comp. with datasets with no workspaces
workspace = "-"
print(
f"{processed} records logged to {self._client.base_url}/datasets/{workspace}/{name}"
)
# Creating a composite BulkResponse with the total processed and failed
return BulkResponse(dataset=name, processed=processed, failed=failed)
def load(
    self,
    name: str,
    query: Optional[str] = None,
    ids: Optional[List[Union[str, int]]] = None,
    limit: Optional[int] = None,
    as_pandas: bool = True,
) -> Union[pandas.DataFrame, Dataset]:
    """Loads a dataset as a pandas DataFrame or a Dataset.

    Args:
        name: The dataset name.
        query: An ElasticSearch query with the
            `query string syntax <https://rubrix.readthedocs.io/en/stable/guides/queries.html>`_
        ids: If provided, load dataset records with given ids.
        limit: The number of records to retrieve.
        as_pandas: If True, return a pandas DataFrame. If False, return a Dataset.

    Returns:
        The dataset as a pandas Dataframe or a Dataset.

    Examples:
        >>> import rubrix as rb
        >>> dataframe = rb.load(name="example-dataset")
        >>> dataset = rb.load(name="example-dataset")
    """
    # The server-side task type decides which data endpoint, query model and
    # client-side Dataset class are used for this dataset.
    response = datasets_api.get_dataset(client=self._client, name=name)
    task = response.parsed.task

    task_config = {
        TaskType.text_classification: (
            text_classification_api.data,
            TextClassificationQuery,
            DatasetForTextClassification,
        ),
        TaskType.token_classification: (
            token_classification_api.data,
            TokenClassificationQuery,
            DatasetForTokenClassification,
        ),
        TaskType.text2text: (
            text2text_api.data,
            Text2TextQuery,
            DatasetForText2Text,
        ),
    }

    try:
        get_dataset_data, request_class, dataset_class = task_config[task]
    except KeyError:
        raise ValueError(
            f"Load method not supported for the '{task}' task. Supported tasks: "
            f"{[TaskType.text_classification, TaskType.token_classification, TaskType.text2text]}"
        )
    response = get_dataset_data(
        client=self._client,
        name=name,
        request=request_class(ids=ids, query_text=query),
        limit=limit,
    )

    # Convert SDK records into client records.
    records = [sdk_record.to_client() for sdk_record in response.parsed]
    try:
        records_sorted_by_id = sorted(records, key=lambda x: x.id)
    # record ids can be a mix of int/str -> sort all as str type
    except TypeError:
        records_sorted_by_id = sorted(records, key=lambda x: str(x.id))

    dataset = dataset_class(records_sorted_by_id)

    # Warn (once per process) about the upcoming deprecation of `as_pandas`.
    global _WARNED_ABOUT_AS_PANDAS
    if not _WARNED_ABOUT_AS_PANDAS:
        _LOGGER.warning(
            "The argument 'as_pandas' in `rb.load` will be deprecated in the future, and we will always return a `Dataset`. "
            "To emulate the future behavior set `as_pandas=False`. To get a pandas DataFrame, call `Dataset.to_pandas()`"
        )
        _WARNED_ABOUT_AS_PANDAS = True

    if as_pandas:
        return dataset.to_pandas()
    return dataset
def dataset_metrics(self, name: str) -> List[MetricInfo]:
    """Return the list of metrics available for dataset *name*."""
    # The metric catalogue depends on the dataset's task type, so look it up first.
    dataset_info = datasets_api.get_dataset(self._client, name)
    metrics_response = metrics_api.get_dataset_metrics(
        self._client, name=name, task=dataset_info.parsed.task
    )
    return metrics_response.parsed
def get_metric(self, name: str, metric: str) -> Optional[MetricInfo]:
    """Return the metric of dataset *name* whose id equals *metric*, or None."""
    return next(
        (info for info in self.dataset_metrics(name) if info.id == metric),
        None,
    )
def compute_metric(
    self,
    name: str,
    metric: str,
    query: Optional[str] = None,
    interval: Optional[float] = None,
    size: Optional[int] = None,
) -> MetricResults:
    """Compute *metric* for dataset *name* and return its results.

    Args:
        name: The dataset name.
        metric: Id of the metric to compute (must exist in ``dataset_metrics``).
        query: Optional query used to restrict the records considered.
        interval: Optional interval parameter forwarded to the metrics endpoint.
        size: Optional size parameter forwarded to the metrics endpoint.

    Returns:
        The metric's metadata merged with the computed results.
    """
    # The metrics endpoint requires the dataset's task type.
    response = datasets_api.get_dataset(self._client, name)

    metric_ = self.get_metric(name, metric=metric)
    # NOTE(review): `assert` is stripped under `python -O`; consider raising an
    # explicit exception if the metric id can be user-supplied.
    assert metric_ is not None, f"Metric {metric} not found !!!"

    response = metrics_api.compute_metric(
        self._client,
        name=name,
        task=response.parsed.task,
        metric=metric,
        query=query,
        interval=interval,
        size=size,
    )

    return MetricResults(**metric_.dict(), results=response.parsed)
def fetch_dataset_labeling_rules(self, dataset: str) -> List[LabelingRule]:
    """Retrieve all labeling rules defined for *dataset*."""
    api_response = text_classification_api.fetch_dataset_labeling_rules(
        self._client, name=dataset
    )
    rules = []
    for raw_rule in api_response.parsed:
        rules.append(LabelingRule.parse_obj(raw_rule))
    return rules
def rule_metrics_for_dataset(
    self, dataset: str, rule: LabelingRule
) -> LabelingRuleMetricsSummary:
    """Return the metrics summary for *rule* evaluated against *dataset*."""
    api_response = text_classification_api.dataset_rule_metrics(
        self._client, name=dataset, query=rule.query, label=rule.label
    )
    raw_summary = api_response.parsed
    return LabelingRuleMetricsSummary.parse_obj(raw_summary)
# Process-wide singleton holding the currently configured client API instance.
__ACTIVE_API__: Optional[Api] = None


def active_api() -> Api:
    """Returns the active API.

    If Active API is None, initialize a default one.
    """
    global __ACTIVE_API__
    if __ACTIVE_API__ is None:
        # Lazily create a client with default connection settings.
        __ACTIVE_API__ = Api()
    return __ACTIVE_API__
def api_wrapper(api_method: Callable):
    """Decorator factory exposing an ``Api`` method as a module-level function.

    The returned decorator copies the method's docstring/metadata onto the
    wrapped function and rewrites its signature to drop the ``self`` parameter.
    """

    def decorator(func):
        # Mirror the sync/async nature of the wrapped Api method.
        if asyncio.iscoroutinefunction(api_method):

            @wraps(api_method)
            async def wrapped_func(*args, **kwargs):
                return await func(*args, **kwargs)

        else:

            @wraps(api_method)
            def wrapped_func(*args, **kwargs):
                return func(*args, **kwargs)

        method_signature = signature(api_method)
        public_params = [
            param
            for param_name, param in method_signature.parameters.items()
            if param_name != "self"
        ]
        wrapped_func.__signature__ = method_signature.replace(parameters=public_params)
        return wrapped_func

    return decorator
# Module-level convenience functions delegating to the process-wide Api
# singleton. `api_wrapper` copies each Api method's docstring and signature
# (minus `self`) onto the corresponding function, so no docstrings are
# written here — they would be overwritten by `functools.wraps`.


@api_wrapper(Api.__init__)
def init(*args, **kwargs):
    # Re-initializes the singleton with the provided connection settings.
    global __ACTIVE_API__
    __ACTIVE_API__ = Api(*args, **kwargs)


@api_wrapper(Api.set_workspace)
def set_workspace(*args, **kwargs):
    return active_api().set_workspace(*args, **kwargs)


@api_wrapper(Api.get_workspace)
def get_workspace(*args, **kwargs):
    return active_api().get_workspace(*args, **kwargs)


@api_wrapper(Api.copy)
def copy(*args, **kwargs):
    return active_api().copy(*args, **kwargs)


@api_wrapper(Api.delete)
def delete(*args, **kwargs):
    return active_api().delete(*args, **kwargs)


@api_wrapper(Api.log)
def log(*args, **kwargs):
    return active_api().log(*args, **kwargs)


@api_wrapper(Api.log_async)
def log_async(*args, **kwargs):
    return active_api().log_async(*args, **kwargs)


@api_wrapper(Api.load)
def load(*args, **kwargs):
    return active_api().load(*args, **kwargs)
class InputValueError(RubrixClientError):
    """Raised when a user-supplied argument has an invalid or unexpected value."""

    pass
| 34.552288 | 129 | 0.62338 |
c0b6eb0cba5309a7e98ee50c72f15fd330010a1b | 9,269 | py | Python | hubblestack/audit/service.py | buddwm/hubble | b384ee48556ca144ae6f09dd0b45db29288e5293 | [
"Apache-2.0"
] | 363 | 2017-01-10T22:02:47.000Z | 2022-03-21T10:44:40.000Z | hubblestack/audit/service.py | buddwm/hubble | b384ee48556ca144ae6f09dd0b45db29288e5293 | [
"Apache-2.0"
] | 439 | 2017-01-12T22:39:42.000Z | 2021-10-11T18:43:28.000Z | hubblestack/audit/service.py | buddwm/hubble | b384ee48556ca144ae6f09dd0b45db29288e5293 | [
"Apache-2.0"
] | 138 | 2017-01-05T22:10:59.000Z | 2021-09-01T14:35:00.000Z | # -*- encoding: utf-8 -*-
"""
Module for running service command. Same can be used in both Audit/FDG
Note: Earlier systemctl module is also merged into this module only
Note: Now each module just returns its output (As Data gathering)
For Audit checks, comparison logic is now moved to comparators.
See below sections for more understanding
Usable in Modules
-----------------
- Audit
- FDG
Common Schema
-------------
- check_unique_id
Its a unique string within a yaml file.
It is present on top of a yaml block
- description
Description of the check
- tag
(Applicable only for Audit)
Check tag value
- sub_check (Optional, default: false)
(Applicable only for Audit)
If true, its individual result will not be counted in compliance
It might be referred in some boolean expression
- failure_reason (Optional)
(Applicable only for Audit)
By default, module will generate failure reason string at runtime
If this is passed, this will override module's actual failure reason
- invert_result (Optional, default: false)
(Applicable only for Audit)
This is used to flip the boolean output from a check
- implementations
(Applicable only for Audit)
Its an array of implementations, usually for multiple operating systems.
You can specify multiple implementations here for respective operating system.
Either one or none will be executed.
- grains (under filter)
(Applicable only for Audit)
Any grains with and/or/not supported. This is used to filter whether
this check can run on the current OS or not.
To run this check on all OS, put a '*'
Example:
G@docker_details:installed:True and G@docker_details:running:True and not G@osfinger:*Flatcar* and not G@osfinger:*CoreOS*
- hubble_version (Optional)
(Applicable only for Audit)
It acts as a second level filter where you can specify for which Hubble version,
this check is compatible with. You can specify a boolean expression as well
Example:
'>3.0 AND <5.0'
- module
The name of Hubble module.
- return_no_exec (Optional, Default: false)
(Applicable only for Audit)
It takes a boolean (true/false) value.
If its true, the implementation will not be executed. And true is returned
This can be useful in cases where you don't have any implementation for some OS,
and you want a result from the block. Else, your meta-check(bexpr) will be failed.
- items
(Applicable only for Audit)
An array of multiple module implementations. At least one block is necessary.
Each item in array will result into a boolean value.
If multiple module implementations exists, final result will be evaluated as
boolean AND (default, see parameter: check_eval_logic)
- check_eval_logic (Optional, default: and)
(Applicable only for Audit)
If there are multiple module implementations in "items" (above parameter), this parameter
helps in evaluating their result. Default value is "and"
It accepts only values: and/or
- args
Arguments specific to a module.
- comparator
For the purpose of comparing output of module with expected values.
Parameters depends upon the comparator used.
For detailed documentation on comparators,
read comparator's implementations at (/hubblestack/extmods/comparators/)
FDG Schema
----------
FDG schema is kept simple. Only following keywords allowed:
- Unique id
Unique string id
- description (Optional)
Some description
- module
Name of the module
- args
Module arguments
- comparator (Only in case of Audit-FDG connector)
FDG Chaining
------------
In normal execution, this module expects a service name ('*' supported)
In case of chaining, it expects service name from the chained parameter
Module Arguments
----------------
- name
Name of service. '*' is supported in name.
Module Output
-------------
Array of matched services with their statuses.
Example:
[
{name: "service1", "running": True, "enabled": True}
{name: "service2", "running": True, "enabled": False}
]
where,
running: indicates whether service is running or not
enabled: whether services is enabled to start on boot time or not
This flag will be used for systemctl module as alternate
Output: (True, "Above dictionary")
Note: Module returns a tuple
First value being the status of module
Second value is the actual output from module
Compatible Comparators
----------------------
Since output is pretty dynamic. Following comparators can be used:
- list
For detailed documentation on comparators,
read comparator's implementations at (/hubblestack/extmods/comparators/)
Audit Example:
---------------
check_id:
description: 'sample description'
tag: 'ADOBE-00041'
sub_check: false (Optional, default: false)
failure_reason: 'a sample failure reason' (Optional)
invert_result: false (Optional, default: false)
implementations:
- filter:
grains: 'G@osfinger:CentOS*Linux-7'
hubble_version: '>3 AND <7 AND <8'
# return_no_exec: true (Optional, default: false)
check_eval_logic: and (Optional, default: and)
module: service
items:
- args:
name: 'abc*'
comparator:
type: "list"
match_any:
- name: abc2
status: true
- name: xyz
status: false
FDG Example:
------------
main:
description: 'service'
module: service
args:
name: 'abc*'
"""
import logging
import fnmatch
import hubblestack.module_runner.runner_utils as runner_utils
from hubblestack.exceptions import HubbleCheckValidationError
log = logging.getLogger(__name__)
def validate_params(block_id, block_dict, extra_args=None):
    """Ensure the mandatory 'name' parameter is present (chained or direct).

    :param block_id: id of the block
    :param block_dict: parameters for this module
    :param extra_args: optional dict with chaining arguments and caller info
    :raises HubbleCheckValidationError: when 'name' cannot be resolved
    """
    log.debug('Module: service Start validating params for check-id: {0}'.format(block_id))

    # A chained FDG value takes precedence over the configured argument.
    service_name = runner_utils.get_chained_param(extra_args)
    if not service_name:
        service_name = runner_utils.get_param_for_module(block_id, block_dict, 'name')

    if not service_name:
        raise HubbleCheckValidationError(
            {'name': 'Mandatory parameter: name not found for id: %s' % (block_id)}
        )

    log.debug('Validation success for check-id: {0}'.format(block_id))
def execute(block_id, block_dict, extra_args=None):
    """
    Execute the module

    :param block_id:
        id of the block
    :param block_dict:
        parameter for this module
    :param extra_args:
        Extra argument dictionary, (If any)
        Example: {'chaining_args': {'result': "/some/path/file.txt", 'status': True},
                  'caller': 'Audit'}

    returns:
        tuple of result(value) and status(boolean)
    """
    # Fixed copy-paste error: this is the service module, not the stat module.
    log.debug('Executing service module for id: {0}'.format(block_id))

    # The service name may come from a chained FDG result, else from the args.
    name = runner_utils.get_chained_param(extra_args)
    if not name:
        name = runner_utils.get_param_for_module(block_id, block_dict, 'name')

    result = []
    # 'name' may contain glob wildcards; match against all known services.
    matched_services = fnmatch.filter(__mods__['service.get_all'](), name)
    for matched_service in matched_services:
        result.append({
            "name": matched_service,
            # whether the service is currently running
            "running": __mods__['service.status'](matched_service),
            # whether the service is enabled to start at boot time
            "enabled": __mods__['service.enabled'](matched_service)
        })
    return runner_utils.prepare_positive_result_for_module(block_id, result)
def get_filtered_params_to_log(block_id, block_dict, extra_args=None):
    """Return the subset of parameters worth logging in non-verbose mode.

    :param block_id: id of the block
    :param block_dict: parameters for this module
    :param extra_args: optional dict with chaining arguments and caller info
    """
    log.debug('get_filtered_params_to_log for id: {0}'.format(block_id))

    # Prefer a chained value; fall back to the configured argument.
    service_name = (runner_utils.get_chained_param(extra_args)
                    or runner_utils.get_param_for_module(block_id, block_dict, 'name'))
    return {'name': service_name}
def get_failure_reason(block_id, block_dict, extra_args=None):
    """Describe the action this check performed, for failure reporting.

    :param block_id: id of the block
    :param block_dict: parameters for this module
    :param extra_args: optional dict with chaining arguments and caller info
    :return: human-readable description string
    """
    service_name = runner_utils.get_param_for_module(block_id, block_dict, 'name')
    return "Fetching service information for '{0}'".format(service_name)
| 31.104027 | 126 | 0.682814 |
7225e39c09496534d23fe8749debd4eeba6a8d36 | 8,256 | py | Python | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_switch_controller_sflow.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_switch_controller_sflow.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_switch_controller_sflow.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_sflow
short_description: Configure FortiSwitch sFlow in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and sflow category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.9"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
switch_controller_sflow:
description:
- Configure FortiSwitch sFlow.
default: null
type: dict
suboptions:
collector_ip:
description:
- Collector IP.
type: str
collector_port:
description:
- SFlow collector port (0 - 65535).
type: int
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure FortiSwitch sFlow.
fortios_switch_controller_sflow:
vdom: "{{ vdom }}"
switch_controller_sflow:
collector_ip: "<your_own_value>"
collector_port: "4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_switch_controller_sflow_data(json):
    """Keep only the sflow options that are present and non-None in *json*."""
    option_list = ['collector_ip', 'collector_port']
    return {
        option: json[option]
        for option in option_list
        if option in json and json[option] is not None
    }
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing '_' with '-' (FortiOS naming).

    Lists are converted element-by-element in place; dicts are rebuilt with
    converted keys; any other value is returned unchanged.
    """
    if isinstance(data, list):
        for index in range(len(data)):
            data[index] = underscore_to_hyphen(data[index])
        return data
    if isinstance(data, dict):
        return {
            key.replace('_', '-'): underscore_to_hyphen(value)
            for key, value in data.items()
        }
    return data
def switch_controller_sflow(data, fos):
    """Push the filtered, hyphen-keyed sFlow configuration to the device."""
    vdom = data['vdom']
    payload = underscore_to_hyphen(
        filter_switch_controller_sflow_data(data['switch_controller_sflow'])
    )
    return fos.set('switch-controller',
                   'sflow',
                   data=payload,
                   vdom=vdom)
def is_successful_status(status):
    """True for an explicit success, or for a DELETE that hit a missing object."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
    """Dispatch the switch-controller request and summarise the outcome.

    Returns a tuple (is_error, has_changed, raw_response).
    """
    if data['switch_controller_sflow']:
        resp = switch_controller_sflow(data, fos)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('switch_controller_sflow'))

    succeeded = is_successful_status(resp)
    changed = resp['status'] == "success" and (
        resp['revision_changed'] if 'revision_changed' in resp else True
    )
    return not succeeded, changed, resp
def main():
    """Ansible module entry point: parse arguments, dispatch, and exit."""
    mkeyname = None
    # Argument specification mirroring the DOCUMENTATION block above.
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "switch_controller_sflow": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "collector_ip": {"required": False, "type": "str"},
                "collector_port": {"required": False, "type": "int"}
            }
        }
    }

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    versions_check_result = None
    # A socket path is required to reach the device over httpapi.
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])

        fos = FortiOSHandler(connection, module, mkeyname)

        is_error, has_changed, result = fortios_switch_controller(module.params, fos)
        versions_check_result = connection.get_system_version()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)

    # A schema/FortiOS version mismatch is reported as a warning, not a failure.
    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")

    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| 31.154717 | 137 | 0.677204 |
43a01f6f2d5a3896a1e37fbcfcf2207d623f989e | 325 | py | Python | reactivestreams/__init__.py | aksalj/rsocket-py | dd9d554621c60965efbaf18caa8a290da1680d53 | [
"MIT"
] | 50 | 2018-08-11T06:51:47.000Z | 2022-03-31T20:22:58.000Z | reactivestreams/__init__.py | linux-china/rsocket-py | 77ab3f826cb78c5e30de6035b818218bca787297 | [
"MIT"
] | 21 | 2017-04-17T21:44:40.000Z | 2022-03-31T09:20:49.000Z | reactivestreams/__init__.py | JIAWea/rsocket-py | c264f60eb1dcb3edff09b220dd049a94ff0771b8 | [
"MIT"
] | 21 | 2017-05-30T20:40:32.000Z | 2022-02-01T22:11:01.000Z | """
ReactiveStreams
~~~~~~~~~~~~~~~
Abstract base class definitions for ReactiveStreams Publisher/Subscriber.
"""
from reactivestreams.publisher import Publisher
from reactivestreams.subscriber import Subscriber
from reactivestreams.subscription import Subscription
def reactivestreams():
    """Return the package identification string (name and version)."""
    return 'reactivestreams-0.1'
| 21.666667 | 72 | 0.790769 |
cc2ee40cfc15df0fe44c1ed4549255d296eb785e | 333 | py | Python | cbsdng/schemas/plan.py | mekanix/cbsdng-backend | 0c0058521ffb97b0dd9948a257290c5489ae5216 | [
"BSD-2-Clause"
] | null | null | null | cbsdng/schemas/plan.py | mekanix/cbsdng-backend | 0c0058521ffb97b0dd9948a257290c5489ae5216 | [
"BSD-2-Clause"
] | 5 | 2020-10-19T21:54:55.000Z | 2020-11-20T13:02:49.000Z | cbsdng/schemas/plan.py | mekanix/cbsdng-backend | 0c0058521ffb97b0dd9948a257290c5489ae5216 | [
"BSD-2-Clause"
] | 1 | 2020-10-10T18:00:39.000Z | 2020-10-10T18:00:39.000Z | import sys
from freenit.schemas.base import BaseSchema
from freenit.schemas.paging import PageOutSchema
from marshmallow import fields
class PlanSchema(BaseSchema):
    """Serialization schema for a plan."""

    # Primary key; only emitted in responses, never accepted from clients.
    id = fields.Integer(description='ID', dump_only=True)
    # Human-readable plan name.
    name = fields.String()
    # Memory allocation for the plan — unit not specified here; TODO confirm.
    memory = fields.Integer()


# Presumably registers a paginated output variant of PlanSchema on this
# module — verify against freenit.schemas.paging.PageOutSchema.
PageOutSchema(PlanSchema, sys.modules[__name__])
| 23.785714 | 57 | 0.777778 |
c8638cc73a32ef61ab440922c730244e1f1f7e31 | 3,238 | py | Python | sentry_sdk/integrations/django/templates.py | rik/sentry-python | b72292c3575c0b115bb684f1f374fa333e365a78 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/templates.py | rik/sentry-python | b72292c3575c0b115bb684f1f374fa333e365a78 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/templates.py | rik/sentry-python | b72292c3575c0b115bb684f1f374fa333e365a78 | [
"BSD-2-Clause"
] | null | null | null | from django.template import TemplateSyntaxError # type: ignore
MYPY = False
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
try:
# support Django 1.9
from django.template.base import Origin # type: ignore
except ImportError:
# backward compatibility
from django.template.loader import LoaderOrigin as Origin # type: ignore
def get_template_frame_from_exception(exc_value):
    # type: (Optional[BaseException]) -> Optional[Dict[str, Any]]
    """Extract a Django-template stack frame dict from *exc_value*, if any.

    Probes, in order: the newer ``template_debug`` attribute, the
    ``django_template_source`` attribute, then the legacy
    ``TemplateSyntaxError.source`` tuple. Returns None when no template
    information is attached.
    """
    # As of Django 1.9 or so the new template debug thing showed up.
    if hasattr(exc_value, "template_debug"):
        return _get_template_frame_from_debug(exc_value.template_debug)  # type: ignore

    # As of r16833 (Django) all exceptions may contain a
    # ``django_template_source`` attribute (rather than the legacy
    # ``TemplateSyntaxError.source`` check)
    if hasattr(exc_value, "django_template_source"):
        return _get_template_frame_from_source(
            exc_value.django_template_source  # type: ignore
        )

    if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, "source"):
        source = exc_value.source
        # Legacy format: an (Origin, (start, end)) pair.
        if isinstance(source, (tuple, list)) and isinstance(source[0], Origin):
            return _get_template_frame_from_source(source)

    return None
def _get_template_frame_from_debug(debug):
# type: (Dict[str, Any]) -> Dict[str, Any]
if debug is None:
return None
lineno = debug["line"]
filename = debug["name"]
if filename is None:
filename = "<django template>"
pre_context = []
post_context = []
context_line = None
for i, line in debug["source_lines"]:
if i < lineno:
pre_context.append(line)
elif i > lineno:
post_context.append(line)
else:
context_line = line
return {
"filename": filename,
"lineno": lineno,
"pre_context": pre_context[-5:],
"post_context": post_context[:5],
"context_line": context_line,
"in_app": True,
}
def _linebreak_iter(template_source):
yield 0
p = template_source.find("\n")
while p >= 0:
yield p + 1
p = template_source.find("\n", p + 1)
def _get_template_frame_from_source(source):
    """Build a frame dict from a legacy ``(origin, (start, end))`` source tuple.

    Reloads the template via the origin, locates the line containing the
    [start, end) character range, and returns filename/lineno/context.
    Returns None when no source is given or no line matches the range.
    """
    if not source:
        return None

    origin, (start, end) = source
    filename = getattr(origin, "loadname", None)
    if filename is None:
        filename = "<django template>"
    template_source = origin.reload()

    lineno = None
    upto = 0
    pre_context = []
    post_context = []
    context_line = None

    # Renamed loop variable from `next` — it shadowed the builtin.
    # Walk line-start offsets; the line fully containing [start, end) becomes
    # the context line, everything before/after goes to pre/post context.
    for num, line_start in enumerate(_linebreak_iter(template_source)):
        line = template_source[upto:line_start]
        if start >= upto and end <= line_start:
            lineno = num
            context_line = line
        elif lineno is None:
            pre_context.append(line)
        else:
            post_context.append(line)

        upto = line_start

    if context_line is None or lineno is None:
        return None

    return {
        "filename": filename,
        "lineno": lineno,
        "pre_context": pre_context[-5:],
        "post_context": post_context[:5],
        "context_line": context_line,
    }
| 27.675214 | 87 | 0.634033 |
bfe6003c8e6ebe10d36e34c932517c16ee1b6bc0 | 10,907 | py | Python | contrib/testgen/gen_key_io_test_vectors.py | BakedInside/test | c411891206e72c0da9c9f7a69a2183703b71a988 | [
"MIT"
] | null | null | null | contrib/testgen/gen_key_io_test_vectors.py | BakedInside/test | c411891206e72c0da9c9f7a69a2183703b71a988 | [
"MIT"
] | null | null | null | contrib/testgen/gen_key_io_test_vectors.py | BakedInside/test | c411891206e72c0da9c9f7a69a2183703b71a988 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2012-2018 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58/bech32(m) address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET, Encoding
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_3 = 0x53
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
p2tr_prefix = (OP_1, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'signet', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'signet', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, encoding, output_prefix
('bc', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix),
('bc', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix),
('bc', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix),
('bc', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)),
('tb', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix),
('tb', 0, 32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix),
('tb', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix),
('tb', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)),
('tb', 0, 20, (False, 'signet', None, True), Encoding.BECH32, p2wpkh_prefix),
('tb', 0, 32, (False, 'signet', None, True), Encoding.BECH32, p2wsh_prefix),
('tb', 1, 32, (False, 'signet', None, True), Encoding.BECH32M, p2tr_prefix),
('tb', 3, 32, (False, 'signet', None, True), Encoding.BECH32M, (OP_3, 32)),
('bcrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix),
('bcrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix),
('bcrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix),
('bcrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, Encoding.BECH32, False, False, False),
('bt', 1, 32, Encoding.BECH32M, False, False, False),
('tb', 17, 32, Encoding.BECH32M, False, False, False),
('bcrt', 3, 1, Encoding.BECH32M, False, False, False),
('bc', 15, 41, Encoding.BECH32M, False, False, False),
('tb', 0, 16, Encoding.BECH32, False, False, False),
('bcrt', 0, 32, Encoding.BECH32, True, False, False),
('bc', 0, 16, Encoding.BECH32, True, False, False),
('tb', 0, 32, Encoding.BECH32, False, True, False),
('bcrt', 0, 20, Encoding.BECH32, False, False, True),
('bc', 0, 20, Encoding.BECH32M, False, False, False),
('tb', 0, 32, Encoding.BECH32M, False, False, False),
('bcrt', 0, 20, Encoding.BECH32M, False, False, False),
('bc', 1, 32, Encoding.BECH32, False, False, False),
('tb', 2, 16, Encoding.BECH32, False, False, False),
('bcrt', 16, 20, Encoding.BECH32, False, False, False),
]
def is_valid(v):
    '''Check vector v for validity'''
    # A character outside the base58 alphabet rules out base58 entirely,
    # so only the bech32 interpretation remains.
    if not set(v) <= set(b58chars):
        return is_valid_bech32(v)
    decoded = b58decode_chk(v)
    if decoded is None:
        return is_valid_bech32(v)
    # Accept when the decoded payload matches any known prefix/suffix template
    # with exactly the payload length that template prescribes.
    for prefix_bytes, payload_len, suffix_bytes, *_ in templates:
        prefix = bytearray(prefix_bytes)
        suffix = bytearray(suffix_bytes)
        if (decoded.startswith(prefix) and decoded.endswith(suffix)
                and len(decoded) == len(prefix) + payload_len + len(suffix)):
            return True
    return is_valid_bech32(v)
def is_valid_bech32(v):
    '''Check vector v for bech32 validity'''
    # Valid iff v decodes as a segwit address under any known prefix.
    return any(
        decode_segwit_address(hrp, v) != (None, None)
        for hrp in ('bc', 'tb', 'bcrt')
    )
def gen_valid_base58_vector(template):
    '''Generate valid base58 vector'''
    # Encode prefix|payload|suffix with a checksum, and report the payload
    # framed with the template's destination prefix/suffix for comparison.
    src_prefix, payload_size, src_suffix, _meta, dst_prefix_bytes, dst_suffix_bytes = template[:6]
    payload = bytearray(os.urandom(payload_size))
    encoded = b58encode_chk(bytearray(src_prefix) + payload + bytearray(src_suffix))
    return encoded, bytearray(dst_prefix_bytes) + payload + bytearray(dst_suffix_bytes)
def gen_valid_bech32_vector(template):
    '''Generate valid bech32 vector'''
    # Random witness program of the prescribed size, encoded with the
    # template's encoding (bech32 for v0, bech32m for v1+).
    hrp, witver, witprog_size, _meta, encoding, dst_prefix = template[:6]
    witprog = bytearray(os.urandom(witprog_size))
    encoded = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
    return encoded, bytearray(dst_prefix) + witprog
def gen_valid_vectors():
    '''Generate valid test vectors'''
    # Cycle forever: all base58 templates first, then all bech32 templates.
    generators = [gen_valid_base58_vector, gen_valid_bech32_vector]
    template_lists = [templates, bech32_templates]
    while True:
        for generator, template_list in zip(generators, template_lists):
            for template in template_list:
                encoded, payload = generator(template)
                assert is_valid(encoded)
                # template[3] holds per-template metadata values; None means
                # "omit this key from the vector's metadata dict".
                metadata = {key: value
                            for key, value in zip(metadata_keys, template[3])
                            if value is not None}
                yield (encoded, payload.hex(), metadata)
def gen_invalid_base58_vector(template):
    '''Generate possibly invalid vector.

    Kinds of invalidity introduced (each independently, with low probability):
      - invalid prefix
      - invalid payload length
      - invalid (randomized) suffix (add random data)
      - corrupt checksum (via character append/replacement)

    The caller filters the result through is_valid(), so vectors that happen
    to remain valid are simply discarded.
    '''
    corrupt_prefix = randbool(0.2)
    randomize_payload_size = randbool(0.2)
    corrupt_suffix = randbool(0.2)

    if corrupt_prefix:
        prefix = os.urandom(1)
    else:
        prefix = bytearray(template[0])

    if randomize_payload_size:
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])

    if corrupt_suffix:
        suffix = os.urandom(len(template[2]))
    else:
        suffix = bytearray(template[2])

    val = b58encode_chk(prefix + payload + suffix)
    if random.randint(0, 10) < 1:  # line corruption
        if randbool():  # add random character to end
            val += random.choice(b58chars)
        else:  # replace random character in the middle
            # FIX: randint is inclusive at both ends; the previous upper bound
            # of len(val) could pick a position one past the end, silently
            # appending a character instead of replacing one.
            n = random.randint(0, len(val) - 1)
            val = val[0:n] + random.choice(b58chars) + val[n + 1:]

    return val
def gen_invalid_bech32_vector(template):
    '''Generate possibly invalid bech32 vector.

    template fields: (hrp, witver, witprog_size, encoding,
                      invalid_bech32, invalid_checksum, invalid_char)
    '''
    no_data = randbool(0.1)
    to_upper = randbool(0.1)
    hrp = template[0]
    witver = template[1]
    witprog = bytearray(os.urandom(template[2]))
    encoding = template[3]

    if no_data:
        # hrp + checksum only, no data part at all
        rv = bech32_encode(encoding, hrp, [])
    else:
        data = [witver] + convertbits(witprog, 8, 5)
        # template[4]: break the 5-bit padding.  (The original also tested
        # `not no_data` here, which is always true in this branch — removed.)
        if template[4]:
            if template[2] % 5 in {2, 4}:
                data[-1] |= 1
            else:
                data.append(0)
        rv = bech32_encode(encoding, hrp, data)

    if template[5]:
        # corrupt the checksum: replace one of its 6 trailing characters
        i = len(rv) - random.randrange(1, 7)
        rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]

    if template[6]:
        # introduce mixed case inside the data part
        i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
        rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]

    if to_upper:
        rv = rv.swapcase()

    return rv
def randbool(p=0.5):
    '''Return True with P(p)'''
    # random.random() is uniform on [0, 1), so the comparison is True
    # with probability exactly p.
    return p > random.random()
def gen_invalid_vectors():
    '''Generate invalid test vectors'''
    # start with some manual edge-cases
    yield "",
    yield "x",
    # Pair each generator with its template list, flattened so that all
    # base58 templates come first, then all bech32 templates.
    cases = [
        (template, generator)
        for generator, template_list in zip(
            [gen_invalid_base58_vector, gen_invalid_bech32_vector],
            [templates, bech32_ng_templates])
        for template in template_list
    ]
    while True:
        for template, generator in cases:
            candidate = generator(template)
            # only emit vectors that truly fail validation
            if not is_valid(candidate):
                yield candidate,
if __name__ == '__main__':
    # Usage: script.py [valid|invalid] [count]
    # Writes `count` generated vectors as JSON to stdout.
    import sys
    import json
    iters = {'valid': gen_valid_vectors, 'invalid': gen_invalid_vectors}
    try:
        uiter = iters[sys.argv[1]]
    except (IndexError, KeyError):
        # FIX: KeyError was previously uncaught, so an unrecognized mode
        # (e.g. `script.py bogus 5`) crashed instead of falling back to
        # the default, like a missing argument does.
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| 40.546468 | 133 | 0.617677 |
9ac885b24f11ca984ce7f56bc3020da12e4a1216 | 1,001 | py | Python | th2_common/schema/strategy/field_extraction/impl/th2_batch_msg_field_extraction.py | ConnectDIY/th2-common-py | 977758a68d6a7db91ee38c36667e90bf663f14ef | [
"Apache-2.0"
] | null | null | null | th2_common/schema/strategy/field_extraction/impl/th2_batch_msg_field_extraction.py | ConnectDIY/th2-common-py | 977758a68d6a7db91ee38c36667e90bf663f14ef | [
"Apache-2.0"
] | 18 | 2020-11-23T12:11:31.000Z | 2022-03-29T06:13:19.000Z | th2_common/schema/strategy/field_extraction/impl/th2_batch_msg_field_extraction.py | ConnectDIY/th2-common-py | 977758a68d6a7db91ee38c36667e90bf663f14ef | [
"Apache-2.0"
] | 1 | 2021-01-20T11:21:57.000Z | 2021-01-20T11:21:57.000Z | # Copyright 2020-2020 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import th2_grpc_common.common_pb2
from google.protobuf.message import Message
from th2_common.schema.strategy.field_extraction.abstract_th2_msg_field_extraction import AbstractTh2MsgFieldExtraction
class Th2BatchMsgFieldExtraction(AbstractTh2MsgFieldExtraction):
    # Identity extraction strategy: the incoming protobuf message is
    # returned unchanged rather than being unpacked or transformed.
    def parse_message(self, message: Message) -> th2_grpc_common.common_pb2.Message:
        """Return *message* as-is; no field extraction is performed."""
        return message
| 38.5 | 119 | 0.782218 |
c5bd2066c894cddcce53af2c38fc0c744f3ab64c | 1,214 | py | Python | elasticsearch/client/monitoring.py | shdkpr2008/elasticsearch-py | 625c834d5b59f943c48c16565fdbc6e77ed8f3c1 | [
"Apache-2.0"
] | 2 | 2020-11-26T06:07:28.000Z | 2021-09-17T01:33:56.000Z | elasticsearch/client/monitoring.py | shdkpr2008/elasticsearch-py | 625c834d5b59f943c48c16565fdbc6e77ed8f3c1 | [
"Apache-2.0"
] | 3 | 2020-11-19T11:43:34.000Z | 2021-04-26T06:37:31.000Z | elasticsearch/client/monitoring.py | shdkpr2008/elasticsearch-py | 625c834d5b59f943c48c16565fdbc6e77ed8f3c1 | [
"Apache-2.0"
] | 2 | 2021-09-17T01:34:09.000Z | 2021-09-18T09:09:02.000Z | from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
    @query_params("interval", "system_api_version", "system_id")
    def bulk(self, body, doc_type=None, params=None, headers=None):
        """
        Send a bulk of monitoring data to the ``_monitoring/bulk`` endpoint.

        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html>`_

        :arg body: The operation definition and data (action-data
            pairs), separated by newlines
        :arg doc_type: Default document type for items which don't
            provide one
        :arg interval: Collection interval (e.g., '10s' or '10000ms') of
            the payload
        :arg system_api_version: API Version of the monitored system
        :arg system_id: Identifier of the monitored system
        :raises ValueError: if ``body`` is an empty/sentinel value
        """
        # body is mandatory; SKIP_IN_PATH holds the empty-value sentinels.
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")

        # Serialize the newline-delimited bulk payload before sending.
        body = _bulk_body(self.transport.serializer, body)
        return self.transport.perform_request(
            "POST",
            _make_path("_monitoring", doc_type, "bulk"),
            params=params,
            headers=headers,
            body=body,
        )
| 40.466667 | 110 | 0.64827 |
07ca410c7fdd644fa22256b6d2d4232813b5cb73 | 1,255 | py | Python | hoodie/migrations/0004_auto_20181024_0641.py | danalvin/Django-IP4 | 708cfc17ba0165adb9f09b84360c6ac05cefdf76 | [
"MIT"
] | null | null | null | hoodie/migrations/0004_auto_20181024_0641.py | danalvin/Django-IP4 | 708cfc17ba0165adb9f09b84360c6ac05cefdf76 | [
"MIT"
] | null | null | null | hoodie/migrations/0004_auto_20181024_0641.py | danalvin/Django-IP4 | 708cfc17ba0165adb9f09b84360c6ac05cefdf76 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-24 06:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (1.11): alters four fields on the
    # `business` model.  All four become nullable with defaults; the two
    # ForeignKeys point at hoodie.nieghbor and the auth user model.
    # NOTE: generated code — do not hand-edit field definitions.

    dependencies = [
        ('hoodie', '0003_auto_20181023_0443'),
    ]

    operations = [
        migrations.AlterField(
            model_name='business',
            name='email',
            field=models.CharField(default='example@mail.com', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='business',
            name='name',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='business',
            name='neighbor_hood',
            field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hoodbus', to='hoodie.nieghbor'),
        ),
        migrations.AlterField(
            model_name='business',
            name='user',
            field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='business', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 33.026316 | 158 | 0.626295 |
1665333410518757acffe76a8da76e8fb83dc236 | 7,942 | py | Python | wireguard/wireguard.py | juweeks/wireguard-vpn-cdk | 6f5094b1a7f8b7a88ec16897905e2b11d46de727 | [
"MIT"
] | 1 | 2022-02-27T00:41:32.000Z | 2022-02-27T00:41:32.000Z | wireguard/wireguard.py | juweeks/wireguard-vpn-cdk | 6f5094b1a7f8b7a88ec16897905e2b11d46de727 | [
"MIT"
] | 1 | 2022-02-26T23:23:28.000Z | 2022-02-26T23:25:49.000Z | wireguard/wireguard.py | juweeks/wireguard-vpn-cdk | 6f5094b1a7f8b7a88ec16897905e2b11d46de727 | [
"MIT"
] | 1 | 2022-02-27T00:35:23.000Z | 2022-02-27T00:35:23.000Z | import aws_cdk as cdk
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from constructs import Construct
class Network(Construct):
    """
    Non-instance-specific network resources: VPC, subnets, etc.

    The default NAT gateways are removed (saves $$$); instances in the
    public subnet route through the Internet Gateway instead.
    """

    def __init__(self, scope: Construct, id_: str):
        super().__init__(scope, id_)

        # Single-AZ VPC with one public /24 subnet and no NAT gateway.
        self._vpc = ec2.Vpc(
            self,
            id="vpc",
            max_azs=1,
            vpc_name="wireguard_vpn",
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    name="public",
                    cidr_mask=24,
                    reserved=False,
                    subnet_type=ec2.SubnetType.PUBLIC,
                ),
            ],
        )

    @property
    def vpc(self) -> ec2.Vpc:
        # The VPC shared by the instance-level constructs.
        return self._vpc
class NetworkInstance(Construct):
    """
    Instance-specific network resources: Elastic IP and Security Group.
    """

    def __init__(self, scope: Construct, id_: str, network: Network):
        super().__init__(scope, id_)
        self._eip = None
        self._eip_export = None
        self._security_group = None
        self.network = network
        # Order matters: the export references the EIP created just before it.
        self.create_eip()
        self.create_eip_export()
        self.create_security_group()

    @property
    def vpc(self) -> ec2.Vpc:
        return self.network.vpc

    def create_eip(self) -> None:
        # Static public address, so client configs keep working if the
        # instance is replaced.
        self._eip = ec2.CfnEIP(self, id="eip", domain="vpc")

    @property
    def eip(self) -> ec2.CfnEIP:
        return self._eip

    def create_eip_export(self) -> None:
        # CloudFormation output/export carrying the VPN's public IP.
        self._eip_export = cdk.CfnOutput(
            self,
            id="eipoutput",
            description="wireguard vpn ip address",
            export_name="WireguardEIPAddress",
            value=self.eip.ref,
        )

    def create_security_group(self) -> None:
        """
        Only port 51820/udp needs to be open for WireGuard; SSH access is
        done via EC2 Session Manager instead of port 22.
        """
        self._security_group = ec2.SecurityGroup(
            self,
            id="sg",
            vpc=self.network.vpc,
            description="wireguard vpn",
            security_group_name="wireguard-vpn",
        )
        # Allow WireGuard traffic (handshake + data) from anywhere.
        self._security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(), connection=ec2.Port.udp(51820), description="for wireguard vpn operations"
        )

    @property
    def security_group(self) -> ec2.SecurityGroup:
        return self._security_group
class Instance(Construct):
    """
    Creates the EC2 instance, its user data, and the cfn-init commands
    that install and configure WireGuard at first boot.
    """

    def __init__(self, scope: Construct, id_: str, network_instance: NetworkInstance):
        super().__init__(scope, id_)
        self._vpn = None
        self.network_instance = network_instance
        self.create_instance()

    @property
    def ssm_param_name_for_keys(self) -> str:
        # SSM parameter name under which vpn_init.sh publishes the client
        # configuration (see the env passed to the shell command below).
        return "VPNClientConfigs"

    @property
    def user_data(self) -> ec2.UserData:
        """
        Global instance configuration, shipped verbatim from the repo's
        user_data.sh; runs once at first boot.
        """
        _user_data = ec2.UserData.for_linux()
        with open("wireguard/config/user_data.sh") as f:
            data = f.read()
        _user_data.add_commands(data)
        return _user_data

    @property
    def cfn_init(self) -> ec2.CloudFormationInit:
        # Step 1: install WireGuard packages (EPEL repo + kernel headers).
        yum_preinstall = ec2.InitConfig(
            [
                ec2.InitPackage.rpm("https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"),
                ec2.InitPackage.yum("wireguard-dkms"),
                ec2.InitPackage.yum("wireguard-tools"),
                ec2.InitPackage.yum("kernel-devel"),
            ]
        )
        # Step 2: place server/client configs, the systemd unit, and the
        # init script on the instance with root-only permissions.
        files = ec2.InitConfig(
            [
                ec2.InitFile.from_file_inline(
                    target_file_name="/etc/wireguard/wg0.conf",
                    source_file_name="wireguard/config/wg0.conf",
                    mode="000600",
                    owner="root",
                    group="root",
                ),
                ec2.InitFile.from_file_inline(
                    target_file_name="/tmp/wg0-client.conf",
                    source_file_name="wireguard/config/wg0-client.conf",
                    mode="000600",
                    owner="root",
                    group="root",
                ),
                ec2.InitFile.from_file_inline(
                    target_file_name="/usr/lib/systemd/system/wg-quick@.service",
                    source_file_name="wireguard/config/wg-quick@.service",
                    mode="000644",
                    owner="root",
                    group="root",
                ),
                ec2.InitFile.from_file_inline(
                    target_file_name="/etc/wireguard/vpn_init.sh",
                    source_file_name="wireguard/config/vpn_init.sh",
                    mode="000600",
                    owner="root",
                    group="root",
                ),
            ]
        )
        # Step 3: run the init script; it gets the Elastic IP and the SSM
        # parameter name so it can publish client configs.
        run_commands = ec2.InitConfig(
            [
                ec2.InitCommand.shell_command(
                    shell_command="sh /etc/wireguard/vpn_init.sh && wg",
                    env=dict(
                        ELASTIC_IP=self.network_instance.eip.ref,
                        SSM_PARAM_NAME_FOR_KEYS=self.ssm_param_name_for_keys,
                        AWS_DEFAULT_REGION=cdk.Aws.REGION,
                    ),
                )
            ]
        )
        init_config = ec2.CloudFormationInit.from_config_sets(
            config_sets={
                # Applies the configs below in this order
                "default": ["yum_preinstall", "files", "run_commands"]
            },
            configs={"yum_preinstall": yum_preinstall, "files": files, "run_commands": run_commands},
        )
        return init_config

    def create_instance(self) -> None:
        """
        t3a.nano on the latest Amazon Linux 2 AMI (resolved via SSM),
        with permissions to push to SSM Parameter Store and to be reached
        through Session Manager (no SSH port needed).
        """
        self._vpn = ec2.Instance(
            self,
            id="vpn",
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3_AMD, ec2.InstanceSize.NANO),
            machine_image=ec2.MachineImage.from_ssm_parameter(
                parameter_name="/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
            ),
            vpc=self.network_instance.vpc,
            instance_name="wireguard_vpn",
            security_group=self.network_instance.security_group,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            user_data=self.user_data,
            init=self.cfn_init,
        )
        # Allow vpn_init.sh to publish the client config to SSM.
        self._vpn.role.add_to_principal_policy(iam.PolicyStatement(actions=["ssm:PutParameter"], resources=["*"]))
        # Enable Session Manager access in lieu of SSH.
        self._vpn.role.add_managed_policy(
            policy=iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore")
        )
        # Attach the pre-allocated Elastic IP to the instance.
        ec2.CfnEIPAssociation(
            self,
            id="eipassoc",
            eip=self.network_instance.eip.ref,
            instance_id=self._vpn.instance_id,
        )
class WireguardVPN(cdk.Stack):
    # Top-level stack wiring: VPC -> EIP/security group -> EC2 instance,
    # plus a console link for retrieving the generated client keys.
    def __init__(self, scope: Construct, construct_id: str, **kwargs):
        super().__init__(scope, construct_id, **kwargs)
        n = Network(self, "network")
        n_i = NetworkInstance(self, "networkinstance", network=n)
        i = Instance(self, "instance", network_instance=n_i)
        i.node.add_dependency(n_i)  # make sure the elastic ip is created before cfn_init runs
        cdk.CfnOutput(
            self,
            id="vpnclientkeysoutput",
            value=f"https://console.aws.amazon.com/systems-manager/parameters/"
            f"{i.ssm_param_name_for_keys}/description?region={self.region}",
            description="link to SSM parameter to obtain your VPN keys to add to the Wireguard client",
            export_name="SSMKeysWireguardVPNClient",
        )
| 33.091667 | 114 | 0.561823 |
b95c436717830c0030e8bca48c05e6b5381f4cda | 2,364 | py | Python | reader/things3d.py | klens-codes/MaskFlownet | 1d7e0c33dcd47888f67e86829f835e742980b2ea | [
"MIT"
] | null | null | null | reader/things3d.py | klens-codes/MaskFlownet | 1d7e0c33dcd47888f67e86829f835e742980b2ea | [
"MIT"
] | null | null | null | reader/things3d.py | klens-codes/MaskFlownet | 1d7e0c33dcd47888f67e86829f835e742980b2ea | [
"MIT"
] | null | null | null | import os
import numpy as np
from functools import lru_cache
import struct
# ======== PLEASE MODIFY ========
things3d_root = r'/data2/opticalflow/datasets/FlyingThings/'
def list_data(path=None, sub_type='clean'):
    """Enumerate FlyingThings frame pairs and their flow files.

    Returns a dict with parallel lists 'image_0', 'image_1' and 'flow':
    entry i pairs a source frame, its successor/predecessor, and the flow
    file connecting them.
    """
    if path is None:
        path = things3d_root
    dataset = dict()
    dataset['image_0'] = []
    dataset['image_1'] = []
    dataset['flow'] = []
    for part in ('train',):  # 'val' intentionally excluded
        for st in (sub_type,):
            for camera in ('left', 'right'):
                for orient in ('into_future', 'into_past'):
                    # flow direction determines which neighbouring frame pairs up
                    step = 1 if orient == 'into_future' else -1
                    image_dir = os.path.join(path, part, 'image_' + st, camera)
                    flow_dir = os.path.join(path, part, 'flow', camera, orient)
                    for flow_name in os.listdir(flow_dir):
                        dataset['flow'].append(os.path.join(flow_dir, flow_name))
                        image_name = flow_name.replace('flo', 'png')
                        dataset['image_0'].append(os.path.join(image_dir, image_name))
                        # frame index is the 7-digit run before the extension
                        frame = int(flow_name[-11:-4])
                        partner = image_name.replace('%07d' % frame, '%07d' % (frame + step))
                        dataset['image_1'].append(os.path.join(image_dir, partner))
    return dataset
class Flo:
    """Reader/writer for Middlebury `.flo` optical-flow files of a fixed size.

    The 12-byte header is the magic float 202021.25 (bytes b'PIEH' on a
    little-endian machine) followed by width and height as int32.
    """

    def __init__(self, w, h):
        self.__floec1__ = float(202021.25)
        self.__floec2__ = int(w)
        self.__floec3__ = int(h)
        self.__floheader__ = struct.pack('fii', self.__floec1__, self.__floec2__, self.__floec3__)
        self.__floheaderlen__ = len(self.__floheader__)
        self.__flow__ = w
        self.__floh__ = h
        self.__floshape__ = [self.__floh__, self.__flow__, 2]
        # The magic constant only packs to b'PIEH' on little-endian hosts.
        if self.__floheader__[:4] != b'PIEH':
            raise Exception('Expect machine to be LE.')

    def load(self, file):
        """Read `file` and return an (h, w, 2) float32 flow array."""
        with open(file, 'rb') as fp:
            header = fp.read(self.__floheaderlen__)
            if header != self.__floheader__:
                raise Exception('Bad flow header: ' + file)
            return np.ndarray(shape=self.__floshape__,
                              dtype=np.float32,
                              buffer=fp.read(),
                              order='C')

    def save(self, arr, fname):
        """Write flow array `arr` to `fname`, casting to float32."""
        with open(fname, 'wb') as fp:
            fp.write(self.__floheader__ + arr.astype(np.float32).tobytes())
@lru_cache(maxsize=None)
def load(fname):
    # Cached loader for FlyingThings flow files (fixed 960x540 resolution).
    # NOTE(review): lru_cache returns the *same* ndarray object on repeated
    # calls, so callers must not mutate the result; the unbounded cache also
    # grows with every distinct path.  Paths not ending in 'flo' fall
    # through and return None.
    flo = Flo(960, 540)
    if fname.endswith('flo'):
        return flo.load(fname)
if __name__ == '__main__':
    # Smoke test: enumerate the dataset, report the number of flow files
    # found and the last one listed.
    dataset = list_data()
    print(len(dataset['flow']))
    print(dataset['flow'][-1])
31548a7d7ea6b7aeedda806380c3c956a6b09e99 | 2,421 | py | Python | tests/common.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 9 | 2016-04-04T07:13:02.000Z | 2021-11-08T12:15:33.000Z | tests/common.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 1 | 2020-04-12T18:44:26.000Z | 2020-04-12T18:44:26.000Z | tests/common.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 2 | 2019-02-25T13:54:55.000Z | 2020-04-07T14:00:36.000Z | import sys
from helpers.main_helper import get_root_dir
from helpers.python_ext import is_empty_str
from helpers.shell import execute_shell
BENCHMARKS_DIR = get_root_dir() + "benchmarks/"
def _get_cmd_result(result, out, err):
output = '-' * 20 + 'DETAILS' + '-' * 20 + '\n' + \
"output:" + out + '\n' + \
"error:" + err + '\n\n' + \
"result:" + str(result) + '\n\n' + \
'-' * 40
return output
def _print_failed(reason, cmd_args, result, out, err):
    """Print a failure report: blank line, command details, then the failed
    command and the reason it failed."""
    message = 'FAILED: \n{cmd_args}\nREASON: {reason}'.format(
        cmd_args=cmd_args, reason=reason)
    print()
    print(_get_cmd_result(result, out, err))
    print(message)
    print()
def _print_ok(cmd_args):
    # Success marker for a benchmark run; counterpart of _print_failed.
    print('OK: ', cmd_args)
def _extract_model_size(out:str) -> int or None:
template1 = ' FOUND model for sys of size '
template2 = ' FOUND model for env of size '
for l in [l.strip() for l in out.splitlines() if l.strip()]:
if template1 in l or template2 in l:
return int(l.split()[-1])
return None
def run_benchmark(python_script_relative_path, benchmark, rc_expected, size_expected) -> bool:
    """Run one benchmark through the given script and check the outcome.

    The script is invoked as `python <script> <BENCHMARKS_DIR>/<benchmark>`.
    Success requires: no stderr output, exit code == rc_expected, and — when
    size_expected is truthy — a matching model size parsed from stdout.
    Prints an OK/FAILED report and returns True on success, False otherwise.
    """
    cmd_args = BENCHMARKS_DIR + benchmark
    exec_cmd = '{python3} {program} {args}'.format(python3=sys.executable,
                                                  program=get_root_dir() + python_script_relative_path,
                                                  args=cmd_args)
    result, out, err = execute_shell(exec_cmd)
    if not is_empty_str(err):
        # any stderr output counts as a failure, regardless of exit code
        _print_failed('error while executing the command', exec_cmd, result, out, err)
        return False
    else:
        size_actual = _extract_model_size(out)
        # size_expected of 0/None skips the size comparison
        if result == rc_expected and (not size_expected or size_expected == size_actual):
            _print_ok(cmd_args)
            return True
        else:
            _print_failed('invalid exit status or model size: \n'\
                          ' rc_actual vs rc_expected: {rc_act} vs. {rc_exp}\n'\
                          ' size_actual vs size_expected: {size_act} vs. {size_exp}'.
                          format(rc_act=result,
                                 rc_exp=rc_expected,
                                 size_act=size_actual,
                                 size_exp=size_expected),
                          exec_cmd, result, out, err)
            return False
| 36.134328 | 104 | 0.565882 |
a574d52d10f68079194f3787a05e718fd61d5f11 | 158 | py | Python | src/pgtonic/exceptions.py | olirice/pgtonic | a6218a4fcb1bc6eb2f2abb465b32ac8abcd0c7f4 | [
"MIT"
] | null | null | null | src/pgtonic/exceptions.py | olirice/pgtonic | a6218a4fcb1bc6eb2f2abb465b32ac8abcd0c7f4 | [
"MIT"
] | null | null | null | src/pgtonic/exceptions.py | olirice/pgtonic | a6218a4fcb1bc6eb2f2abb465b32ac8abcd0c7f4 | [
"MIT"
] | null | null | null | class PGTonicException(Exception):
pass
class LexFailureException(PGTonicException):
    """Raised when lexing fails."""
    pass
class ParseFailureException(PGTonicException):
    """Raised when parsing fails."""
    pass
| 14.363636 | 46 | 0.78481 |
fffddcd1aac2534ab8fa6082a2b13a807be5eee1 | 155 | py | Python | cowait/worker/__init__.py | ProgHaj/cowait | e95c30faab8caf8b0413de4e1784529a3a06475d | [
"Apache-2.0"
] | 51 | 2020-06-04T06:08:14.000Z | 2022-03-28T06:59:53.000Z | cowait/worker/__init__.py | ProgHaj/cowait | e95c30faab8caf8b0413de4e1784529a3a06475d | [
"Apache-2.0"
] | 121 | 2020-06-01T12:09:32.000Z | 2022-03-31T20:47:57.000Z | cowait/worker/__init__.py | ProgHaj/cowait | e95c30faab8caf8b0413de4e1784529a3a06475d | [
"Apache-2.0"
] | 6 | 2020-06-11T16:05:20.000Z | 2022-03-23T06:30:17.000Z | # flake8: noqa: F401
from .executor import execute
from .loader import load_task_class
from .env import env_get_cluster_provider, env_get_task_definition
| 25.833333 | 66 | 0.83871 |
8f94a99ec231e3bca8486b3fb2578924371d260d | 33 | py | Python | packages/amplify-e2e-tests/layerdata/python/testfunc.py | rosmu/amplify-cli | 8dc238083c74e4eac6c3e96c31490f071cd1cb28 | [
"Apache-2.0"
] | 6 | 2021-03-19T00:19:29.000Z | 2021-11-08T09:46:12.000Z | packages/amplify-e2e-tests/layerdata/python/testfunc.py | rosmu/amplify-cli | 8dc238083c74e4eac6c3e96c31490f071cd1cb28 | [
"Apache-2.0"
] | 1 | 2020-06-08T11:59:37.000Z | 2020-06-08T11:59:37.000Z | packages/amplify-e2e-tests/layerdata/python/testfunc.py | rosmu/amplify-cli | 8dc238083c74e4eac6c3e96c31490f071cd1cb28 | [
"Apache-2.0"
] | 3 | 2020-07-17T01:14:56.000Z | 2021-03-29T09:04:24.000Z | testString = "Hello from Lambda!" | 33 | 33 | 0.757576 |
f01064dee03840624481a0989b1e39f32b05de4e | 53,314 | py | Python | mefamo/custom/face_geometry.py | jobutsu/MeFaMo | 20c747225d8f4fcf05b5e0fe247363cc10e0a642 | [
"MIT"
] | 96 | 2022-03-21T21:57:00.000Z | 2022-03-31T07:05:37.000Z | mefamo/custom/face_geometry.py | jobutsu/MeFaMo | 20c747225d8f4fcf05b5e0fe247363cc10e0a642 | [
"MIT"
] | 1 | 2022-03-29T17:42:09.000Z | 2022-03-29T18:05:06.000Z | mefamo/custom/face_geometry.py | jobutsu/MeFaMo | 20c747225d8f4fcf05b5e0fe247363cc10e0a642 | [
"MIT"
] | 18 | 2022-03-22T08:40:10.000Z | 2022-03-31T10:22:00.000Z | # Many parts taken from the cpp implementation from github.com/google/mediapipe
#
# Copyright 2020 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from: https://github.com/Rassibassi/mediapipeDemos
import numpy as np
class Singleton(type):
    """Metaclass that caches one instance per class.

    The first call to ``Cls(...)`` constructs the instance; every later call
    returns that same object (later constructor arguments are ignored).
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Debugger(metaclass=Singleton):
    # Process-wide debug-flag holder; the Singleton metaclass guarantees a
    # single shared instance.  Call set_debug() before toggle()/get_debug():
    # the `debug` attribute is not initialised in __init__.
    def set_debug(self, debug):
        self.debug = debug
    def toggle(self):
        self.debug = not self.debug
    def get_debug(self):
        return self.debug
# Module-level debug switch; every Debugger() call returns this same
# instance thanks to the Singleton metaclass.  Debugging starts off.
DEBUG = Debugger()
DEBUG.set_debug(False)
class PCF:
    """Perspective Camera Frustum.

    Derives the vertical field of view from the focal length *fy* and the
    frame height, then computes the symmetric frustum extents
    (left/right/bottom/top) at the near plane, preserving the frame's
    aspect ratio.
    """

    def __init__(
        self,
        near=1,
        far=10000,
        frame_height=1920,
        frame_width=1080,
        fy=1074.520446598223,
    ):
        self.near = near
        self.far = far
        self.frame_height = frame_height
        self.frame_width = frame_width
        self.fy = fy

        # Pinhole model: tan(fov_y / 2) = (frame_height / 2) / fy
        self.fov_y = 2 * np.arctan(frame_height / (2 * fy))

        # Half-extents of the frustum cross-section at the near plane.
        half_height = near * np.tan(0.5 * self.fov_y)
        half_width = half_height * frame_width / frame_height

        self.left = -half_width
        self.right = half_width
        self.bottom = -half_height
        self.top = half_height
# from https://github.com/google/mediapipe/blob/master/mediapipe/modules/face_geometry/data/canonical_face_model.obj
canonical_metric_landmarks = np.array(
[
0.000000,
-3.406404,
5.979507,
0.499977,
0.652534,
0.000000,
-1.126865,
7.475604,
0.500026,
0.547487,
0.000000,
-2.089024,
6.058267,
0.499974,
0.602372,
-0.463928,
0.955357,
6.633583,
0.482113,
0.471979,
0.000000,
-0.463170,
7.586580,
0.500151,
0.527156,
0.000000,
0.365669,
7.242870,
0.499910,
0.498253,
0.000000,
2.473255,
5.788627,
0.499523,
0.401062,
-4.253081,
2.577646,
3.279702,
0.289712,
0.380764,
0.000000,
4.019042,
5.284764,
0.499955,
0.312398,
0.000000,
4.885979,
5.385258,
0.499987,
0.269919,
0.000000,
8.261778,
4.481535,
0.500023,
0.107050,
0.000000,
-3.706811,
5.864924,
0.500023,
0.666234,
0.000000,
-3.918301,
5.569430,
0.500016,
0.679224,
0.000000,
-3.994436,
5.219482,
0.500023,
0.692348,
0.000000,
-4.542400,
5.404754,
0.499977,
0.695278,
0.000000,
-4.745577,
5.529457,
0.499977,
0.705934,
0.000000,
-5.019567,
5.601448,
0.499977,
0.719385,
0.000000,
-5.365123,
5.535441,
0.499977,
0.737019,
0.000000,
-6.149624,
5.071372,
0.499968,
0.781371,
0.000000,
-1.501095,
7.112196,
0.499816,
0.562981,
-0.416106,
-1.466449,
6.447657,
0.473773,
0.573910,
-7.087960,
5.434801,
0.099620,
0.104907,
0.254141,
-2.628639,
2.035898,
3.848121,
0.365930,
0.409576,
-3.198363,
1.985815,
3.796952,
0.338758,
0.413025,
-3.775151,
2.039402,
3.646194,
0.311120,
0.409460,
-4.465819,
2.422950,
3.155168,
0.274658,
0.389131,
-2.164289,
2.189867,
3.851822,
0.393362,
0.403706,
-3.208229,
3.223926,
4.115822,
0.345234,
0.344011,
-2.673803,
3.205337,
4.092203,
0.370094,
0.346076,
-3.745193,
3.165286,
3.972409,
0.319322,
0.347265,
-4.161018,
3.059069,
3.719554,
0.297903,
0.353591,
-5.062006,
1.934418,
2.776093,
0.247792,
0.410810,
-2.266659,
-7.425768,
4.389812,
0.396889,
0.842755,
-4.445859,
2.663991,
3.173422,
0.280098,
0.375600,
-7.214530,
2.263009,
0.073150,
0.106310,
0.399956,
-5.799793,
2.349546,
2.204059,
0.209925,
0.391353,
-2.844939,
-0.720868,
4.433130,
0.355808,
0.534406,
-0.711452,
-3.329355,
5.877044,
0.471751,
0.650404,
-0.606033,
-3.924562,
5.444923,
0.474155,
0.680192,
-1.431615,
-3.500953,
5.496189,
0.439785,
0.657229,
-1.914910,
-3.803146,
5.028930,
0.414617,
0.666541,
-1.131043,
-3.973937,
5.189648,
0.450374,
0.680861,
-1.563548,
-4.082763,
4.842263,
0.428771,
0.682691,
-2.650112,
-5.003649,
4.188483,
0.374971,
0.727805,
-0.427049,
-1.094134,
7.360529,
0.486717,
0.547629,
-0.496396,
-0.475659,
7.440358,
0.485301,
0.527395,
-5.253307,
3.881582,
3.363159,
0.257765,
0.314490,
-1.718698,
0.974609,
4.558359,
0.401223,
0.455172,
-1.608635,
-0.942516,
5.814193,
0.429819,
0.548615,
-1.651267,
-0.610868,
5.581319,
0.421352,
0.533741,
-4.765501,
-0.701554,
3.534632,
0.276896,
0.532057,
-0.478306,
0.295766,
7.101013,
0.483370,
0.499587,
-3.734964,
4.508230,
4.550454,
0.337212,
0.282883,
-4.588603,
4.302037,
4.048484,
0.296392,
0.293243,
-6.279331,
6.615427,
1.425850,
0.169295,
0.193814,
-1.220941,
4.142165,
5.106035,
0.447580,
0.302610,
-2.193489,
3.100317,
4.000575,
0.392390,
0.353888,
-3.102642,
-4.352984,
4.095905,
0.354490,
0.696784,
-6.719682,
-4.788645,
-1.745401,
0.067305,
0.730105,
-1.193824,
-1.306795,
5.737747,
0.442739,
0.572826,
-0.729766,
-1.593712,
5.833208,
0.457098,
0.584792,
-2.456206,
-4.342621,
4.283884,
0.381974,
0.694711,
-2.204823,
-4.304508,
4.162499,
0.392389,
0.694203,
-4.985894,
4.802461,
3.751977,
0.277076,
0.271932,
-1.592294,
-1.257709,
5.456949,
0.422552,
0.563233,
-2.644548,
4.524654,
4.921559,
0.385919,
0.281364,
-2.760292,
5.100971,
5.015990,
0.383103,
0.255840,
-3.523964,
8.005976,
3.729163,
0.331431,
0.119714,
-5.599763,
5.715470,
2.724259,
0.229924,
0.232003,
-3.063932,
6.566144,
4.529981,
0.364501,
0.189114,
-5.720968,
4.254584,
2.830852,
0.229622,
0.299541,
-6.374393,
4.785590,
1.591691,
0.173287,
0.278748,
-0.672728,
-3.688016,
5.737804,
0.472879,
0.666198,
-1.262560,
-3.787691,
5.417779,
0.446828,
0.668527,
-1.732553,
-3.952767,
5.000579,
0.422762,
0.673890,
-1.043625,
-1.464973,
5.662455,
0.445308,
0.580066,
-2.321234,
-4.329069,
4.258156,
0.388103,
0.693961,
-2.056846,
-4.477671,
4.520883,
0.403039,
0.706540,
-2.153084,
-4.276322,
4.038093,
0.403629,
0.693953,
-0.946874,
-1.035249,
6.512274,
0.460042,
0.557139,
-1.469132,
-4.036351,
4.604908,
0.431158,
0.692366,
-1.024340,
-3.989851,
4.926693,
0.452182,
0.692366,
-0.533422,
-3.993222,
5.138202,
0.475387,
0.692366,
-0.769720,
-6.095394,
4.985883,
0.465828,
0.779190,
-0.699606,
-5.291850,
5.448304,
0.472329,
0.736226,
-0.669687,
-4.949770,
5.509612,
0.473087,
0.717857,
-0.630947,
-4.695101,
5.449371,
0.473122,
0.704626,
-0.583218,
-4.517982,
5.339869,
0.473033,
0.695278,
-1.537170,
-4.423206,
4.745470,
0.427942,
0.695278,
-1.615600,
-4.475942,
4.813632,
0.426479,
0.703540,
-1.729053,
-4.618680,
4.854463,
0.423162,
0.711846,
-1.838624,
-4.828746,
4.823737,
0.418309,
0.720063,
-2.368250,
-3.106237,
4.868096,
0.390095,
0.639573,
-7.542244,
-1.049282,
-2.431321,
0.013954,
0.560034,
0.000000,
-1.724003,
6.601390,
0.499914,
0.580147,
-1.826614,
-4.399531,
4.399021,
0.413200,
0.695400,
-1.929558,
-4.411831,
4.497052,
0.409626,
0.701823,
-0.597442,
-2.013686,
5.866456,
0.468080,
0.601535,
-1.405627,
-1.714196,
5.241087,
0.422729,
0.585985,
-0.662449,
-1.819321,
5.863759,
0.463080,
0.593784,
-2.342340,
0.572222,
4.294303,
0.372120,
0.473414,
-3.327324,
0.104863,
4.113860,
0.334562,
0.496073,
-1.726175,
-0.919165,
5.273355,
0.411671,
0.546965,
-5.133204,
7.485602,
2.660442,
0.242176,
0.147676,
-4.538641,
6.319907,
3.683424,
0.290777,
0.201446,
-3.986562,
5.109487,
4.466315,
0.327338,
0.256527,
-2.169681,
-5.440433,
4.455874,
0.399510,
0.748921,
-1.395634,
5.011963,
5.316032,
0.441728,
0.261676,
-1.619500,
6.599217,
4.921106,
0.429765,
0.187834,
-1.891399,
8.236377,
4.274997,
0.412198,
0.108901,
-4.195832,
2.235205,
3.375099,
0.288955,
0.398952,
-5.733342,
1.411738,
2.431726,
0.218937,
0.435411,
-1.859887,
2.355757,
3.843181,
0.412782,
0.398970,
-4.988612,
3.074654,
3.083858,
0.257135,
0.355440,
-1.303263,
1.416453,
4.831091,
0.427685,
0.437961,
-1.305757,
-0.672779,
6.415959,
0.448340,
0.536936,
-6.465170,
0.937119,
1.689873,
0.178560,
0.457554,
-5.258659,
0.945811,
2.974312,
0.247308,
0.457194,
-4.432338,
0.722096,
3.522615,
0.286267,
0.467675,
-3.300681,
0.861641,
3.872784,
0.332828,
0.460712,
-2.430178,
1.131492,
4.039035,
0.368756,
0.447207,
-1.820731,
1.467954,
4.224124,
0.398964,
0.432655,
-0.563221,
2.307693,
5.566789,
0.476410,
0.405806,
-6.338145,
-0.529279,
1.881175,
0.189241,
0.523924,
-5.587698,
3.208071,
2.687839,
0.228962,
0.348951,
-0.242624,
-1.462857,
7.071491,
0.490726,
0.562401,
-1.611251,
0.339326,
4.895421,
0.404670,
0.485133,
-7.743095,
2.364999,
-2.005167,
0.019469,
0.401564,
-1.391142,
1.851048,
4.448999,
0.426243,
0.420431,
-1.785794,
-0.978284,
4.850470,
0.396993,
0.548797,
-4.670959,
2.664461,
3.084075,
0.266470,
0.376977,
-1.333970,
-0.283761,
6.097047,
0.439121,
0.518958,
-7.270895,
-2.890917,
-2.252455,
0.032314,
0.644357,
-1.856432,
2.585245,
3.757904,
0.419054,
0.387155,
-0.923388,
0.073076,
6.671944,
0.462783,
0.505747,
-5.000589,
-6.135128,
1.892523,
0.238979,
0.779745,
-5.085276,
-7.178590,
0.714711,
0.198221,
0.831938,
-7.159291,
-0.811820,
-0.072044,
0.107550,
0.540755,
-5.843051,
-5.248023,
0.924091,
0.183610,
0.740257,
-6.847258,
3.662916,
0.724695,
0.134410,
0.333683,
-2.412942,
-8.258853,
4.119213,
0.385764,
0.883154,
-0.179909,
-1.689864,
6.573301,
0.490967,
0.579378,
-2.103655,
-0.163946,
4.566119,
0.382385,
0.508573,
-6.407571,
2.236021,
1.560843,
0.174399,
0.397671,
-3.670075,
2.360153,
3.635230,
0.318785,
0.396235,
-3.177186,
2.294265,
3.775704,
0.343364,
0.400597,
-2.196121,
-4.598322,
4.479786,
0.396100,
0.710217,
-6.234883,
-1.944430,
1.663542,
0.187885,
0.588538,
-1.292924,
-9.295920,
4.094063,
0.430987,
0.944065,
-3.210651,
-8.533278,
2.802001,
0.318993,
0.898285,
-4.068926,
-7.993109,
1.925119,
0.266248,
0.869701,
0.000000,
6.545390,
5.027311,
0.500023,
0.190576,
0.000000,
-9.403378,
4.264492,
0.499977,
0.954453,
-2.724032,
2.315802,
3.777151,
0.366170,
0.398822,
-2.288460,
2.398891,
3.697603,
0.393207,
0.395537,
-1.998311,
2.496547,
3.689148,
0.410373,
0.391080,
-6.130040,
3.399261,
2.038516,
0.194993,
0.342102,
-2.288460,
2.886504,
3.775031,
0.388665,
0.362284,
-2.724032,
2.961810,
3.871767,
0.365962,
0.355971,
-3.177186,
2.964136,
3.876973,
0.343364,
0.355357,
-3.670075,
2.927714,
3.724325,
0.318785,
0.358340,
-4.018389,
2.857357,
3.482983,
0.301415,
0.363156,
-7.555811,
4.106811,
-0.991917,
0.058133,
0.319076,
-4.018389,
2.483695,
3.440898,
0.301415,
0.387449,
0.000000,
-2.521945,
5.932265,
0.499988,
0.618434,
-1.776217,
-2.683946,
5.213116,
0.415838,
0.624196,
-1.222237,
-1.182444,
5.952465,
0.445682,
0.566077,
-0.731493,
-2.536683,
5.815343,
0.465844,
0.620641,
0.000000,
3.271027,
5.236015,
0.499923,
0.351524,
-4.135272,
-6.996638,
2.671970,
0.288719,
0.819946,
-3.311811,
-7.660815,
3.382963,
0.335279,
0.852820,
-1.313701,
-8.639995,
4.702456,
0.440512,
0.902419,
-5.940524,
-6.223629,
-0.631468,
0.128294,
0.791941,
-1.998311,
2.743838,
3.744030,
0.408772,
0.373894,
-0.901447,
1.236992,
5.754256,
0.455607,
0.451801,
0.000000,
-8.765243,
4.891441,
0.499877,
0.908990,
-2.308977,
-8.974196,
3.609070,
0.375437,
0.924192,
-6.954154,
-2.439843,
-0.131163,
0.114210,
0.615022,
-1.098819,
-4.458788,
5.120727,
0.448662,
0.695278,
-1.181124,
-4.579996,
5.189564,
0.448020,
0.704632,
-1.255818,
-4.787901,
5.237051,
0.447112,
0.715808,
-1.325085,
-5.106507,
5.205010,
0.444832,
0.730794,
-1.546388,
-5.819392,
4.757893,
0.430012,
0.766809,
-1.953754,
-4.183892,
4.431713,
0.406787,
0.685673,
-2.117802,
-4.137093,
4.555096,
0.400738,
0.681069,
-2.285339,
-4.051196,
4.582438,
0.392400,
0.677703,
-2.850160,
-3.665720,
4.484994,
0.367856,
0.663919,
-5.278538,
-2.238942,
2.861224,
0.247923,
0.601333,
-0.946709,
1.907628,
5.196779,
0.452770,
0.420850,
-1.314173,
3.104912,
4.231404,
0.436392,
0.359887,
-1.780000,
2.860000,
3.881555,
0.416164,
0.368714,
-1.845110,
-4.098880,
4.247264,
0.413386,
0.692366,
-5.436187,
-4.030482,
2.109852,
0.228018,
0.683572,
-0.766444,
3.182131,
4.861453,
0.468268,
0.352671,
-1.938616,
-6.614410,
4.521085,
0.411362,
0.804327,
0.000000,
1.059413,
6.774605,
0.499989,
0.469825,
-0.516573,
1.583572,
6.148363,
0.479154,
0.442654,
0.000000,
1.728369,
6.316750,
0.499974,
0.439637,
-1.246815,
0.230297,
5.681036,
0.432112,
0.493589,
0.000000,
-7.942194,
5.181173,
0.499886,
0.866917,
0.000000,
-6.991499,
5.153478,
0.499913,
0.821729,
-0.997827,
-6.930921,
4.979576,
0.456549,
0.819201,
-3.288807,
-5.382514,
3.795752,
0.344549,
0.745439,
-2.311631,
-1.566237,
4.590085,
0.378909,
0.574010,
-2.680250,
-6.111567,
4.096152,
0.374293,
0.780185,
-3.832928,
-1.537326,
4.137731,
0.319688,
0.570738,
-2.961860,
-2.274215,
4.440943,
0.357155,
0.604270,
-4.386901,
-2.683286,
3.643886,
0.295284,
0.621581,
-1.217295,
-7.834465,
4.969286,
0.447750,
0.862477,
-1.542374,
-0.136843,
5.201008,
0.410986,
0.508723,
-3.878377,
-6.041764,
3.311079,
0.313951,
0.775308,
-3.084037,
-6.809842,
3.814195,
0.354128,
0.812553,
-3.747321,
-4.503545,
3.726453,
0.324548,
0.703993,
-6.094129,
-3.205991,
1.473482,
0.189096,
0.646300,
-4.588995,
-4.728726,
2.983221,
0.279777,
0.714658,
-6.583231,
-3.941269,
0.070268,
0.133823,
0.682701,
-3.492580,
-3.195820,
4.130198,
0.336768,
0.644733,
-1.255543,
0.802341,
5.307551,
0.429884,
0.466522,
-1.126122,
-0.933602,
6.538785,
0.455528,
0.548623,
-1.443109,
-1.142774,
5.905127,
0.437114,
0.558896,
-0.923043,
-0.529042,
7.003423,
0.467288,
0.529925,
-1.755386,
3.529117,
4.327696,
0.414712,
0.335220,
-2.632589,
3.713828,
4.364629,
0.377046,
0.322778,
-3.388062,
3.721976,
4.309028,
0.344108,
0.320151,
-4.075766,
3.675413,
4.076063,
0.312876,
0.322332,
-4.622910,
3.474691,
3.646321,
0.283526,
0.333190,
-5.171755,
2.535753,
2.670867,
0.241246,
0.382786,
-7.297331,
0.763172,
-0.048769,
0.102986,
0.468763,
-4.706828,
1.651000,
3.109532,
0.267612,
0.424560,
-4.071712,
1.476821,
3.476944,
0.297879,
0.433176,
-3.269817,
1.470659,
3.731945,
0.333434,
0.433878,
-2.527572,
1.617311,
3.865444,
0.366427,
0.426116,
-1.970894,
1.858505,
3.961782,
0.396012,
0.416696,
-1.579543,
2.097941,
4.084996,
0.420121,
0.410228,
-7.664182,
0.673132,
-2.435867,
0.007561,
0.480777,
-1.397041,
-1.340139,
5.630378,
0.432949,
0.569518,
-0.884838,
0.658740,
6.233232,
0.458639,
0.479089,
-0.767097,
-0.968035,
7.077932,
0.473466,
0.545744,
-0.460213,
-1.334106,
6.787447,
0.476088,
0.563830,
-0.748618,
-1.067994,
6.798303,
0.468472,
0.555057,
-1.236408,
-1.585568,
5.480490,
0.433991,
0.582362,
-0.387306,
-1.409990,
6.957705,
0.483518,
0.562984,
-0.319925,
-1.607931,
6.508676,
0.482483,
0.577849,
-1.639633,
2.556298,
3.863736,
0.426450,
0.389799,
-1.255645,
2.467144,
4.203800,
0.438999,
0.396495,
-1.031362,
2.382663,
4.615849,
0.450067,
0.400434,
-4.253081,
2.772296,
3.315305,
0.289712,
0.368253,
-4.530000,
2.910000,
3.339685,
0.276670,
0.363373,
0.463928,
0.955357,
6.633583,
0.517862,
0.471948,
4.253081,
2.577646,
3.279702,
0.710288,
0.380764,
0.416106,
-1.466449,
6.447657,
0.526227,
0.573910,
7.087960,
5.434801,
0.099620,
0.895093,
0.254141,
2.628639,
2.035898,
3.848121,
0.634070,
0.409576,
3.198363,
1.985815,
3.796952,
0.661242,
0.413025,
3.775151,
2.039402,
3.646194,
0.688880,
0.409460,
4.465819,
2.422950,
3.155168,
0.725342,
0.389131,
2.164289,
2.189867,
3.851822,
0.606630,
0.403705,
3.208229,
3.223926,
4.115822,
0.654766,
0.344011,
2.673803,
3.205337,
4.092203,
0.629906,
0.346076,
3.745193,
3.165286,
3.972409,
0.680678,
0.347265,
4.161018,
3.059069,
3.719554,
0.702097,
0.353591,
5.062006,
1.934418,
2.776093,
0.752212,
0.410805,
2.266659,
-7.425768,
4.389812,
0.602918,
0.842863,
4.445859,
2.663991,
3.173422,
0.719902,
0.375600,
7.214530,
2.263009,
0.073150,
0.893693,
0.399960,
5.799793,
2.349546,
2.204059,
0.790082,
0.391354,
2.844939,
-0.720868,
4.433130,
0.643998,
0.534488,
0.711452,
-3.329355,
5.877044,
0.528249,
0.650404,
0.606033,
-3.924562,
5.444923,
0.525850,
0.680191,
1.431615,
-3.500953,
5.496189,
0.560215,
0.657229,
1.914910,
-3.803146,
5.028930,
0.585384,
0.666541,
1.131043,
-3.973937,
5.189648,
0.549626,
0.680861,
1.563548,
-4.082763,
4.842263,
0.571228,
0.682692,
2.650112,
-5.003649,
4.188483,
0.624852,
0.728099,
0.427049,
-1.094134,
7.360529,
0.513050,
0.547282,
0.496396,
-0.475659,
7.440358,
0.515097,
0.527252,
5.253307,
3.881582,
3.363159,
0.742247,
0.314507,
1.718698,
0.974609,
4.558359,
0.598631,
0.454979,
1.608635,
-0.942516,
5.814193,
0.570338,
0.548575,
1.651267,
-0.610868,
5.581319,
0.578632,
0.533623,
4.765501,
-0.701554,
3.534632,
0.723087,
0.532054,
0.478306,
0.295766,
7.101013,
0.516446,
0.499639,
3.734964,
4.508230,
4.550454,
0.662801,
0.282918,
4.588603,
4.302037,
4.048484,
0.703624,
0.293271,
6.279331,
6.615427,
1.425850,
0.830705,
0.193814,
1.220941,
4.142165,
5.106035,
0.552386,
0.302568,
2.193489,
3.100317,
4.000575,
0.607610,
0.353888,
3.102642,
-4.352984,
4.095905,
0.645429,
0.696707,
6.719682,
-4.788645,
-1.745401,
0.932695,
0.730105,
1.193824,
-1.306795,
5.737747,
0.557261,
0.572826,
0.729766,
-1.593712,
5.833208,
0.542902,
0.584792,
2.456206,
-4.342621,
4.283884,
0.618026,
0.694711,
2.204823,
-4.304508,
4.162499,
0.607591,
0.694203,
4.985894,
4.802461,
3.751977,
0.722943,
0.271963,
1.592294,
-1.257709,
5.456949,
0.577414,
0.563167,
2.644548,
4.524654,
4.921559,
0.614083,
0.281387,
2.760292,
5.100971,
5.015990,
0.616907,
0.255886,
3.523964,
8.005976,
3.729163,
0.668509,
0.119914,
5.599763,
5.715470,
2.724259,
0.770092,
0.232021,
3.063932,
6.566144,
4.529981,
0.635536,
0.189249,
5.720968,
4.254584,
2.830852,
0.770391,
0.299556,
6.374393,
4.785590,
1.591691,
0.826722,
0.278755,
0.672728,
-3.688016,
5.737804,
0.527121,
0.666198,
1.262560,
-3.787691,
5.417779,
0.553172,
0.668527,
1.732553,
-3.952767,
5.000579,
0.577238,
0.673890,
1.043625,
-1.464973,
5.662455,
0.554692,
0.580066,
2.321234,
-4.329069,
4.258156,
0.611897,
0.693961,
2.056846,
-4.477671,
4.520883,
0.596961,
0.706540,
2.153084,
-4.276322,
4.038093,
0.596371,
0.693953,
0.946874,
-1.035249,
6.512274,
0.539958,
0.557139,
1.469132,
-4.036351,
4.604908,
0.568842,
0.692366,
1.024340,
-3.989851,
4.926693,
0.547818,
0.692366,
0.533422,
-3.993222,
5.138202,
0.524613,
0.692366,
0.769720,
-6.095394,
4.985883,
0.534090,
0.779141,
0.699606,
-5.291850,
5.448304,
0.527671,
0.736226,
0.669687,
-4.949770,
5.509612,
0.526913,
0.717857,
0.630947,
-4.695101,
5.449371,
0.526878,
0.704626,
0.583218,
-4.517982,
5.339869,
0.526967,
0.695278,
1.537170,
-4.423206,
4.745470,
0.572058,
0.695278,
1.615600,
-4.475942,
4.813632,
0.573521,
0.703540,
1.729053,
-4.618680,
4.854463,
0.576838,
0.711846,
1.838624,
-4.828746,
4.823737,
0.581691,
0.720063,
2.368250,
-3.106237,
4.868096,
0.609945,
0.639910,
7.542244,
-1.049282,
-2.431321,
0.986046,
0.560034,
1.826614,
-4.399531,
4.399021,
0.586800,
0.695400,
1.929558,
-4.411831,
4.497052,
0.590372,
0.701823,
0.597442,
-2.013686,
5.866456,
0.531915,
0.601537,
1.405627,
-1.714196,
5.241087,
0.577268,
0.585935,
0.662449,
-1.819321,
5.863759,
0.536915,
0.593786,
2.342340,
0.572222,
4.294303,
0.627543,
0.473352,
3.327324,
0.104863,
4.113860,
0.665586,
0.495951,
1.726175,
-0.919165,
5.273355,
0.588354,
0.546862,
5.133204,
7.485602,
2.660442,
0.757824,
0.147676,
4.538641,
6.319907,
3.683424,
0.709250,
0.201508,
3.986562,
5.109487,
4.466315,
0.672684,
0.256581,
2.169681,
-5.440433,
4.455874,
0.600409,
0.749005,
1.395634,
5.011963,
5.316032,
0.558266,
0.261672,
1.619500,
6.599217,
4.921106,
0.570304,
0.187871,
1.891399,
8.236377,
4.274997,
0.588166,
0.109044,
4.195832,
2.235205,
3.375099,
0.711045,
0.398952,
5.733342,
1.411738,
2.431726,
0.781070,
0.435405,
1.859887,
2.355757,
3.843181,
0.587247,
0.398932,
4.988612,
3.074654,
3.083858,
0.742870,
0.355446,
1.303263,
1.416453,
4.831091,
0.572156,
0.437652,
1.305757,
-0.672779,
6.415959,
0.551868,
0.536570,
6.465170,
0.937119,
1.689873,
0.821442,
0.457556,
5.258659,
0.945811,
2.974312,
0.752702,
0.457182,
4.432338,
0.722096,
3.522615,
0.713757,
0.467627,
3.300681,
0.861641,
3.872784,
0.667113,
0.460673,
2.430178,
1.131492,
4.039035,
0.631101,
0.447154,
1.820731,
1.467954,
4.224124,
0.600862,
0.432473,
0.563221,
2.307693,
5.566789,
0.523481,
0.405627,
6.338145,
-0.529279,
1.881175,
0.810748,
0.523926,
5.587698,
3.208071,
2.687839,
0.771046,
0.348959,
0.242624,
-1.462857,
7.071491,
0.509127,
0.562718,
1.611251,
0.339326,
4.895421,
0.595293,
0.485024,
7.743095,
2.364999,
-2.005167,
0.980531,
0.401564,
1.391142,
1.851048,
4.448999,
0.573500,
0.420000,
1.785794,
-0.978284,
4.850470,
0.602995,
0.548688,
4.670959,
2.664461,
3.084075,
0.733530,
0.376977,
1.333970,
-0.283761,
6.097047,
0.560611,
0.519017,
7.270895,
-2.890917,
-2.252455,
0.967686,
0.644357,
1.856432,
2.585245,
3.757904,
0.580985,
0.387160,
0.923388,
0.073076,
6.671944,
0.537728,
0.505385,
5.000589,
-6.135128,
1.892523,
0.760966,
0.779753,
5.085276,
-7.178590,
0.714711,
0.801779,
0.831938,
7.159291,
-0.811820,
-0.072044,
0.892441,
0.540761,
5.843051,
-5.248023,
0.924091,
0.816351,
0.740260,
6.847258,
3.662916,
0.724695,
0.865595,
0.333687,
2.412942,
-8.258853,
4.119213,
0.614074,
0.883246,
0.179909,
-1.689864,
6.573301,
0.508953,
0.579438,
2.103655,
-0.163946,
4.566119,
0.617942,
0.508316,
6.407571,
2.236021,
1.560843,
0.825608,
0.397675,
3.670075,
2.360153,
3.635230,
0.681215,
0.396235,
3.177186,
2.294265,
3.775704,
0.656636,
0.400597,
2.196121,
-4.598322,
4.479786,
0.603900,
0.710217,
6.234883,
-1.944430,
1.663542,
0.812086,
0.588539,
1.292924,
-9.295920,
4.094063,
0.568013,
0.944565,
3.210651,
-8.533278,
2.802001,
0.681008,
0.898285,
4.068926,
-7.993109,
1.925119,
0.733752,
0.869701,
2.724032,
2.315802,
3.777151,
0.633830,
0.398822,
2.288460,
2.398891,
3.697603,
0.606793,
0.395537,
1.998311,
2.496547,
3.689148,
0.589660,
0.391062,
6.130040,
3.399261,
2.038516,
0.805016,
0.342108,
2.288460,
2.886504,
3.775031,
0.611335,
0.362284,
2.724032,
2.961810,
3.871767,
0.634038,
0.355971,
3.177186,
2.964136,
3.876973,
0.656636,
0.355357,
3.670075,
2.927714,
3.724325,
0.681215,
0.358340,
4.018389,
2.857357,
3.482983,
0.698585,
0.363156,
7.555811,
4.106811,
-0.991917,
0.941867,
0.319076,
4.018389,
2.483695,
3.440898,
0.698585,
0.387449,
1.776217,
-2.683946,
5.213116,
0.584177,
0.624107,
1.222237,
-1.182444,
5.952465,
0.554318,
0.566077,
0.731493,
-2.536683,
5.815343,
0.534154,
0.620640,
4.135272,
-6.996638,
2.671970,
0.711218,
0.819975,
3.311811,
-7.660815,
3.382963,
0.664630,
0.852871,
1.313701,
-8.639995,
4.702456,
0.559100,
0.902632,
5.940524,
-6.223629,
-0.631468,
0.871706,
0.791941,
1.998311,
2.743838,
3.744030,
0.591234,
0.373894,
0.901447,
1.236992,
5.754256,
0.544341,
0.451584,
2.308977,
-8.974196,
3.609070,
0.624563,
0.924192,
6.954154,
-2.439843,
-0.131163,
0.885770,
0.615029,
1.098819,
-4.458788,
5.120727,
0.551338,
0.695278,
1.181124,
-4.579996,
5.189564,
0.551980,
0.704632,
1.255818,
-4.787901,
5.237051,
0.552888,
0.715808,
1.325085,
-5.106507,
5.205010,
0.555168,
0.730794,
1.546388,
-5.819392,
4.757893,
0.569944,
0.767035,
1.953754,
-4.183892,
4.431713,
0.593203,
0.685676,
2.117802,
-4.137093,
4.555096,
0.599262,
0.681069,
2.285339,
-4.051196,
4.582438,
0.607600,
0.677703,
2.850160,
-3.665720,
4.484994,
0.631938,
0.663500,
5.278538,
-2.238942,
2.861224,
0.752033,
0.601315,
0.946709,
1.907628,
5.196779,
0.547226,
0.420395,
1.314173,
3.104912,
4.231404,
0.563544,
0.359828,
1.780000,
2.860000,
3.881555,
0.583841,
0.368714,
1.845110,
-4.098880,
4.247264,
0.586614,
0.692366,
5.436187,
-4.030482,
2.109852,
0.771915,
0.683578,
0.766444,
3.182131,
4.861453,
0.531597,
0.352483,
1.938616,
-6.614410,
4.521085,
0.588371,
0.804441,
0.516573,
1.583572,
6.148363,
0.520797,
0.442565,
1.246815,
0.230297,
5.681036,
0.567985,
0.493479,
0.997827,
-6.930921,
4.979576,
0.543283,
0.819255,
3.288807,
-5.382514,
3.795752,
0.655317,
0.745515,
2.311631,
-1.566237,
4.590085,
0.621009,
0.574018,
2.680250,
-6.111567,
4.096152,
0.625560,
0.780312,
3.832928,
-1.537326,
4.137731,
0.680198,
0.570719,
2.961860,
-2.274215,
4.440943,
0.642764,
0.604338,
4.386901,
-2.683286,
3.643886,
0.704663,
0.621530,
1.217295,
-7.834465,
4.969286,
0.552012,
0.862592,
1.542374,
-0.136843,
5.201008,
0.589072,
0.508637,
3.878377,
-6.041764,
3.311079,
0.685945,
0.775357,
3.084037,
-6.809842,
3.814195,
0.645735,
0.812640,
3.747321,
-4.503545,
3.726453,
0.675343,
0.703978,
6.094129,
-3.205991,
1.473482,
0.810858,
0.646305,
4.588995,
-4.728726,
2.983221,
0.720122,
0.714667,
6.583231,
-3.941269,
0.070268,
0.866152,
0.682705,
3.492580,
-3.195820,
4.130198,
0.663187,
0.644597,
1.255543,
0.802341,
5.307551,
0.570082,
0.466326,
1.126122,
-0.933602,
6.538785,
0.544562,
0.548376,
1.443109,
-1.142774,
5.905127,
0.562759,
0.558785,
0.923043,
-0.529042,
7.003423,
0.531987,
0.530140,
1.755386,
3.529117,
4.327696,
0.585271,
0.335177,
2.632589,
3.713828,
4.364629,
0.622953,
0.322779,
3.388062,
3.721976,
4.309028,
0.655896,
0.320163,
4.075766,
3.675413,
4.076063,
0.687132,
0.322346,
4.622910,
3.474691,
3.646321,
0.716482,
0.333201,
5.171755,
2.535753,
2.670867,
0.758757,
0.382787,
7.297331,
0.763172,
-0.048769,
0.897013,
0.468769,
4.706828,
1.651000,
3.109532,
0.732392,
0.424547,
4.071712,
1.476821,
3.476944,
0.702114,
0.433163,
3.269817,
1.470659,
3.731945,
0.666525,
0.433866,
2.527572,
1.617311,
3.865444,
0.633505,
0.426088,
1.970894,
1.858505,
3.961782,
0.603876,
0.416587,
1.579543,
2.097941,
4.084996,
0.579658,
0.409945,
7.664182,
0.673132,
-2.435867,
0.992440,
0.480777,
1.397041,
-1.340139,
5.630378,
0.567192,
0.569420,
0.884838,
0.658740,
6.233232,
0.541366,
0.478899,
0.767097,
-0.968035,
7.077932,
0.526564,
0.546118,
0.460213,
-1.334106,
6.787447,
0.523913,
0.563830,
0.748618,
-1.067994,
6.798303,
0.531529,
0.555057,
1.236408,
-1.585568,
5.480490,
0.566036,
0.582329,
0.387306,
-1.409990,
6.957705,
0.516311,
0.563054,
0.319925,
-1.607931,
6.508676,
0.517472,
0.577877,
1.639633,
2.556298,
3.863736,
0.573595,
0.389807,
1.255645,
2.467144,
4.203800,
0.560698,
0.395332,
1.031362,
2.382663,
4.615849,
0.549756,
0.399751,
4.253081,
2.772296,
3.315305,
0.710288,
0.368253,
4.530000,
2.910000,
3.339685,
0.723330,
0.363373,
]
)
# The flat literal above is a stream of (x, y, z, u, v) records per landmark.
# Regroup into rows of 5, transpose to (5, n_landmarks), and keep only the
# metric x/y/z rows (the u/v texture coordinates are dropped).
canonical_metric_landmarks = canonical_metric_landmarks.reshape(-1, 5).T[:3, :]
# (landmark index, weight) pairs used for the weighted Procrustes alignment.
# Only these landmarks contribute to the pose fit; every other landmark gets
# weight zero (see landmark_weights below).  The second half of the list
# repeats the same weights at higher indices — presumably the mirrored
# landmarks on the other side of the face (TODO confirm against the
# canonical-landmark index map).
procrustes_landmark_basis = [
    (4, 0.070909939706326),
    (6, 0.032100144773722),
    (10, 0.008446550928056),
    (33, 0.058724168688059),
    (54, 0.007667080033571),
    (67, 0.009078059345484),
    (117, 0.009791937656701),
    (119, 0.014565368182957),
    (121, 0.018591361120343),
    (127, 0.005197994410992),
    (129, 0.120625205338001),
    (132, 0.005560018587857),
    (133, 0.05328618362546),
    (136, 0.066890455782413),
    (143, 0.014816547743976),
    (147, 0.014262833632529),
    (198, 0.025462191551924),
    (205, 0.047252278774977),
    (263, 0.058724168688059),
    (284, 0.007667080033571),
    (297, 0.009078059345484),
    (346, 0.009791937656701),
    (348, 0.014565368182957),
    (350, 0.018591361120343),
    (356, 0.005197994410992),
    (358, 0.120625205338001),
    (361, 0.005560018587857),
    (362, 0.05328618362546),
    (365, 0.066890455782413),
    (372, 0.014816547743976),
    (376, 0.014262833632529),
    (420, 0.025462191551924),
    (425, 0.047252278774977),
]
# Dense per-landmark weight vector: zero everywhere except at the sparse
# Procrustes basis entries defined above.
landmark_weights = np.zeros(canonical_metric_landmarks.shape[1])
basis_indices = [idx for idx, _ in procrustes_landmark_basis]
basis_values = [weight for _, weight in procrustes_landmark_basis]
landmark_weights[basis_indices] = basis_values
def log(name, f):
    """Print a labeled debug value (followed by a blank line) when the
    module-level DEBUG switch is enabled; otherwise do nothing."""
    if not DEBUG.get_debug():
        return
    print(f"{name} logged:", f)
    print()
def cpp_compare(name, np_matrix):
    """Debug helper: compare np_matrix against a matrix dumped by the C++
    reference implementation to f"{name}_cpp.npy" and print the sum of
    squared differences.  No-op unless DEBUG is enabled."""
    if not DEBUG.get_debug():
        return
    # The C++ dump uses a different memory alignment, so rebuild the matrix
    # column by column before comparing.
    cpp_matrix = np.load(f"{name}_cpp.npy")
    rows, cols = cpp_matrix.shape
    columns = np.split(cpp_matrix.reshape(-1), cols)
    cpp_matrix = np.stack(columns, axis=1)
    print(f"{name}:", np.sum(np.abs(cpp_matrix - np_matrix[:rows, :cols]) ** 2))
    print()
def get_metric_landmarks(screen_landmarks, pcf):
    """Convert normalized screen-space landmarks to metric space.

    Runs two scale-estimation passes against the canonical metric face, then
    solves a weighted orthogonal (Procrustes) problem for the head pose.
    Returns (metric_landmarks, pose_transform_mat), where the landmarks are
    expressed in the canonical (pose-free) frame and pose_transform_mat is
    the 4x4 transform from canonical to metric space.
    """
    screen_landmarks = project_xy(screen_landmarks, pcf)
    depth_offset = np.mean(screen_landmarks[2, :])

    # Pass 1: rough scale straight from the projected screen coordinates.
    first_pass = change_handedness(screen_landmarks.copy())
    first_iteration_scale = estimate_scale(first_pass)

    # Pass 2: refine the scale after normalizing depth with the first estimate.
    second_pass = screen_landmarks.copy()
    second_pass = move_and_rescale_z(
        pcf, depth_offset, first_iteration_scale, second_pass
    )
    second_pass = unproject_xy(pcf, second_pass)
    second_pass = change_handedness(second_pass)
    second_iteration_scale = estimate_scale(second_pass)

    # Final pass: apply the combined scale and solve for the pose.
    total_scale = first_iteration_scale * second_iteration_scale
    metric_landmarks = screen_landmarks.copy()
    metric_landmarks = move_and_rescale_z(
        pcf, depth_offset, total_scale, metric_landmarks
    )
    metric_landmarks = unproject_xy(pcf, metric_landmarks)
    metric_landmarks = change_handedness(metric_landmarks)

    pose_transform_mat = solve_weighted_orthogonal_problem(
        canonical_metric_landmarks, metric_landmarks, landmark_weights
    )
    cpp_compare("pose_transform_mat", pose_transform_mat)

    # Undo the pose so the returned landmarks live in the canonical frame.
    inv_pose = np.linalg.inv(pose_transform_mat)
    metric_landmarks = inv_pose[:3, :3] @ metric_landmarks + inv_pose[:3, 3][:, None]
    return metric_landmarks, pose_transform_mat
def project_xy(landmarks, pcf):
    """Map normalized [0, 1] landmark coordinates onto the camera frustum's
    near-plane rectangle described by pcf (left/right/top/bottom).

    NOTE: flips the y row of the input array in place before scaling; the
    returned array is a new scaled-and-translated copy.
    """
    x_scale = pcf.right - pcf.left
    y_scale = pcf.top - pcf.bottom
    # Flip y in place: image origin is top-left, metric origin bottom-left.
    landmarks[1, :] = 1.0 - landmarks[1, :]
    scale = np.array([[x_scale, y_scale, x_scale]]).T
    offset = np.array([[pcf.left, pcf.bottom, 0]]).T
    return landmarks * scale + offset
def change_handedness(landmarks):
    """Negate the z row in place, switching coordinate handedness.

    Returns the same array object for call-chaining convenience.
    """
    landmarks[2, :] = -landmarks[2, :]
    return landmarks
def move_and_rescale_z(pcf, depth_offset, scale, landmarks):
    """Shift depths so the mean depth sits at pcf.near, then divide by
    scale.  Modifies the z row in place and returns the same array."""
    depths = landmarks[2, :]
    landmarks[2, :] = (depths - depth_offset + pcf.near) / scale
    return landmarks
def unproject_xy(pcf, landmarks):
    """Undo the perspective divide: scale x and y by z / pcf.near.

    Modifies the x and y rows in place and returns the same array.
    """
    depth_ratio = landmarks[2, :] / pcf.near
    landmarks[0, :] *= depth_ratio
    landmarks[1, :] *= depth_ratio
    return landmarks
def estimate_scale(landmarks):
    """Estimate the isotropic scale of the similarity transform that maps
    the canonical metric landmarks onto the given landmarks."""
    transform = solve_weighted_orthogonal_problem(
        canonical_metric_landmarks, landmarks, landmark_weights
    )
    # For a scaled rotation, the norm of any column of the 3x3 block is the
    # scale; the first column is used here.
    return np.linalg.norm(transform[:, 0])
def extract_square_root(point_weights):
    """Element-wise square root of the per-point weights."""
    return point_weights ** 0.5
def solve_weighted_orthogonal_problem(source_points, target_points, point_weights):
    """Weighted orthogonal Procrustes: find the 4x4 similarity transform
    that best maps source_points onto target_points under point_weights."""
    return internal_solve_weighted_orthogonal_problem(
        source_points, target_points, extract_square_root(point_weights)
    )
def internal_solve_weighted_orthogonal_problem(sources, targets, sqrt_weights):
    """Core of the weighted Procrustes solve.

    Given source and target point matrices (3 x n) and the element-wise
    square roots of the point weights (n,), computes the optimal rotation,
    isotropic scale and translation, and returns them combined into a 4x4
    homogeneous transform.  The cpp_compare()/log() calls are debug hooks
    mirroring the C++ reference implementation and are no-ops unless DEBUG
    is enabled.
    """
    cpp_compare("sources", sources)
    cpp_compare("targets", targets)
    # transposed(A_w): sources with each column scaled by its sqrt-weight.
    weighted_sources = sources * sqrt_weights[None, :]
    # transposed(B_w): targets with each column scaled by its sqrt-weight.
    weighted_targets = targets * sqrt_weights[None, :]
    cpp_compare("weighted_sources", weighted_sources)
    cpp_compare("weighted_targets", weighted_targets)
    # w = transposed(j_w) j_w, the total weight.
    total_weight = np.sum(sqrt_weights * sqrt_weights)
    log("total_weight", total_weight)
    # Let C = (j_w transposed(j_w)) / (transposed(j_w) j_w).
    # Note that C = transposed(C), hence (I - C) = transposed(I - C).
    #
    # transposed(A_w) C = transposed(A_w) j_w transposed(j_w) / w =
    # (transposed(A_w) j_w) transposed(j_w) / w = c_w transposed(j_w),
    #
    # where c_w = transposed(A_w) j_w / w is a k x 1 vector calculated here:
    twice_weighted_sources = weighted_sources * sqrt_weights[None, :]
    source_center_of_mass = np.sum(twice_weighted_sources, axis=1) / total_weight
    log("source_center_of_mass", source_center_of_mass)
    # transposed((I - C) A_w) = transposed(A_w) (I - C) =
    # transposed(A_w) - transposed(A_w) C = transposed(A_w) - c_w transposed(j_w).
    centered_weighted_sources = weighted_sources - np.matmul(
        source_center_of_mass[:, None], sqrt_weights[None, :]
    )
    cpp_compare("centered_weighted_sources", centered_weighted_sources)
    # Cross-covariance between targets and centered sources; its SVD yields
    # the optimal rotation (Kabsch).
    design_matrix = np.matmul(weighted_targets, centered_weighted_sources.T)
    cpp_compare("design_matrix", design_matrix)
    log("design_matrix_norm", np.linalg.norm(design_matrix))
    rotation = compute_optimal_rotation(design_matrix)
    scale = compute_optimal_scale(
        centered_weighted_sources, weighted_sources, weighted_targets, rotation
    )
    log("scale", scale)
    rotation_and_scale = scale * rotation
    # The weighted mean of the residuals gives the optimal translation.
    pointwise_diffs = weighted_targets - np.matmul(rotation_and_scale, weighted_sources)
    cpp_compare("pointwise_diffs", pointwise_diffs)
    weighted_pointwise_diffs = pointwise_diffs * sqrt_weights[None, :]
    cpp_compare("weighted_pointwise_diffs", weighted_pointwise_diffs)
    translation = np.sum(weighted_pointwise_diffs, axis=1) / total_weight
    log("translation", translation)
    transform_mat = combine_transform_matrix(rotation_and_scale, translation)
    cpp_compare("transform_mat", transform_mat)
    return transform_mat
def compute_optimal_rotation(design_matrix):
    """Closest proper rotation to design_matrix via SVD (Kabsch algorithm).

    A near-zero design matrix makes the solution ill-defined; that case is
    only reported with a warning print, matching the reference code.
    """
    if np.linalg.norm(design_matrix) < 1e-9:
        print("Design matrix norm is too small!")
    u, _, vh = np.linalg.svd(design_matrix, full_matrices=True)
    postrotation, prerotation = u, vh
    # Guard against reflections: when det(U) * det(Vh) < 0, flip the last
    # column so the product is a proper rotation (det = +1).
    if np.linalg.det(postrotation) * np.linalg.det(prerotation) < 0:
        postrotation[:, 2] *= -1
    cpp_compare("postrotation", postrotation)
    cpp_compare("prerotation", prerotation)
    rotation = postrotation @ prerotation
    cpp_compare("rotation", rotation)
    return rotation
def compute_optimal_scale(
    centered_weighted_sources, weighted_sources, weighted_targets, rotation
):
    """Optimal isotropic scale for the weighted Procrustes fit.

    Degenerate denominators and non-positive scales are only reported with
    warning prints, matching the reference implementation.
    """
    rotated = np.matmul(rotation, centered_weighted_sources)
    numerator = np.sum(rotated * weighted_targets)
    denominator = np.sum(centered_weighted_sources * weighted_sources)
    if denominator < 1e-9:
        print("Scale expression denominator is too small!")
    scale = numerator / denominator
    if scale < 1e-9:
        print("Scale is too small!")
    return scale
def combine_transform_matrix(r_and_s, t):
    """Pack a 3x3 rotation-and-scale block and a 3-vector translation into
    a 4x4 homogeneous transform matrix."""
    top = np.hstack([np.asarray(r_and_s, dtype=float), np.reshape(t, (3, 1))])
    bottom = np.array([[0.0, 0.0, 0.0, 1.0]])
    return np.vstack([top, bottom])
b22225c076f9bafbbae48c8ab5b2631ee50e3c5c | 33,969 | py | Python | CacheMgr.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | 6 | 2019-06-17T06:13:01.000Z | 2021-08-22T02:13:36.000Z | CacheMgr.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | null | null | null | CacheMgr.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | 2 | 2016-08-18T12:14:44.000Z | 2019-12-06T17:20:21.000Z | from Cache import SharedItem, SharedAPI
from Assert import Assert
import urlparse
import string
import os
import time
import ht_time
import grailutil
import mimetypes
import regex
META, DATA, DONE = 'META', 'DATA', 'DONE' # Three stages
CacheMiss = 'Cache Miss'
CacheEmpty = 'Cache Empty'
CacheReadFailed = 'Cache Item Expired or Missing'
CacheFileError = 'Cache File Error'
try:
# Python 1.5.2:
from mimetypes import guess_extension
except ImportError:
# This is for users of Python 1.5.1:
def guess_extension(type):
type = string.lower(type)
for ext, stype in mimetypes.types_map.items():
if type == stype:
return ext
return None
def parse_cache_control(s):
    """Parse an HTTP Cache-Control header value.

    Returns a list of (directive, value) pairs.  Directives without an
    '=value' part get an empty string as their value.  Whitespace around
    directives is preserved, matching the original behavior.
    """
    def parse_directive(s):
        # Split only on the first '=' so values may themselves contain '='.
        i = s.find('=')
        if i >= 0:
            return (s[:i], s[i+1:])
        return (s, '')
    # str.find/str.split replace the string-module functions (string.find,
    # string.splitfields) that were removed in Python 3; the behavior is
    # identical.  list() keeps the return type stable where map() is lazy.
    return list(map(parse_directive, s.split(',')))
class CacheManager:
    """Manages one or more caches in a hierarchy.

    The only methods intended for use by the application are open() and
    add_cache(); the other methods are for use by the caches themselves.

    Overview of CacheManager and Cache organization:

    The manager holds a list of caches (there can be more than one).

    items = {}: one entry for every URL in every cache.  The value is a
    cache entry object (cf. DiskCacheEntry) whose get() method returns a
    protocol API object for the stored data.

    active = {}: one entry for each URL currently open in a browser
    window -- the shared object list.  A request for an active page gets
    a second SharedAPI wrapping the same object.  The list stores
    SharedItems, which carry a reference count; when that count reaches
    zero the item removes itself from the list.

    Freshness: the manager is partly responsible for checking page
    freshness (pages with an explicit TTL know when they expire).  The
    freshness test is preference-driven -- never, once per session, or
    per time-unit.  On each open, fresh_p() decides whether to send an
    If-Modified-Since request to the original server.
    """
    def __init__(self, app):
        """Initialize the cache manager and create the disk cache.

        Basic disk cache characteristics (size in KB, directory) are
        loaded from the application's 'disk-cache' preference group.
        """
        self.app = app
        self.caches = []     # cache hierarchy; caches register via add_cache()
        self.items = {}      # key -> cache entry object, across all caches
        self.active = {}     # key -> SharedItem currently open in a browser
        self.disk = None
        self.disk = DiskCache(self, self.app.prefs.GetInt('disk-cache',
                                                          'size') * 1024,
                              self.app.prefs.Get('disk-cache', 'directory'))
        self.set_freshness_test()
        # Re-read cache preferences whenever the 'disk-cache' group changes.
        self.app.prefs.AddGroupCallback('disk-cache', self.update_prefs)
        # check preferences
        # NOTE(review): 'bool' shadows the builtin name; left as-is here.
        bool = self.app.prefs.GetInt('disk-cache', 'checkpoint')
        if bool:
            # Checkpoint cache metadata to disk when the application exits.
            self.app.register_on_exit(lambda save=self.save_cache_state:save())
def save_cache_state(self):
for cache in self.caches:
cache._checkpoint_metadata()
    def update_prefs(self):
        """Preference-group callback: re-apply 'disk-cache' settings.

        Refreshes the freshness test, pushes the (possibly changed) size
        limit into the primary cache, and if the cache directory changed,
        checkpoints the old cache's metadata and reopens it in the new
        location.
        """
        self.set_freshness_test()
        # Chained assignment: update both the local size and the primary
        # cache's limit (preference value is in KB, max_size in bytes).
        size = self.caches[0].max_size = self.app.prefs.GetInt('disk-cache',
                                                               'size') \
                                                               * 1024
        new_dir = self.app.prefs.Get('disk-cache', 'directory')
        if new_dir != self.disk.pref_dir:
            self.disk._checkpoint_metadata()
            self.reset_disk_cache(size, new_dir)
def reset_disk_cache(self, size=None, dir=None, flush_log=0):
"""Close the current disk cache and open a new one.
Used primarily to change the cache directory or to clear
everything out of the cache when it is erased. The flush_log
argument is passed to DiskCache.close(), allowing this routine
to be used in both of the cases described. On erase, we want
to write a new, empty log; on change directory, we want to
keep the old log intact.
"""
if not size:
size = self.disk.max_size
if not dir:
dir = self.disk.directory
self.disk.close(flush_log)
self.disk = DiskCache(self, size, dir)
    def set_freshness_test(self):
        """Install self.fresh_p according to the freshness preferences.

        fresh_p(key) answers "is the cached copy of key still fresh?".
        The policy is preference-driven: check once per session, check
        periodically (every freshness-test-period hours), never check
        (always fresh), or always check (never fresh).
        """
        # read preferences to determine when pages should be checked
        # for freshness -- once per session, every n secs, or never
        fresh_type = self.app.prefs.Get('disk-cache', 'freshness-test-type')
        fresh_rate = int(self.app.prefs.GetFloat('disk-cache',
                                                 'freshness-test-period') * 3600.0)
        # The default arguments (self=self, t=fresh_rate) bind the current
        # values at lambda creation time -- the old idiom for closures.
        if fresh_type == 'per session':
            self.fresh_p = lambda key, self=self: \
                           self.fresh_every_session(self.items[key])
            self.session_freshen = []
        elif fresh_type == 'periodic':
            self.fresh_p = lambda key, self=self, t=fresh_rate: \
                           self.fresh_periodic(self.items[key],t)
        elif fresh_type == 'never':
            self.fresh_p = lambda x: 1
        else: # == 'always'
            self.fresh_p = lambda x: 0
def open(self, url, mode, params, reload=0, data=None):
"""Opens a URL and returns a protocol API for it.
This is the method called by the Application to load a
URL. First, it checks the shared object list (and returns a
second reference to a URL that is currently active). Then it
calls open routines specialized for GET or POST.
"""
key = self.url2key(url, mode, params)
if mode == 'GET':
if self.active.has_key(key):
# XXX This appeared to be a bad idea!
## if reload:
## self.active[key].reset()
return SharedAPI(self.active[key])
return self.open_get(key, url, mode, params, reload, data)
elif mode == 'POST':
return self.open_post(key, url, mode, params, reload, data)
    def open_get(self, key, url, mode, params, reload, data):
        """open() method specialized for GET requests.

        Performs several steps:

        1. Check for the URL in the cache; a failed read evicts the
           stale entry.
        2. On a cache hit, create a SharedItem around the cached copy:
           with the reload flag set, force a reload; if the freshness
           test fails, issue a refresh (If-Modified-Since) based on the
           entry's last-modified time; otherwise serve the cached copy.
           On a miss, create a SharedItem that loads (and perhaps
           caches) the page.
        3. activate() adds the item to the shared object list and
           returns a SharedAPI for it.
        """
        try:
            api = self.cache_read(key)
        # Old-style string exception: the raise carries the cache object
        # whose read failed, so we can evict the bad entry from it.
        except CacheReadFailed, cache:
            cache.evict(key)
            api = None
        if api:
            # creating reference to cached item
            if reload:
                item = SharedItem(url, mode, params, self, key, data, api,
                                  reload=reload)
                self.touch(key)
            elif not self.fresh_p(key):
                # Stale: revalidate against the server using the entry's
                # last-modified timestamp.
                item = SharedItem(url, mode, params, self, key, data, api,
                                  refresh=self.items[key].lastmod)
                self.touch(key,refresh=1)
            else:
                item = SharedItem(url, mode, params, self, key, data, api)
        else:
            # cause item to be loaded (and perhaps cached)
            item = SharedItem(url, mode, params, self, key, data)
        return self.activate(item)
def open_post(self, key, url, mode, params, reload, data):
"""Open a URL with a POST request. Do not cache."""
key = self.url2key(url, mode, params)
return self.activate(SharedItem(url, mode, params, None, key,
data))
def activate(self,item):
"""Adds a SharedItem to the shared object list and returns SharedAPI.
"""
self.active[item.key] = item
return SharedAPI(self.active[item.key])
def deactivate(self,key):
"""Removes a SharedItem from the shared object list."""
if self.active.has_key(key):
del self.active[key]
def add_cache(self, cache):
"""Called by cache to notify manager this it is ready."""
self.caches.append(cache)
def close_cache(self, cache):
self.caches.remove(cache)
def cache_read(self,key):
"""Checks cache for URL. Returns protocol API on hit.
Looks for a cache entry object in the items dictionary. If the
CE object is found, call its method get() to create a protocol
API for the item.
"""
if self.items.has_key(key):
return self.items[key].get()
else:
return None
def touch(self,key=None,url=None,refresh=0):
"""Calls touch() method of CacheEntry object."""
if url:
key = self.url2key(url,'GET',{})
if key and self.items.has_key(key):
self.items[key].touch(refresh)
    def expire(self,key):
        """Should not be used."""
        # Deliberately disabled: the always-false assertion makes any call
        # fail loudly.  Eviction is done via delete()/the cache's evict().
        Assert('night' == 'day')
        Assert(self.items.has_key(key))
        self.items[key].evict()
def delete(self, keys, evict=1):
if type(keys) != type([]):
keys = [keys]
if evict:
for key in keys:
try:
self.items[key].cache.evict(key)
except KeyError:
pass
else:
for key in keys:
try:
del self.items[key]
except KeyError:
pass
    def add(self,item,reload=0):
        """If item is not in the cache and is allowed to be cached, add it.

        With reload=1 an existing entry is refreshed in place instead.
        Cache write failures are reported but not propagated.
        """
        try:
            if not self.items.has_key(item.key) and self.okay_to_cache_p(item):
                self.caches[0].add(item)
            elif reload == 1:
                self.caches[0].update(item)
        # CacheFileError carries a (filename, error) tuple
        except CacheFileError, err_tuple:
            (file, err) = err_tuple
            print "error adding item %s (file %s): %s" % (item.url,
                                                          file, err)
    # list of protocols that we can cache
    cache_protocols = ['http', 'ftp', 'hdl']
    def okay_to_cache_p(self,item):
        """Check if this item should be cached.
        This routine probably (definitely) needs more thought.
        Currently, we do not cache URLs with the following properties:
        1. The scheme is not on the list of cacheable schemes.
        2. The item is bigger than a quarter of the cache size.
        3. The 'Pragma: no-cache' header was sent
        4. The 'Expires: 0' header was sent
        5. The URL includes a query part '?'
        """
        if len(self.caches) < 1:
            return 0
        (scheme, netloc, path, parm, query, frag) = \
                 urlparse.urlparse(item.url)
        if query or scheme not in self.cache_protocols:
            return 0
        # don't cache really big things
        #####
        ##### limit is hardcoded, please fix
        #####
        if item.datalen > self.caches[0].max_size / 4:
            return 0
        code, msg, params = item.meta
        # don't cache things that don't want to be cached
        if params.has_key('pragma'):
            pragma = params['pragma']
            if pragma == 'no-cache':
                return 0
        if params.has_key('expires'):
            expires = params['expires']
            # NOTE(review): header values are presumably strings, so
            # `expires == 0` would never be true - confirm whether '0'
            # (string) was intended here.
            if expires == 0:
                return 0
        # respond to http/1.1 cache control directives
        if params.has_key('cache-control'):
            for k, v in parse_cache_control(params['cache-control']):
                if k in ('no-cache', 'no-store'):
                    return 0
                if k == 'max-age':
                    # NOTE(review): this value is computed but never used;
                    # max-age does not influence the decision - confirm.
                    expires = string.atoi(v)
        return 1
def fresh_every_session(self,entry):
"""Refresh the page once per session"""
if not entry.key in self.session_freshen:
self.session_freshen.append(entry.key)
return 0
return 1
def fresh_periodic(self,entry,max_age):
"""Refresh it max_age seconds have passed since it was loaded."""
try:
age = time.time() - entry.date.get_secs()
if age > max_age:
return 0
return 1
except AttributeError:
# if you don't tell me the date, I don't tell you it's stale
return 1
    def url2key(self, url, mode, params):
        """Normalize a URL for use as a caching key.
        - change the hostname to all lowercase
        - remove the port if it is the scheme's default port
        - reformat the port using %d
        - get rid of the fragment identifier

        Note: the `params` argument is immediately shadowed by the URL's
        parameter component from urlparse below.
        """
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        # NOTE(review): any user:password@ prefix is split off and then
        # discarded, so authenticated and anonymous URLs share one key -
        # confirm that this aliasing is intended.
        i = string.find(netloc, '@')
        if i > 0:
            userpass = netloc[:i]
            netloc = netloc[i+1:] # delete the '@'
        else:
            userpass = ""
        scheme = string.lower(scheme)
        netloc = string.lower(netloc)
        # split off an explicit port, if any
        i = string.find(netloc, ':')
        if i >= 0:
            try:
                port = string.atoi(netloc[i+1:])
            except string.atoi_error:
                port = None
        else:
            port = None
        if scheme == 'http' and port == 80:
            # drop the default http port
            netloc = netloc[:i]
        elif type(port) == type(0):
            # canonicalize the port spelling (e.g. strip leading zeros)
            netloc = netloc[:i] + ":%d" % port
        return urlparse.urlunparse((scheme, netloc, path, params, query, ""))
class DiskCacheEntry:
    """Data about item stored in a disk cache.
    __init__ only store the cache this entry is in. To place real data
    in a cache item, you must call fill() to create a new item.
    The DiskCacheEntry object is shared by the DiskCache and the
    CacheManager. The method get() is called by the
    CacheManager and change the state of the DiskCache.
    The data members include:
    date -- the date of the most recent HTTP request to the server
    (either a regular load or an If-Modified-Since request)
    """
    def __init__(self, cache=None):
        # cache: the DiskCache that owns this entry (None until adopted)
        self.cache = cache
    def fill(self,key,url,size,date,lastmod,expires,ctype,
             cencoding,ctencoding):
        # Populate the entry's metadata.  date/lastmod/expires are wrapped
        # in HTTime so they can be held lazily as a string or as seconds.
        self.key = key
        self.url = url
        self.size = size
        if date:
            self.date = HTTime(date)
        else:
            self.date = None
        if lastmod:
            self.lastmod = HTTime(lastmod)
        else:
            self.lastmod = None
        if expires:
            self.expires = HTTime(expires)
        else:
            self.expires = None
        self.type = ctype
        self.encoding = cencoding
        self.transfer_encoding = ctencoding
    # Matches values that start with a letter, i.e. an HTTP date string
    # rather than a float of seconds (used by parse_assign below).
    string_date = regex.compile('^[A-Za-z]')
    def __repr__(self):
        return self.unparse()
    def parse(self,parsed_rep):
        """Reads transaction log entry.

        parsed_rep is one tab-separated log line (without the leading
        record-type byte).  Fields 8 and 9 (encodings) are optional so
        that version 1.2 logs can still be read.
        """
        vars = string.splitfields(parsed_rep,'\t')
        self.key = vars[0]
        self.url = vars[1]
        self.file = vars[2]
        self.size = string.atoi(vars[3])
        self.type = vars[7]
        try:
            self.encoding = vars[8]
        except IndexError:
            # log version 1.2
            self.encoding = None
            self.transfer_encoding = None
        else:
            if self.encoding == 'None':
                self.encoding = None
            try:
                self.transfer_encoding = vars[9]
            except IndexError:
                self.transfer_encoding = None
            else:
                if self.transfer_encoding == 'None':
                    self.transfer_encoding = None
        self.date = None
        self.lastmod = None
        self.expires = None
        for tup in [(vars[4], 'date'), (vars[5], 'lastmod'),
                    (vars[6], 'expires')]:
            self.parse_assign(tup[0],tup[1])
    def parse_assign(self,rep,var):
        # A logged time is either the literal 'None', an HTTP date
        # string (starts with a letter), or a float of seconds.
        if rep == 'None':
            setattr(self,var,None)
        elif self.string_date.match(rep) == 1:
            setattr(self,var,HTTime(str=rep))
        else:
            setattr(self,var,HTTime(secs=string.atof(rep)))
    def unparse(self):
        """Return entry for transaction log.
        """
        if not hasattr(self, 'file'):
            self.file = ''
        stuff = [self.key, self.url, self.file, self.size, self.date,
                 self.lastmod, self.expires, self.type, self.encoding,
                 self.transfer_encoding]
        s = string.join(map(str, stuff), '\t')
        return s
    def get(self):
        """Create a disk_cache_access API object and return it.
        Calls cache.get() to update the LRU information.
        Also checks to see if a page with an explicit Expire date has
        expired; raises a CacheReadFailed if it has.
        """
        if self.expires:
            if self.expires and self.expires.get_secs() < time.time():
                # we need to refresh the page; can we just reload?
                raise CacheReadFailed, self.cache
        self.cache.get(self.key)
        try:
            api = disk_cache_access(self.cache.get_file_path(self.file),
                                    self.type, self.date, self.size,
                                    self.encoding, self.transfer_encoding)
        except IOError:
            raise CacheReadFailed, self.cache
        return api
    def touch(self,refresh=0):
        """Change the date of most recent check with server."""
        self.date = HTTime(secs=time.time())
        if refresh:
            self.cache.log_entry(self)
    def delete(self):
        # Hook kept for API symmetry with eviction; nothing to clean up.
        pass
def compare_expire_items(item1,item2):
    """cmp()-style comparison of two CacheEntries by expiry date.

    Returns 1, -1 or 0 so list.sort() orders entries with the earliest
    expiry first.
    """
    secs1 = item1.expires.get_secs()
    secs2 = item2.expires.get_secs()
    return (secs1 > secs2) - (secs1 < secs2)
class DiskCache:
    """Persistent object cache.
    need to discuss:
    use_order
    the log: writes every change to cache or use_order, writes
    flushed, do a checkpoint run on startup, format is tuple (entry
    type, object), where entry type is add, evict, update use_order,
    version.
    expires
    evict
    Note: Nowhere do we verify that the disk has enough space for a
    full cache.
    """
    def __init__(self, manager, size, directory):
        # max_size is the cache budget in bytes; size tracks current usage
        self.max_size = size
        self.size = 0
        self.pref_dir = directory
        if hasattr(os.path, 'expanduser'):
            directory = os.path.expanduser(directory)
        if not os.path.isabs(directory):
            directory = os.path.join(grailutil.getgraildir(), directory)
        self.directory = directory
        self.manager = manager
        self.manager.add_cache(self)
        self.items = {}          # key => DiskCacheEntry
        self.use_order = []      # keys, least recently used first
        self.log = None
        self.checkpoint = 0
        self.expires = []        # entries with an explicit Expires date
        self.types = {}          # content-type => preferred file suffix
        grailutil.establish_dir(self.directory)
        self._read_metadata()
        self._reinit_log()
    # Current log format version, and the versions we can still read.
    log_version = "1.3"
    log_ok_versions = ["1.2", "1.3"]
    def close(self,log):
        """Shut the cache down; checkpoint the transaction log if log is true."""
        self.manager.delete(self.items.keys(), evict=0)
        if log:
            self.use_order = []
            self._checkpoint_metadata()
        del self.items
        del self.expires
        self.manager.close_cache(self)
        self.dead = 1
    def _read_metadata(self):
        """Read the transaction log from the cache directory.
        Reads the pickled log entries and re-creates the cache's
        current contents and use_order from the log.
        A version number is included, but currently we only assert
        that a the version number read is the same as the current
        version number.
        """
        logpath = os.path.join(self.directory, 'LOG')
        try:
            log = open(logpath)
        except IOError:
            # now what happens if there is an error here?
            log = open(logpath, 'w')
            log.close()
            return
        for line in log.readlines():
            try:
                # first byte of each record encodes the record type
                kind = line[0:1]
                if kind == '2': # use update
                    key = line[2:-1]
                    self.use_order.remove(key)
                    self.use_order.append(key)
                elif kind == '1': # delete
                    key = line[2:-1]
                    if self.items.has_key(key):
                        self.size = self.size - self.items[key].size
                        del self.items[key]
                        del self.manager.items[key]
                        self.use_order.remove(key)
                        Assert(not key in self.use_order)
                elif kind == '0': # add
                    newentry = DiskCacheEntry(self)
                    newentry.parse(line[2:-1])
                    if not self.items.has_key(newentry.key):
                        self.use_order.append(newentry.key)
                    newentry.cache = self
                    self.items[newentry.key] = newentry
                    self.manager.items[newentry.key] = newentry
                    self.size = self.size + newentry.size
                elif kind == '3': # version (hopefully first)
                    ver = line[2:-1]
                    if ver not in self.log_ok_versions:
                        ### clear out anything we might have read
                        ### and bail. this is an old log file.
                        if len(self.use_order) > 0:
                            self.use_order = []
                        for key in self.items.keys():
                            del self.items[key]
                            del self.manager.items[key]
                        self.size = 0
                        return
                    Assert(ver in self.log_ok_versions)
            except IndexError:
                # ignore this line
                pass
    def _checkpoint_metadata(self):
        """Checkpoint the transaction log.
        Creates a new log that contains only the current state of the
        cache.
        """
        import traceback
        if self.log:
            self.log.close()
        try:
            newpath = os.path.join(self.directory, 'CHECKPOINT')
            newlog = open(newpath, 'w')
            newlog.write('3 ' + self.log_version + '\n')
            for key in self.use_order:
                self.log_entry(self.items[key],alt_log=newlog,flush=None)
                # don't flush writes during the checkpoint, because if
                # we crash it won't matter
            newlog.close()
            logpath = os.path.join(self.directory, 'LOG')
            # atomically replace the old log with the checkpoint
            os.unlink(logpath)
            os.rename(newpath, logpath)
        except:
            print "exception during checkpoint"
            traceback.print_exc()
    def _reinit_log(self):
        """Open the log for writing new transactions."""
        logpath = os.path.join(self.directory, 'LOG')
        self.log = open(logpath, 'a')
    def log_entry(self,entry,delete=0,alt_log=None,flush=1):
        """Write to the log adds and evictions."""
        if alt_log:
            dest = alt_log
        else:
            dest = self.log
        if delete:
            dest.write('1 ' + entry.key + '\n')
        else:
            dest.write('0 ' + entry.unparse() + '\n')
        if flush:
            dest.flush()
    def log_use_order(self,key):
        """Write to the log changes in use_order."""
        if self.items.has_key(key):
            self.log.write('2 ' + key + '\n')
            # should we flush() here? probably...
            self.log.flush()
    # Filenames created by get_file_name() all look like 'spam<digits>'.
    cache_file = regex.compile('^spam[0-9]+')
    def erase_cache(self):
        """Delete every cache file on disk and reset the cache."""
        if hasattr(self,'dead'):
            # they got me
            self.manager.disk.erase_cache()
            return
        def walk_erase(regexp,dir,files):
            for file in files:
                if regexp.match(file) != -1:
                    path = os.path.join(dir,file)
                    if os.path.isfile(path):
                        os.unlink(path)
        os.path.walk(self.directory, walk_erase, self.cache_file)
        self.manager.reset_disk_cache(flush_log=1)
    def erase_unlogged_files(self):
        """Delete cache-like files on disk that no log entry refers to."""
        if hasattr(self,'dead'):
            # they got me
            self.manager.disk.erase_unlogged_files()
            return
        def walk_erase_unknown(known,dir,files,regexp=self.cache_file):
            for file in files:
                if not known.has_key(file) \
                   and regexp.match(file) != -1:
                    path = os.path.join(dir,file)
                    if os.path.isfile(path):
                        os.unlink(path)
        files = map(lambda entry:entry.file, self.items.values())
        file_dict = { 'LOG': 1 }
        for file in files:
            file_dict[file] = 1
        os.path.walk(self.directory, walk_erase_unknown, file_dict)
    def get(self,key):
        """Update and log use_order."""
        Assert(self.items.has_key(key))
        # move key to the most-recently-used end of the list
        self.use_order.remove(key)
        self.use_order.append(key)
        self.log_use_order(key)
    def update(self,object):
        # this is simple, but probably not that efficient
        self.evict(object.key)
        self.add(object)
    def add(self,object):
        """Creates a DiskCacheEntry for object and adds it to cache.
        Examines the object and its headers for size, date, type,
        etc. The DiskCacheEntry is placed in the DiskCache and the
        CacheManager and the entry is logged.
        XXX Need to handle replacement better?
        """
        respcode, msg, headers = object.meta
        size = object.datalen
        self.make_space(size)
        newitem = DiskCacheEntry(self)
        (date, lastmod, expires, ctype, cencoding, ctencoding) \
               = self.read_headers(headers)
        newitem.fill(object.key, object.url, size, date, lastmod,
                     expires, ctype, cencoding, ctencoding)
        newitem.file = self.get_file_name(newitem)
        if expires:
            self.add_expireable(newitem)
        self.make_file(newitem,object)
        self.log_entry(newitem)
        self.items[object.key] = newitem
        self.manager.items[object.key] = newitem
        self.use_order.append(object.key)
        return newitem
    def read_headers(self,headers):
        # Pull the cache-relevant fields out of a response header dict,
        # supplying defaults for anything the server did not send.
        if headers.has_key('date'):
            date = headers['date']
        else:
            # no Date header: fall back to the local clock
            date = time.time()
        if headers.has_key('last-modified'):
            lastmod = headers['last-modified']
        else:
            lastmod = date
        if headers.has_key('expires'):
            expires = headers['expires']
        else:
            expires = None
        if headers.has_key('content-type'):
            ctype = headers['content-type']
        else:
            # what is the proper default content type?
            ctype = 'text/html'
        if headers.has_key('content-encoding'):
            cencoding = headers['content-encoding']
        else:
            cencoding = None
        if headers.has_key('content-transfer-encoding'):
            ctencoding = headers['content-transfer-encoding']
        else:
            ctencoding = None
        return (date, lastmod, expires, ctype, cencoding, ctencoding)
    def add_expireable(self,entry):
        """Adds entry to list of pages with explicit expire date."""
        self.expires.append(entry)
    def get_file_name(self,entry):
        """Invent a filename for a new cache entry."""
        filename = 'spam' + str(time.time()) + self.get_suffix(entry.type)
        return filename
    def get_file_path(self,filename):
        # Absolute path of a cache file inside the cache directory.
        path = os.path.join(self.directory, filename)
        return path
    def get_suffix(self,type):
        # Preferred suffix for a content-type, falling back to mimetypes.
        if self.types.has_key(type):
            return self.types[type]
        else:
            return guess_extension(type) or ''
    def make_file(self,entry,object):
        """Write the object's data to disk."""
        path = self.get_file_path(entry.file)
        try:
            f = open(path, 'wb')
            f.writelines(object.data)
            f.close()
        except IOError, err:
            raise CacheFileError, (path, err)
    def make_space(self,amount):
        """Ensures that there are amount bytes free in the disk cache.
        If the cache does not have amount bytes free, pages are
        evicted. First, we check the list of pages with explicit
        expire dates and evict any that have expired. If we need more
        space, evict the least recently used page. Continue LRU
        eviction until enough space is available.
        Raises CacheEmpty if there are no entries in the cache, but
        amount bytes are not available.
        """
        if self.size + amount > self.max_size:
            self.evict_expired_pages()
        try:
            while self.size + amount > self.max_size:
                self.evict_any_page()
        except CacheEmpty:
            print "Can't make more room in the cache"
            pass
            # this is not the right thing to do, probably
            # but I don't think this should ever happen
        self.size = self.size + amount
    def evict_any_page(self):
        """Evict the least recently used page."""
        # get ride of least-recently used thing
        if len(self.items) > 0:
            key = self.use_order[0]
            self.evict(key)
        else:
            raise CacheEmpty
    def evict_expired_pages(self):
        """Evict any pages on the expires list that have expired."""
        self.expires.sort(compare_expire_items)
        size = len(self.expires)
        if size > 0 \
           and self.expires[0].expires.get_secs() < time.time():
            index = 0
            t = time.time()
            while index < size and self.expires[index].expires.get_secs() < t:
                index = index + 1
            for item in self.expires[0:index]:
                self.evict(item.key)
            del self.expires[0:index]
    def evict(self,key):
        """Remove an entry from the cache and delete the file from disk."""
        self.use_order.remove(key)
        evictee = self.items[key]
        del self.manager.items[key]
        del self.items[key]
        # NOTE(review): self.expires holds DiskCacheEntry objects, not key
        # strings, so this membership test appears never to match and the
        # stale entry stays on the expires list - confirm (likely bug;
        # evict_expired_pages compensates by re-scanning the list).
        if key in self.expires:
            self.expires.remove(key)
        try:
            os.unlink(self.get_file_path(evictee.file))
        except (os.error, IOError), err:
            # print "error deleteing %s from cache: %s" % (key, err)
            pass
        self.log_entry(evictee,1) # 1 indicates delete entry
        evictee.delete()
        self.size = self.size - evictee.size
class disk_cache_access:
    """protocol access interface for disk cache

    Mimics the protocol-API objects used for live network loads so the
    rest of the browser can read a cached page transparently.
    """
    def __init__(self, filename, content_type, date, len,
                 content_encoding, transfer_encoding):
        # NOTE: the parameter `len` shadows the builtin within this method.
        self.headers = { 'content-type' : content_type,
                         'date' : date,
                         'content-length' : str(len) }
        if content_encoding:
            self.headers['content-encoding'] = content_encoding
        if transfer_encoding:
            self.headers['content-transfer-encoding'] = transfer_encoding
        self.filename = filename
        try:
            self.fp = open(filename, 'rb')
        except IOError, err:
            print "io error opening %s: %s" % (filename, err)
            # propogate error through
            raise IOError, err
        # DATA/DONE are presumably module-level protocol-state constants -
        # defined elsewhere in this module.
        self.state = DATA
    def pollmeta(self):
        # metadata is immediately available for a disk file
        return "Ready", 1
    def getmeta(self):
        # cached pages always replay as a 200 response
        return 200, "OK", self.headers
    def polldata(self):
        return "Ready", 1
    def getdata(self,maxbytes):
        # get some data from the disk
        data = self.fp.read(maxbytes)
        if not data:
            self.state = DONE
        return data
    def fileno(self):
        # -1 signals "no selectable file descriptor" after close()
        try:
            return self.fp.fileno()
        except AttributeError:
            return -1
    def close(self):
        fp = self.fp
        self.fp = None
        if fp:
            fp.close()
    def tk_img_access(self):
        """Return the cached filename and content-type.
        Used by AsyncImage to create Tk image objects directly from
        the file in the disk cache.
        """
        return self.filename, self.headers['content-type']
class HTTime:
    """Stores time as HTTP string or seconds since epoch or both.
    Lazy conversions from one format to the other.

    Truth-value tests on the stored values were replaced with explicit
    `is None` checks so that 0 seconds (the epoch) is treated as a valid
    time rather than as "unknown".
    """
    # HTTP defines three date formats, but only one is supposed to be
    # produced by an HTTP application. (The other two you're supposed to
    # handle for backwards compatibility.) It would be nice to accept
    # the two old date formats as str input and convert them to the
    # preferred format.
    def __init__(self,any=None,str=None,secs=None):
        # `any` may hold either representation; dispatch on its type.
        # (Parameter names `any`/`str` shadow builtins but are kept for
        # keyword-argument compatibility with existing callers.)
        if any is not None:
            if type(any) == type(''):
                str = any
            elif type(any) in [type(1), type(.1)]:
                secs = any
        if str:
            self.str = str
        else:
            # normalize '' to None so "unset" has a single spelling
            self.str = None
        if secs is not None:
            self.secs = secs
        else:
            self.secs = None
    def get_secs(self):
        # Lazily parse the string form; identity test keeps secs == 0 valid.
        if self.secs is None:
            try:
                self.secs = ht_time.parse(self.str)
            except:
                # if there is a parsing error, we bail
                self.secs = 0
        return self.secs
    def get_str(self):
        # Lazily format the seconds form.
        if self.str is None:
            self.str = ht_time.unparse(self.secs)
        return self.str
    def __repr__(self):
        if self.secs is not None:
            return str(self.secs)
        elif self.str:
            return self.str
        else:
            return str(None)
| 33.666006 | 79 | 0.553828 |
801439c04ecdaab5fe6022f168eede9a56e8b556 | 482 | py | Python | 8. dicionarios/verifica_primos.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | 1 | 2021-08-19T19:40:14.000Z | 2021-08-19T19:40:14.000Z | 8. dicionarios/verifica_primos.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | null | null | null | 8. dicionarios/verifica_primos.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | null | null | null | def eh_primo(n):
if n == 1 or n == 0 or n < 0:
return False
if n == 2:
return True
if n % 2 == 0:
return False
c = 3
while c < n:
print(c)
if n % c == 0:
return False
else:
c += 2
return True
def verifica_primos(numeros):
    """Map each number in `numeros` to True/False for primality."""
    return {numero: eh_primo(numero) for numero in numeros}
| 17.851852 | 33 | 0.435685 |
82b48fc050c036887bd280fb9833cfab9992734b | 6,269 | py | Python | conf.py | ReinhardFrie/peer | ccf43e96fe973a3ebfce4425c10595a470ffe349 | [
"MIT"
] | null | null | null | conf.py | ReinhardFrie/peer | ccf43e96fe973a3ebfce4425c10595a470ffe349 | [
"MIT"
] | null | null | null | conf.py | ReinhardFrie/peer | ccf43e96fe973a3ebfce4425c10595a470ffe349 | [
"MIT"
] | 1 | 2019-09-17T15:18:21.000Z | 2019-09-17T15:18:21.000Z | # -*- coding: utf-8 -*-
#
# Nutanix Labs documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 27 12:18:41 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_bootstrap_theme
import sphinx_fontawesome
#sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this documentation build.
extensions = ['sphinx.ext.todo',
              'sphinx.ext.ifconfig',
              'sphinxcontrib.fulltoc',
              'sphinx_fontawesome']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Peer and Nutanix'
copyright = u'2019 Nutanix'
author = u'Nutanix Global Technical Enablement'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = "NutanixWorkshops.svg"
html_favicon = "favicon.ico"
html_title = ""
html_show_sphinx = False
#html_add_permalinks = ""

# Theme options are theme-specific and customize the look and feel of the
# bootstrap theme further.  (A redundant empty ``html_theme_options = {}``
# assignment that was immediately overwritten has been removed.)
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': " ",
    # Tab name for entire site. (Default: "Site")
    'navbar_site_name': "",
    # A list of tuples containing pages or urls to link to.
    # Valid tuples should be in the following forms:
    #    (name, page)                 # a link to a page
    #    (name, "/aa/bb", 1)          # a link to an arbitrary relative url
    #    (name, "http://example.com", True) # arbitrary absolute url
    # Note the "1" or "True" value above as the third argument to indicate
    # an arbitrary url.
    #'navbar_links': [
    #    ("Examples", "examples"),
    #    ("Link", "http://example.com", True),
    #],
    # Render the next and previous page links in navbar. (Default: true)
    'navbar_sidebarrel': False,
    # Render the current pages TOC in the navbar. (Default: true)
    'navbar_pagenav': False,
    # Tab name for the current pages TOC. (Default: "Page")
    'navbar_pagenav_name': "Page",
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    'globaltoc_depth': 2,
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "true",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    'navbar_class': "navbar-inverse",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "false",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    'source_link_position': "footer",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing (default) or the name of a valid theme
    # such as "cosmo" or "sandstone".
    #'bootswatch_theme': "united",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}

# Show only the local TOC in every sidebar.
html_sidebars = {'**': ['localtoc.html']}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
#htmlhelp_basename = 'NutanixLabsdoc'
| 32.314433 | 79 | 0.689584 |
37f077bd7f59d90f626a33917cb7ef39ff8384bb | 8,622 | py | Python | StyleGAN/dnnlib/tflib/optimizer.py | yubin1219/GAN | 8345095f9816e548c968492efbe92b427b0e06a3 | [
"MIT"
] | null | null | null | StyleGAN/dnnlib/tflib/optimizer.py | yubin1219/GAN | 8345095f9816e548c968492efbe92b427b0e06a3 | [
"MIT"
] | null | null | null | StyleGAN/dnnlib/tflib/optimizer.py | yubin1219/GAN | 8345095f9816e548c968492efbe92b427b0e06a3 | [
"MIT"
] | 1 | 2021-09-17T01:28:50.000Z | 2021-09-17T01:28:50.000Z | import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import List, Union
from . import autosummary
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
try:
from tensorflow.python.ops import nccl_ops
except:
import tensorflow.contrib.nccl as nccl_ops
class Optimizer:
    """Wrapper around a TensorFlow 1.x optimizer.

    Collects gradients registered per device (one call per GPU), sums
    them across devices with NCCL, and applies the updates in a single
    training op.  Optionally performs dynamic loss scaling for
    mixed-precision training (the scaling factor is kept as log2 in a
    per-device variable).
    """
    def __init__(self,
                 name: str = "Train",
                 tf_optimizer: str = "tf.train.AdamOptimizer",
                 learning_rate: TfExpressionEx = 0.001,
                 use_loss_scaling: bool = False,
                 loss_scaling_init: float = 64.0,
                 loss_scaling_inc: float = 0.0005,
                 loss_scaling_dec: float = 1.0,
                 **kwargs):
        # Init fields.
        self.name = name
        self.learning_rate = tf.convert_to_tensor(learning_rate)
        self.id = self.name.replace("/", ".")
        self.scope = tf.get_default_graph().unique_name(self.id)
        self.optimizer_class = util.get_obj_by_name(tf_optimizer)
        self.optimizer_kwargs = dict(kwargs)
        self.use_loss_scaling = use_loss_scaling
        self.loss_scaling_init = loss_scaling_init
        self.loss_scaling_inc = loss_scaling_inc
        self.loss_scaling_dec = loss_scaling_dec
        self._grad_shapes = None  # [shape, ...]
        self._dev_opt = OrderedDict()  # device => optimizer
        self._dev_grads = OrderedDict()  # device => [[(grad, var), ...], ...]
        self._dev_ls_var = OrderedDict()  # device => variable (log2 of loss scaling factor)
        self._updates_applied = False
    def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
        """Register gradients of `loss` w.r.t. `trainable_vars`.

        Intended to be called once per device/GPU; every call must pass
        variables with the same shapes as the first call.  Must precede
        apply_updates().
        """
        assert not self._updates_applied
        # Validate arguments.
        if isinstance(trainable_vars, dict):
            trainable_vars = list(trainable_vars.values())  # allow passing in Network.trainables as vars
        assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
        assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
        if self._grad_shapes is None:
            self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]
        assert len(trainable_vars) == len(self._grad_shapes)
        assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))
        dev = loss.device
        assert all(var.device == dev for var in trainable_vars)
        # Register device and compute gradients.
        with tf.name_scope(self.id + "_grad"), tf.device(dev):
            if dev not in self._dev_opt:
                # lazily create one underlying optimizer per device
                opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
                assert callable(self.optimizer_class)
                self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
                self._dev_grads[dev] = []
            loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
            grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE)  # disable gating to reduce memory usage
            grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads]  # replace disconnected gradients with zeros
            self._dev_grads[dev].append(grads)
    def apply_updates(self) -> tf.Operation:
        """Build and return the training op that applies all registered
        gradients (summed across devices), adjusting the loss-scaling
        variable on overflow.  May only be called once.
        """
        tfutil.assert_tf_initialized()
        assert not self._updates_applied
        self._updates_applied = True
        devices = list(self._dev_grads.keys())
        total_grads = sum(len(grads) for grads in self._dev_grads.values())
        assert len(devices) >= 1 and total_grads >= 1
        ops = []
        with tfutil.absolute_name_scope(self.scope):
            # Cast gradients to FP32 and calculate partial sum within each device.
            dev_grads = OrderedDict()  # device => [(grad, var), ...]
            for dev_idx, dev in enumerate(devices):
                with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
                    sums = []
                    for gv in zip(*self._dev_grads[dev]):
                        assert all(v is gv[0][1] for g, v in gv)
                        g = [tf.cast(g, tf.float32) for g, v in gv]
                        g = g[0] if len(g) == 1 else tf.add_n(g)
                        sums.append((g, gv[0][1]))
                    dev_grads[dev] = sums
            # Sum gradients across devices.
            if len(devices) > 1:
                with tf.name_scope("SumAcrossGPUs"), tf.device(None):
                    for var_idx, grad_shape in enumerate(self._grad_shapes):
                        g = [dev_grads[dev][var_idx][0] for dev in devices]
                        if np.prod(grad_shape):  # nccl does not support zero-sized tensors
                            g = nccl_ops.all_sum(g)
                        for dev, gg in zip(devices, g):
                            dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
            # Apply updates separately on each device.
            for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
                with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
                    # Scale gradients as needed.
                    if self.use_loss_scaling or total_grads > 1:
                        with tf.name_scope("Scale"):
                            # average over the registered gradient sets and
                            # undo the loss scaling applied at registration
                            coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
                            coef = self.undo_loss_scaling(coef)
                            grads = [(g * coef, v) for g, v in grads]
                    # Check for overflows.
                    with tf.name_scope("CheckOverflow"):
                        grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
                    # Update weights and adjust loss scaling.
                    with tf.name_scope("UpdateWeights"):
                        # pylint: disable=cell-var-from-loop
                        opt = self._dev_opt[dev]
                        ls_var = self.get_loss_scaling_var(dev)
                        if not self.use_loss_scaling:
                            # skip the step entirely when any gradient is non-finite
                            ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
                        else:
                            # on success: grow the scaling factor and step;
                            # on overflow: shrink the scaling factor, no step
                            ops.append(tf.cond(grad_ok,
                                               lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
                                               lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
                    # Report statistics on the last device.
                    if dev == devices[-1]:
                        with tf.name_scope("Statistics"):
                            ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
                            ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))
                            if self.use_loss_scaling:
                                ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))
            # Initialize variables and group everything into a single op.
            self.reset_optimizer_state()
            tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))
            return tf.group(*ops, name="TrainingOp")
    def reset_optimizer_state(self) -> None:
        """Reset the internal state of all underlying optimizers (e.g. Adam moments)."""
        tfutil.assert_tf_initialized()
        tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
    def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
        """Get or create the log2(loss-scaling-factor) variable for `device`."""
        if not self.use_loss_scaling:
            return None
        if device not in self._dev_ls_var:
            with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
                self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")
        return self._dev_ls_var[device]
    def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Multiply `value` by the current loss scaling factor (identity when disabled)."""
        assert tfutil.is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
    def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Divide `value` by the current loss scaling factor (identity when disabled)."""
        assert tfutil.is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * tfutil.exp2(-self.get_loss_scaling_var(value.device))
| 46.858696 | 164 | 0.591858 |
2cd1919488faa846571713365a73c4804f871646 | 3,861 | py | Python | proto/atdd-tutorial-berlin-2010/src/vacalc/employeestore.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | proto/atdd-tutorial-berlin-2010/src/vacalc/employeestore.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | proto/atdd-tutorial-berlin-2010/src/vacalc/employeestore.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from __future__ import with_statement
import os
import csv
import datetime
class VacalcError(RuntimeError): pass
class EmployeeStore(object):
    """Store of Employee records keyed by name, backed by a simple CSV file.

    Each CSV row is ``name,YYYY-MM-DD``. When ``db_file`` is falsy or the
    file does not exist, the store starts empty (in-memory only).
    """
    def __init__(self, db_file):
        self._db_file = db_file
        if self._db_file and os.path.isfile(self._db_file):
            self._employees = self._read_employees(self._db_file)
        else:
            self._employees = {}
    def _read_employees(self, path):
        # Load every CSV row into a name -> Employee mapping.
        employees = {}
        with open(path) as db:
            for row in csv.reader(db):
                employee = Employee(row[0], self._parse_date(row[1]))
                employees[employee.name] = employee
        return employees
    def refresh(self):
        # Re-read the backing file, discarding any unsaved in-memory state.
        self.__init__(self._db_file)
    def get_employee(self, name):
        """Return the Employee called `name`; raise VacalcError when absent."""
        try:
            return self._employees[name]
        except KeyError:
            raise VacalcError("Employee '%s' not found." % name)
    def get_all_employees(self):
        """Return all stored employees (dict-value order)."""
        return self._employees.values()
    def add_employee(self, name, startdate):
        """Create, persist and return a new employee; duplicate names raise."""
        if name in self._employees:
            raise VacalcError("Employee '%s' already exists in the system."
                              % name)
        employee = Employee(name, self._parse_date(startdate))
        self._employees[employee.name] = employee
        self._serialize(employee)
        return employee
    def _serialize(self, employee):
        # Append the new employee to the CSV file; no-op for in-memory stores.
        if not self._db_file:
            return
        with open(self._db_file, 'a') as db:
            writer = csv.writer(db, lineterminator='\n')
            writer.writerow([employee.name, employee.startdate])
    def _parse_date(self, datestring):
        """Parse 'YYYY-MM-DD' into a datetime.date; raise VacalcError otherwise."""
        if not datestring:
            raise VacalcError('No start date given.')
        try:
            year, month, day = (int(item) for item in datestring.split('-'))
        except ValueError:
            raise VacalcError('Invalid start date.')
        try:
            return datetime.date(year, month, day)
        except ValueError, err:  # Python 2 except syntax, kept as-is.
            raise VacalcError(err.args[0].capitalize() + '.')
class Employee(object):
    """Computes earned vacation days for one employee.

    The holiday-credit year starts in April (`credit_start_month`); a full
    credit year earns `max_vacation` days, otherwise 2 days per qualifying
    month worked before the credit year starts.
    """

    max_vacation = int(12 * 2.5)
    no_vacation = 0
    vacation_per_month = 2
    credit_start_month = 4
    work_days_required = 14

    def __init__(self, name, startdate):
        self.name = name
        self.startdate = startdate

    def count_vacation(self, year):
        """Return the vacation days earned for the credit year starting in `year`."""
        return self._count_vacation(self.startdate, year)

    def _count_vacation(self, start, year):
        if self._has_worked_longer_than_year(start, year):
            return self.max_vacation
        if self._started_after_holiday_credit_year_ended(start, year):
            return self.no_vacation
        return self.vacation_per_month * self._count_working_months(start)

    def _has_worked_longer_than_year(self, start, year):
        elapsed = year - start.year
        return elapsed > 1 or (elapsed == 1 and start.month < self.credit_start_month)

    def _started_after_holiday_credit_year_ended(self, start, year):
        if start.year > year:
            return True
        return start.year == year and start.month >= self.credit_start_month

    def _count_working_months(self, start):
        # Months from the start month up to the credit-year boundary (April).
        months = self.credit_start_month - start.month
        if months <= 0:
            months += 12
        # The first partial month only counts with >= 14 working days.
        if self._first_month_has_too_few_working_days(start):
            months -= 1
        return months

    def _first_month_has_too_few_working_days(self, start):
        workdays = 0
        day = start
        while day is not None:
            if self._is_working_day(day):
                workdays += 1
            day = self._next_date(day)
        return workdays < self.work_days_required

    def _is_working_day(self, date):
        # Monday..Friday -> weekday() in 0..4.
        return date.weekday() < 5

    def _next_date(self, date):
        try:
            return date.replace(day=date.day + 1)
        except ValueError:
            # Ran past the end of the month.
            return None
| 31.390244 | 80 | 0.618752 |
0675a30f01abd52a6947113345a581005c7f6dbe | 6,497 | py | Python | src/proto/cppcookie.py | ago1024/xpp | 4ede24d7901e93e51c9e2a56dc7fdfb3054e50be | [
"MIT"
] | null | null | null | src/proto/cppcookie.py | ago1024/xpp | 4ede24d7901e93e51c9e2a56dc7fdfb3054e50be | [
"MIT"
] | null | null | null | src/proto/cppcookie.py | ago1024/xpp | 4ede24d7901e93e51c9e2a56dc7fdfb3054e50be | [
"MIT"
] | null | null | null | from builtins import object
from utils import _n, _ext, _n_item, get_namespace
_templates = {}
_templates['void_cookie_function'] = \
'''\
%s\
void
%s_checked(Connection && c%s)
{%s\
xpp::generic::check<Connection, xpp::%s::error::dispatcher>(
std::forward<Connection>(c),
%s_checked(std::forward<Connection>(c)%s));
}
%s\
void
%s(Connection && c%s)
{%s\
%s(std::forward<Connection>(c)%s);
}
'''
def _void_cookie_function(ns, name, c_name, template, return_value, protos, calls, initializer):
    """Render the checked/unchecked C++ wrapper pair for a void-cookie request.

    `return_value` is accepted for signature parity with the reply variant
    but is not used here (void requests always return xcb_void_cookie_t).
    """
    if not template:
        template = "template<typename Connection>\n"
    # The template consumes its fields twice: once for the *_checked variant
    # and once for the plain one.
    fields = (template, name, protos, initializer, ns, c_name, calls,
              template, name, protos, initializer, c_name, calls)
    return _templates['void_cookie_function'] % fields
_templates['cookie_static_getter'] = \
'''\
%s\
static
%s
cookie(xcb_connection_t * const c%s)
{%s\
return base::cookie(c%s);
}
'''
def _cookie_static_getter(template, return_value, protos, calls, initializer):
    """Render a static cookie() getter that forwards to the base class."""
    fields = (template, return_value, protos, initializer, calls)
    return _templates['cookie_static_getter'] % fields
class CppCookie(object):
    """Generates the C++ wrapper code (static cookie getters and void request
    functions) for a single X protocol request."""

    def __init__(self, namespace, is_void, name, reply, parameter_list):
        self.namespace = namespace
        self.is_void = is_void
        self.name = name
        self.reply = reply
        self.parameter_list = parameter_list
        self.request_name = _ext(_n_item(self.name[-1]))
        # Full C function name, e.g. "xcb_<ext>_<request>".
        ext_part = ("_" + get_namespace(namespace)) if namespace.is_ext else ""
        self.c_name = "xcb" + ext_part + "_" + self.request_name

    def comma(self):
        return self.parameter_list.comma()

    def calls(self, sort):
        return self.parameter_list.calls(sort)

    def protos(self, sort, defaults):
        return self.parameter_list.protos(sort, defaults)

    def iterator_template(self, indent="    ", tail="\n"):
        """Return the template<...> line covering iterator and plain type
        parameters, or "" when the request has no iterator parameters."""
        if len(self.parameter_list.iterator_templates) == 0:
            return ""
        prefix = "template<typename " + ("Connection, typename " if self.is_void else "")
        names = self.parameter_list.iterator_templates + self.parameter_list.templates
        return indent + prefix + ", typename ".join(names) + ">" + tail

    def iterator_calls(self, sort):
        return self.parameter_list.iterator_calls(sort)

    def iterator_protos(self, sort, defaults):
        return self.parameter_list.iterator_protos(sort, defaults)

    def iterator_initializers(self):
        return self.parameter_list.iterator_initializers()

    def _render_initializers(self, initializer):
        # Indent every initializer line into the generated function body.
        # Empty initializer list renders as a single newline.
        inits = "" if initializer else "\n"
        for chunk in initializer:
            inits += "\n"
            for line in chunk.split('\n'):
                inits += "    " + line + "\n"
        return inits

    def void_functions(self, protos, calls, template="", initializer=[]):
        """Render the free-function pair for a void request."""
        inits = self._render_initializers(initializer)
        return _void_cookie_function(get_namespace(self.namespace),
                                     self.request_name,
                                     self.c_name,
                                     template,
                                     "xcb_void_cookie_t",
                                     self.comma() + protos,
                                     self.comma() + calls,
                                     inits)

    def static_reply_methods(self, protos, calls, template="", initializer=[]):
        """Render a static cookie() getter for this request."""
        inits = self._render_initializers(initializer)
        if self.is_void:
            return_value = "xcb_void_cookie_t"
        else:
            return_value = self.c_name + "_cookie_t"
        return _cookie_static_getter(template,
                                     return_value,
                                     self.comma() + protos,
                                     self.comma() + calls,
                                     inits)

    def make_static_getter(self):
        """Assemble all static cookie() getter variants that apply."""
        plist = self.parameter_list
        default = self.static_reply_methods(self.protos(False, False), self.calls(False))
        if plist.has_defaults:
            default = self.static_reply_methods(self.protos(True, True), self.calls(False))
        wrapped = ""
        if plist.want_wrap:
            wrapped = self.static_reply_methods(self.iterator_protos(True, True),
                                                self.iterator_calls(False),
                                                self.iterator_template(),
                                                self.iterator_initializers())
        default_args = ""
        if plist.is_reordered():
            default_args = self.static_reply_methods(self.protos(True, True), self.calls(False))
        result = ""
        if plist.has_defaults or plist.is_reordered() or plist.want_wrap:
            result += default
        if plist.is_reordered():
            result += "\n" + default_args
        if plist.want_wrap:
            result += "\n" + wrapped
        return result

    def make_void_functions(self):
        """Assemble all free-function variants for a void request."""
        plist = self.parameter_list
        default = self.void_functions(self.protos(False, False), self.calls(False))
        if plist.has_defaults:
            default = self.void_functions(self.protos(True, True), self.calls(False))
        wrapped = ""
        if plist.want_wrap:
            wrapped = self.void_functions(self.iterator_protos(True, True),
                                          self.iterator_calls(False),
                                          self.iterator_template(indent=""),
                                          self.iterator_initializers())
        default_args = ""
        if plist.is_reordered():
            default_args = self.void_functions(self.protos(True, True), self.calls(False))
        result = ""
        if plist.has_defaults or plist.is_reordered() or plist.want_wrap:
            result += default
        if plist.is_reordered():
            result += "\n" + default_args
        if plist.want_wrap:
            result += "\n" + wrapped
        return result
| 31.848039 | 96 | 0.548407 |
c67f47903560bd407f4bc2784d86fd93bc35b290 | 501 | py | Python | code/chapter_10/listing_10_10.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | 80 | 2021-04-19T10:03:57.000Z | 2022-03-30T15:34:47.000Z | code/chapter_10/listing_10_10.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | null | null | null | code/chapter_10/listing_10_10.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | 23 | 2021-04-25T03:50:07.000Z | 2022-03-22T03:06:19.000Z | import sympy as sym
a, b, sigma_a, sigma_b = sym.symbols("a b sigma_a sigma_b")
def symbolic_error_prop(func, val_a, val_sigma_a, val_b=0, val_sigma_b=0):
    """Evaluate the sympy expression `func(a, b)` and its propagated 1-sigma
    uncertainty at the given numeric values.

    Uses the first-order (Gaussian) error-propagation formula:
    sigma_z**2 = (dz/da)**2 * sigma_a**2 + (dz/db)**2 * sigma_b**2
    """
    z_fn = sym.lambdify([a, b], func, 'numpy')
    variance = (sym.diff(func, a) ** 2 * sigma_a ** 2
                + sym.diff(func, b) ** 2 * sigma_b ** 2)
    sigma_fn = sym.lambdify([a, b, sigma_a, sigma_b], sym.sqrt(variance), 'numpy')
    val_z = z_fn(a=val_a, b=val_b)
    val_sigma_z = sigma_fn(a=val_a, b=val_b, sigma_a=val_sigma_a, sigma_b=val_sigma_b)
    return val_z, val_sigma_z
1f837102ae63bf2d309af5b200492cc680a7fadb | 758 | py | Python | answers/x_2_5.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | answers/x_2_5.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | 1 | 2021-11-13T08:03:04.000Z | 2021-11-13T08:03:04.000Z | answers/x_2_5.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | # x_2_5
#
# 関数内で定義された変数や関数の引数はそのプロック内でのみ参照できる「ローカル変数」となり「locals()」関数でその内容を確認できます
# 「a」「b」「c」「d」がそれぞれどんな値となるかを予想してください
def sample(name):
food = 'オムライス'
print(locals())
sample('田中')
def full_name_a(first_name, last_name):
a = locals()
print(a) # => {'first_name': '山田', 'last_name': '太郎'}
def full_name_b(first_name, last_name):
first_name = '山中'
b = locals()
print(b) # => {'first_name': '山中', 'last_name': '太郎'}
def full_name_c(first_name):
c = locals()
print(c) # => {'first_name': '山口'}
def full_name_d():
d = locals()
print(d) # => {}
first_name = '山田'
last_name = '太郎'
full_name_a(first_name, last_name)
full_name_b(first_name, last_name)
full_name_c('山口')
last_name = '二郎'
full_name_d()
| 16.12766 | 70 | 0.630607 |
de4922c5014efcfa2e8fdbb78f043f5439e6f55b | 650 | py | Python | var/spack/repos/builtin/packages/py-binaryornot/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-binaryornot/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-binaryornot/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBinaryornot(PythonPackage):
    """Ultra-lightweight pure Python package to check if a file is binary or text.
    """
    homepage = "https://binaryornot.readthedocs.io"
    url      = "https://github.com/audreyr/binaryornot/archive/0.4.0.tar.gz"
    version('0.4.4', sha256='8cca04876a5e9d01f0dda79390e99089da87f3c1948ab2720661ba379d1b23f2')
    # py-setuptools is only needed to build; py-chardet is a runtime dependency.
    depends_on('py-setuptools', type='build')
    depends_on('py-chardet')
| 32.5 | 95 | 0.735385 |
a5fe6c43d4d24aaa0fe5fc54541045d0ea727fb3 | 814 | py | Python | gitClone.py | michaele77/CS231N-pytorch | cf19d34a62f3ce69cc3192f6c6f25729e1aab5f1 | [
"BSD-3-Clause"
] | 1 | 2020-05-21T00:40:04.000Z | 2020-05-21T00:40:04.000Z | gitClone.py | michaele77/CS231N-pytorch | cf19d34a62f3ce69cc3192f6c6f25729e1aab5f1 | [
"BSD-3-Clause"
] | null | null | null | gitClone.py | michaele77/CS231N-pytorch | cf19d34a62f3ce69cc3192f6c6f25729e1aab5f1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 22:16:38 2020
@author: ershov
"""
#Imports
import shutil
import os
import matplotlib.pyplot as plt
import random
import subprocess
# Clone the repo on first run (heuristic: a sparse working directory),
# otherwise pull the latest changes.
dirList = os.listdir()
dirLen = len(dirList)
print('Directory length is: ' + str(dirLen))

if not dirLen > 5:
    # Fix: these status messages were bare string expressions (no-ops);
    # they are clearly meant to be printed.
    print('Cloning repo...')
    command = ['git', 'clone', 'https://github.com/michaele77/CS231N-pytorch']
    subprocess.run(command)
    os.chdir('CS231N-pytorch/')
    command = ['pip', 'install', '-r', 'requirements.txt']
    subprocess.run(command)
else:
    print('Pulling repo...')
    command = ['git', 'pull', 'https://github.com/michaele77/CS231N-pytorch']
    subprocess.run(command)
| 22 | 78 | 0.646192 |
3d61611c505d2ec73a6414ea9572cf41badabf5b | 499 | py | Python | tests/restserver/polls/models.py | danicarrion/pyrestcli | 319a0d9574f47a2ff23cbce197bf412f5000d6fa | [
"MIT"
] | 5 | 2018-03-13T13:29:12.000Z | 2021-02-12T11:34:59.000Z | tests/restserver/polls/models.py | danicarrion/pyrestcli | 319a0d9574f47a2ff23cbce197bf412f5000d6fa | [
"MIT"
] | 21 | 2016-09-04T20:16:07.000Z | 2021-06-10T19:30:44.000Z | tests/restserver/polls/models.py | danicarrion/pyrestcli | 319a0d9574f47a2ff23cbce197bf412f5000d6fa | [
"MIT"
] | 5 | 2017-03-17T19:51:47.000Z | 2021-03-18T07:40:12.000Z | from django.db import models
class Question(models.Model):
    """A poll question with its publication date."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.question_text
class Choice(models.Model):
    """One selectable answer belonging to a Question, with a vote tally."""
    # Deleting the Question cascades to its choices; reverse accessor is `choices`.
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.choice_text
| 26.263158 | 92 | 0.733467 |
3fd0da84f4e428087630fc4ccd48f938cf7a1f35 | 102 | py | Python | django-demo/src/for_django_2-0/myproject/myapp/urls.py | tamilmaran-7/samples | ba52f06fa7bcf3da88d833eba677624ee253999a | [
"Apache-2.0"
] | 1 | 2022-03-15T07:20:59.000Z | 2022-03-15T07:20:59.000Z | django-demo/src/for_django_2-0/myproject/myapp/urls.py | tamilmaran-7/samples | ba52f06fa7bcf3da88d833eba677624ee253999a | [
"Apache-2.0"
] | null | null | null | django-demo/src/for_django_2-0/myproject/myapp/urls.py | tamilmaran-7/samples | ba52f06fa7bcf3da88d833eba677624ee253999a | [
"Apache-2.0"
] | 4 | 2022-02-07T06:42:32.000Z | 2022-03-17T07:30:10.000Z | from django.urls import path
from . import views
# Route the application root to the list view.
urlpatterns = [
    path('', views.list, name='list')
]
65d6d4aba07197836dec099f892f7810dd3029ee | 86 | py | Python | app/domain/exceptions.py | c-w/appinsights-on-premises | 511ebf7b2ea0bdacb81b6a3bc40ca45ea912af0e | [
"MIT"
] | 9 | 2019-08-27T13:15:49.000Z | 2021-03-23T13:27:40.000Z | app/domain/exceptions.py | CatalystCode/appinsights-on-premises | 511ebf7b2ea0bdacb81b6a3bc40ca45ea912af0e | [
"MIT"
] | 1 | 2019-07-25T18:06:19.000Z | 2020-04-29T16:40:41.000Z | app/domain/exceptions.py | c-w/appinsights-on-premises | 511ebf7b2ea0bdacb81b6a3bc40ca45ea912af0e | [
"MIT"
] | 1 | 2019-02-05T21:36:22.000Z | 2019-02-05T21:36:22.000Z | class UnknownClient(Exception):
pass
class DuplicateClient(Exception):
    """Error raised when a client is registered more than once."""
| 12.285714 | 33 | 0.744186 |
87d213129fd156d266000d166191c75177b8ca66 | 180 | py | Python | dbsavior/__init__.py | pengfei99/K8sCronJobPostgresBackup | 2dd7bc3c7820752ef63e04d555dcb36b2518e3c0 | [
"Apache-2.0"
] | null | null | null | dbsavior/__init__.py | pengfei99/K8sCronJobPostgresBackup | 2dd7bc3c7820752ef63e04d555dcb36b2518e3c0 | [
"Apache-2.0"
] | null | null | null | dbsavior/__init__.py | pengfei99/K8sCronJobPostgresBackup | 2dd7bc3c7820752ef63e04d555dcb36b2518e3c0 | [
"Apache-2.0"
] | null | null | null | __version__ = '0.1.0'
# Set default logging handler to avoid "No handler found" warnings.
import logging
logging.getLogger('db_backup_restore').addHandler(logging.NullHandler())
| 25.714286 | 72 | 0.783333 |
28336e696bbd53e4f6bb20c0b370d830112fc7c4 | 56,747 | py | Python | src/network-manager/azext_network_manager/custom.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | null | null | null | src/network-manager/azext_network_manager/custom.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/network-manager/azext_network_manager/custom.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from knack.util import CLIError
from ._client_factory import (
cf_networkmanagercommit,
cf_networkmanagerdeploymentstatus,
cf_networkmanagementclient,
cf_activesecurityuserrule,
cf_effectivevirtualnetwork,
cf_listeffectivevirtualnetwork
)
def network_manager_list(client, resource_group_name, top=None, skip_token=None):
    """List all Azure Virtual Network Managers in a resource group."""
    return client.list(
        resource_group_name=resource_group_name, top=top, skip_token=skip_token)
def network_manager_show(client, resource_group_name, network_manager_name):
    """Get a single network manager by name."""
    return client.get(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name)
def network_manager_create(client, resource_group_name, network_manager_name, location,
                           network_manager_scopes, network_manager_scope_accesses,
                           id_=None, tags=None, display_name=None, description=None):
    """Create (or overwrite) a network manager from the CLI arguments."""
    parameters = {
        'id': id_,
        'location': location,
        'tags': tags,
        'display_name': display_name,
        'description': description,
        'network_manager_scopes': network_manager_scopes,
        'network_manager_scope_accesses': network_manager_scope_accesses,
    }
    return client.create_or_update(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters)
def network_manager_update(instance,
                           resource_group_name,
                           network_manager_name,
                           id_=None,
                           location=None,
                           tags=None,
                           display_name=None,
                           description=None,
                           network_manager_scopes=None,
                           network_manager_scope_accesses=None):
    """Apply only the non-None CLI arguments onto the model and return it."""
    # `id_` (CLI spelling) maps onto the model's `id` attribute.
    updates = (('id', id_),
               ('location', location),
               ('tags', tags),
               ('display_name', display_name),
               ('description', description),
               ('network_manager_scopes', network_manager_scopes),
               ('network_manager_scope_accesses', network_manager_scope_accesses))
    for attr, value in updates:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_delete(client, resource_group_name, network_manager_name):
    """Delete a network manager."""
    return client.delete(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name)
def network_manager_commit_post(cmd, client, resource_group_name, network_manager_name,
                                target_locations, commit_type, configuration_ids=None):
    """Commit pending configurations of the given type to the target regions."""
    # The passed-in `client` is replaced by a commit-specific client built
    # from the CLI context (same as the original implementation).
    client = cf_networkmanagercommit(cmd.cli_ctx)
    parameters = {
        'target_locations': target_locations,
        'configuration_ids': configuration_ids,
        'commit_type': commit_type,
    }
    return client.post(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters)
def network_manager_deploy_status_list(cmd, client, resource_group_name,
                                       network_manager_name, skip_token=None,
                                       regions=None, deployment_types=None):
    """List deployment statuses, optionally filtered by region and type."""
    # A deployment-status client is built from the CLI context; the incoming
    # `client` is not used (matching the original implementation).
    client = cf_networkmanagerdeploymentstatus(cmd.cli_ctx)
    parameters = {
        'regions': regions,
        'deployment_types': deployment_types,
        'skip_token': skip_token,
    }
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters)
def network_manager_effect_vnet_list_by_network_group(cmd, client, resource_group_name,
                                                      network_manager_name,
                                                      network_group_name,
                                                      skip_token=None):
    """List effective virtual networks for one network group."""
    client = cf_listeffectivevirtualnetwork(cmd.cli_ctx)
    parameters = {'skip_token': skip_token}
    return client.by_network_group(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        network_group_name=network_group_name,
        parameters=parameters)
# def network_manager_effect_vnet_list_by_network_manager(cmd,
# client,
# resource_group_name,
# network_manager_name,
# top=None,
# skip_token=None,
# conditional_members=None):
# client = cf_effectivevirtualnetwork(cmd.cli_ctx)
# parameters = {}
# parameters['skip_token'] = skip_token
# parameters['conditional_members'] = conditional_members
# return client.list_by_network_manager(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# top=top,
# skip_token=skip_token,
# parameters=parameters)
def network_manager_active_config_list(cmd, client, resource_group_name,
                                       network_manager_name, skip_token=None,
                                       regions=None):
    """List active connectivity configurations, optionally filtered by region."""
    client = cf_networkmanagementclient(cmd.cli_ctx)
    parameters = {'skip_token': skip_token, 'regions': regions}
    return client.list_active_connectivity_configurations(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters)
def network_manager_effective_config_list(cmd, client, resource_group_name,
                                          virtual_network_name, skip_token=None):
    """List connectivity configurations effective on a virtual network."""
    client = cf_networkmanagementclient(cmd.cli_ctx)
    parameters = {'skip_token': skip_token}
    return client.list_network_manager_effective_connectivity_configurations(
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        parameters=parameters)
def network_manager_effective_security_admin_rule_list(cmd, client, resource_group_name,
                                                       virtual_network_name,
                                                       skip_token=None):
    """List security admin rules effective on a virtual network."""
    client = cf_networkmanagementclient(cmd.cli_ctx)
    parameters = {'skip_token': skip_token}
    return client.list_network_manager_effective_security_admin_rules(
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        parameters=parameters)
def network_manager_active_security_admin_rule_list(cmd, client, resource_group_name,
                                                    network_manager_name,
                                                    skip_token=None, regions=None):
    """List active security admin rules, optionally filtered by region.

    Fix: the region filter was previously stored under the key 'region',
    while the sibling active-configuration listing uses 'regions' (matching
    the service parameter model), so the filter was silently ignored.
    """
    client = cf_networkmanagementclient(cmd.cli_ctx)
    parameters = {
        'skip_token': skip_token,
        # Use the same key as network_manager_active_config_list.
        'regions': regions,
    }
    return client.list_active_security_admin_rules(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters)
# def network_manager_active_security_user_rule_list(cmd,
# client,
# resource_group_name,
# network_manager_name,
# skip_token=None,
# regions=None):
# client = cf_activesecurityuserrule(cmd.cli_ctx)
# parameters = {}
# parameters['skip_token'] = skip_token
# parameters['regions'] = regions
# return client.list(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# parameters=parameters)
def network_manager_effective_security_user_rule_list(client, resource_group_name,
                                                      network_manager_name,
                                                      top=None, skip_token=None):
    """List effective security user rules for a network manager."""
    parameters = {'skip_token': skip_token}
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        parameters=parameters,
        top=top)
def network_manager_connect_config_list(client, resource_group_name,
                                        network_manager_name, top=None, skip_token=None):
    """List connectivity configurations of a network manager."""
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        top=top, skip_token=skip_token)
def network_manager_connect_config_show(client, resource_group_name,
                                        network_manager_name, configuration_name):
    """Get one connectivity configuration by name."""
    return client.get(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name)
def network_manager_connect_config_create(client, resource_group_name,
                                          network_manager_name, configuration_name,
                                          applies_to_groups, connectivity_topology,
                                          display_name=None, description=None,
                                          hub=None, is_global=None,
                                          delete_existing_peering=None):
    """Create a connectivity configuration.

    Raises CLIError when the hub-and-spoke topology is requested without --hub.
    """
    if connectivity_topology == 'HubAndSpoke' and hub is None:
        # Fixed the garbled user-facing message ("topolopy seleted").
        raise CLIError("if 'HubAndSpoke' is the selected topology, '--hub' is required")
    connectivity_configuration = {
        'display_name': display_name,
        'description': description,
        'connectivity_topology': connectivity_topology,
        'hubs': hub,
        'is_global': is_global,
        'applies_to_groups': applies_to_groups,
        'delete_existing_peering': delete_existing_peering,
    }
    return client.create_or_update(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name,
        connectivity_configuration=connectivity_configuration)
def network_manager_connect_config_update(instance,
                                          resource_group_name,
                                          network_manager_name,
                                          configuration_name,
                                          display_name=None,
                                          description=None,
                                          hub=None,
                                          is_global=None,
                                          applies_to_groups=None,
                                          delete_existing_peering=None):
    """Patch only the supplied fields on an existing connectivity configuration."""
    # The CLI `hub` argument maps onto the model's `hubs` attribute.
    updates = (('display_name', display_name),
               ('description', description),
               ('hubs', hub),
               ('is_global', is_global),
               ('applies_to_groups', applies_to_groups),
               ('delete_existing_peering', delete_existing_peering))
    for attr, value in updates:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_connect_config_delete(client, resource_group_name,
                                          network_manager_name, configuration_name):
    """Delete a connectivity configuration."""
    return client.delete(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name)
def network_manager_group_list(client, resource_group_name, network_manager_name,
                               top=None, skip_token=None):
    """List network groups of a network manager."""
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        top=top, skip_token=skip_token)
def network_manager_group_show(client, resource_group_name, network_manager_name,
                               network_group_name):
    """Get one network group by name."""
    return client.get(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        network_group_name=network_group_name)
def network_manager_group_create(client, resource_group_name, network_manager_name,
                                 network_group_name, member_type, if_match=None,
                                 display_name=None, description=None):
    """Create (or overwrite) a network group."""
    parameters = {
        'display_name': display_name,
        'description': description,
        'member_type': member_type,
    }
    return client.create_or_update(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        network_group_name=network_group_name,
        if_match=if_match,
        parameters=parameters)
def network_manager_group_update(instance, resource_group_name, network_manager_name,
                                 network_group_name, if_match=None, display_name=None,
                                 description=None, member_type=None):
    """Patch only the supplied fields on an existing network group.

    `if_match` is accepted for CLI signature parity but not used here,
    matching the original implementation.
    """
    updates = (('display_name', display_name),
               ('description', description),
               ('member_type', member_type))
    for attr, value in updates:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_group_delete(client, resource_group_name, network_manager_name,
                                 network_group_name, force=None):
    """Delete a network group (optionally forcing removal)."""
    return client.delete(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        network_group_name=network_group_name,
        force=force)
def network_manager_security_user_config_list(client, resource_group_name,
                                              network_manager_name, top=None,
                                              skip_token=None):
    """List security user configurations of a network manager."""
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        top=top, skip_token=skip_token)
def network_manager_security_user_config_show(client, resource_group_name,
                                              network_manager_name, configuration_name):
    """Get one security user configuration by name."""
    return client.get(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name)
def network_manager_security_user_config_create(client, resource_group_name,
                                                network_manager_name, configuration_name,
                                                display_name=None, description=None,
                                                delete_existing_ns_gs=None):
    """Create (or overwrite) a security user configuration."""
    security_configuration = {
        'display_name': display_name,
        'description': description,
        'delete_existing_ns_gs': delete_existing_ns_gs,
    }
    return client.create_or_update(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name,
        security_user_configuration=security_configuration)
def network_manager_security_user_config_update(instance, resource_group_name,
                                                network_manager_name, configuration_name,
                                                display_name=None, description=None,
                                                security_type=None,
                                                delete_existing_ns_gs=None):
    """Patch only the supplied fields on a security user configuration."""
    updates = (('display_name', display_name),
               ('description', description),
               ('security_type', security_type),
               ('delete_existing_ns_gs', delete_existing_ns_gs))
    for attr, value in updates:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_security_user_config_delete(client, resource_group_name,
                                                network_manager_name, configuration_name):
    """Delete a security user configuration."""
    return client.delete(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name)
def network_manager_security_admin_config_list(client, resource_group_name,
                                               network_manager_name, top=None,
                                               skip_token=None):
    """List security admin configurations of a network manager."""
    return client.list(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        top=top, skip_token=skip_token)
def network_manager_security_admin_config_show(client, resource_group_name,
                                               network_manager_name, configuration_name):
    """Get one security admin configuration by name."""
    return client.get(
        resource_group_name=resource_group_name,
        network_manager_name=network_manager_name,
        configuration_name=configuration_name)
def network_manager_security_admin_config_create(client,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 display_name=None,
                                                 description=None,
                                                 delete_existing_ns_gs=None,
                                                 apply_on_network_intent_policy_based_services=None):
    """Create or update a security admin configuration on a network manager."""
    payload = {
        "display_name": display_name,
        "description": description,
        "delete_existing_ns_gs": delete_existing_ns_gs,
        "apply_on_network_intent_policy_based_services":
            apply_on_network_intent_policy_based_services,
    }
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_manager_name=network_manager_name,
                                   configuration_name=configuration_name,
                                   security_admin_configuration=payload)
def network_manager_security_admin_config_update(instance,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 display_name=None,
                                                 description=None,
                                                 delete_existing_ns_gs=None,
                                                 apply_on_network_intent_policy_based_services=None):
    """Patch a security admin configuration in place; only non-None fields are applied."""
    patch = (("display_name", display_name),
             ("description", description),
             ("delete_existing_ns_gs", delete_existing_ns_gs),
             ("apply_on_network_intent_policy_based_services",
              apply_on_network_intent_policy_based_services))
    for attr, value in patch:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_security_admin_config_delete(client,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 force=None):
    """Delete a security admin configuration (force skips confirmation server-side)."""
    target = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "force": force,
    }
    return client.delete(**target)
def network_manager_admin_rule_collection_list(client,
                                               resource_group_name,
                                               network_manager_name,
                                               configuration_name,
                                               top=None,
                                               skip_token=None):
    """List admin rule collections under a security admin configuration."""
    query = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "top": top,
        "skip_token": skip_token,
    }
    return client.list(**query)
def network_manager_admin_rule_collection_create(client,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 rule_collection_name,
                                                 applies_to_groups,
                                                 display_name=None,
                                                 description=None):
    """Create or update an admin rule collection scoped to the given network groups."""
    collection = {
        "display_name": display_name,
        "description": description,
        "applies_to_groups": applies_to_groups,
    }
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_manager_name=network_manager_name,
                                   configuration_name=configuration_name,
                                   rule_collection_name=rule_collection_name,
                                   rule_collection=collection)
def network_manager_admin_rule_collection_update(instance,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 rule_collection_name,
                                                 display_name=None,
                                                 description=None,
                                                 applies_to_groups=None):
    """Patch an admin rule collection in place; only non-None fields are applied."""
    patch = (("display_name", display_name),
             ("description", description),
             ("applies_to_groups", applies_to_groups))
    for attr, value in patch:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_admin_rule_collection_show(client,
                                               resource_group_name,
                                               network_manager_name,
                                               configuration_name,
                                               rule_collection_name):
    """Fetch one admin rule collection by name."""
    lookup = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "rule_collection_name": rule_collection_name,
    }
    return client.get(**lookup)
def network_manager_admin_rule_collection_delete(client,
                                                 resource_group_name,
                                                 network_manager_name,
                                                 configuration_name,
                                                 rule_collection_name):
    """Delete an admin rule collection from a security admin configuration."""
    target = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "rule_collection_name": rule_collection_name,
    }
    return client.delete(**target)
def network_manager_admin_rule_create(client,
                                      resource_group_name,
                                      network_manager_name,
                                      configuration_name,
                                      rule_collection_name,
                                      rule_name,
                                      kind,
                                      protocol,
                                      access,
                                      priority,
                                      direction,
                                      display_name=None,
                                      description=None,
                                      sources=None,
                                      destinations=None,
                                      source_port_ranges=None,
                                      destination_port_ranges=None,
                                      flag=None):
    """Create or update a security admin rule inside a rule collection."""
    admin_rule = {
        "kind": kind,
        "display_name": display_name,
        "description": description,
        "protocol": protocol,
        "sources": sources,
        "destinations": destinations,
        "source_port_ranges": source_port_ranges,
        "destination_port_ranges": destination_port_ranges,
        "access": access,
        "priority": priority,
        "direction": direction,
        "flag": flag,
    }
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_manager_name=network_manager_name,
                                   configuration_name=configuration_name,
                                   rule_collection_name=rule_collection_name,
                                   rule_name=rule_name,
                                   admin_rule=admin_rule)
def network_manager_admin_rule_update(instance,
                                      resource_group_name,
                                      network_manager_name,
                                      configuration_name,
                                      rule_collection_name,
                                      rule_name,
                                      kind=None,
                                      display_name=None,
                                      description=None,
                                      protocol=None,
                                      sources=None,
                                      destinations=None,
                                      source_port_ranges=None,
                                      destination_port_ranges=None,
                                      access=None,
                                      priority=None,
                                      direction=None,
                                      flag=None):
    """Patch an admin rule in place.

    For the built-in 'DefaultAdminRule' kind only the `flag` field may change;
    for custom rules every other non-None field is applied.
    """
    if kind == 'DefaultAdminRule':
        if flag is not None:
            instance.flag = flag
        return instance
    patch = (("display_name", display_name),
             ("description", description),
             ("protocol", protocol),
             ("sources", sources),
             ("destinations", destinations),
             ("source_port_ranges", source_port_ranges),
             ("destination_port_ranges", destination_port_ranges),
             ("access", access),
             ("priority", priority),
             ("direction", direction))
    for attr, value in patch:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_admin_rule_list(client,
                                    resource_group_name,
                                    network_manager_name,
                                    configuration_name,
                                    rule_collection_name,
                                    top=None,
                                    skip_token=None):
    """List admin rules inside a rule collection (optionally paged)."""
    query = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "rule_collection_name": rule_collection_name,
        "top": top,
        "skip_token": skip_token,
    }
    return client.list(**query)
def network_manager_admin_rule_show(client,
                                    resource_group_name,
                                    network_manager_name,
                                    configuration_name,
                                    rule_collection_name,
                                    rule_name):
    """Fetch one admin rule by name."""
    lookup = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "rule_collection_name": rule_collection_name,
        "rule_name": rule_name,
    }
    return client.get(**lookup)
def network_manager_admin_rule_delete(client,
                                      resource_group_name,
                                      network_manager_name,
                                      configuration_name,
                                      rule_collection_name,
                                      rule_name):
    """Delete an admin rule from a rule collection."""
    target = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "configuration_name": configuration_name,
        "rule_collection_name": rule_collection_name,
        "rule_name": rule_name,
    }
    return client.delete(**target)
# def network_manager_user_rule_collection_list(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# top=None,
# skip_token=None):
# return client.list(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# top=top,
# skip_token=skip_token)
# def network_manager_user_rule_collection_create(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# applies_to_groups,
# display_name=None,
# description=None):
# rule_collection = {}
# rule_collection['display_name'] = display_name
# rule_collection['description'] = description
# rule_collection['applies_to_groups'] = applies_to_groups
# return client.create_or_update(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name,
# user_rule_collection=rule_collection)
# def network_manager_user_rule_collection_update(instance,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# display_name=None,
# description=None,
# applies_to_groups=None):
# if display_name is not None:
# instance.display_name = display_name
# if description is not None:
# instance.description = description
# if applies_to_groups is not None:
# instance.applies_to_groups = applies_to_groups
# return instance
# def network_manager_user_rule_collection_show(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name):
# return client.get(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name)
# def network_manager_user_rule_collection_delete(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name):
# return client.delete(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name)
# def network_manager_user_rule_list(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# top=None,
# skip_token=None):
# return client.list(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name,
# top=top,
# skip_token=skip_token)
# def network_manager_user_rule_show(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# rule_name):
# return client.get(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name,
# rule_name=rule_name)
# def network_manager_user_rule_create(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# rule_name,
# kind=None,
# display_name=None,
# description=None,
# protocol=None,
# sources=None,
# destinations=None,
# source_port_ranges=None,
# destination_port_ranges=None,
# direction=None,
# flag=None):
# user_rule = {}
# user_rule['kind'] = kind
# user_rule['display_name'] = display_name
# user_rule['description'] = description
# user_rule['protocol'] = protocol
# user_rule['sources'] = sources
# user_rule['destinations'] = destinations
# user_rule['source_port_ranges'] = source_port_ranges
# user_rule['destination_port_ranges'] = destination_port_ranges
# user_rule['direction'] = direction
# user_rule['flag'] = flag
# return client.create_or_update(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name,
# rule_name=rule_name,
# user_rule=user_rule)
# def network_manager_user_rule_update(instance,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# rule_name,
# kind=None,
# display_name=None,
# description=None,
# protocol=None,
# sources=None,
# destinations=None,
# source_port_ranges=None,
# destination_port_ranges=None,
# direction=None,
# flag=None):
#
# if kind == 'DefaultUserRule':
# if flag is not None:
# instance.flag = flag
# else:
# if display_name is not None:
# instance.display_name = display_name
# if description is not None:
# instance.description = description
# if protocol is not None:
# instance.protocol = protocol
# if sources is not None:
# instance.sources = sources
# if destinations is not None:
# instance.destinations = destinations
# if source_port_ranges is not None:
# instance.source_port_ranges = source_port_ranges
# if destination_port_ranges is not None:
# instance.destination_port_ranges = destination_port_ranges
# if direction is not None:
# instance.direction = direction
# return instance
# def network_manager_user_rule_delete(client,
# resource_group_name,
# network_manager_name,
# configuration_name,
# rule_collection_name,
# rule_name):
# return client.delete(resource_group_name=resource_group_name,
# network_manager_name=network_manager_name,
# configuration_name=configuration_name,
# rule_collection_name=rule_collection_name,
# rule_name=rule_name)
def network_manager_vnet_security_perimeter_list(client,
                                                 resource_group_name,
                                                 top=None,
                                                 skip_token=None):
    """List network security perimeters within a resource group."""
    query = {
        "resource_group_name": resource_group_name,
        "top": top,
        "skip_token": skip_token,
    }
    return client.list(**query)
def network_manager_vnet_security_perimeter_create(client,
                                                   resource_group_name,
                                                   network_security_perimeter_name,
                                                   display_name=None,
                                                   description=None):
    """Create or update a network security perimeter."""
    payload = {
        "display_name": display_name,
        "description": description,
    }
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_security_perimeter_name=network_security_perimeter_name,
                                   parameters=payload)
def network_manager_vnet_security_perimeter_update(instance,
                                                   resource_group_name,
                                                   network_security_perimeter_name,
                                                   display_name=None,
                                                   description=None):
    """Patch a network security perimeter in place; only non-None fields are applied."""
    for attr, value in (("display_name", display_name), ("description", description)):
        if value is not None:
            setattr(instance, attr, value)
    return instance
def network_manager_vnet_security_perimeter_show(client,
                                                 resource_group_name,
                                                 network_security_perimeter_name):
    """Fetch one network security perimeter by name."""
    lookup = {
        "resource_group_name": resource_group_name,
        "network_security_perimeter_name": network_security_perimeter_name,
    }
    return client.get(**lookup)
def network_manager_vnet_security_perimeter_delete(client,
                                                   resource_group_name,
                                                   network_security_perimeter_name):
    """Delete a network security perimeter."""
    target = {
        "resource_group_name": resource_group_name,
        "network_security_perimeter_name": network_security_perimeter_name,
    }
    return client.delete(**target)
def network_manager_perimeter_associable_resource_type_list(client,
                                                            location):
    """List resource types that can be associated with a perimeter in a location."""
    return client.get(**{"location": location})
def network_manager_connection_subscription_list(client,
                                                 top=None,
                                                 skip_token=None):
    """List subscription-scoped network manager connections (optionally paged)."""
    query = {"top": top, "skip_token": skip_token}
    return client.list(**query)
def network_manager_connection_subscription_create(client,
                                                   network_manager_connection_name,
                                                   network_manager_id,
                                                   description=None):
    """Create or update a subscription-scoped network manager connection."""
    payload = {
        "network_manager_id": network_manager_id,
        "description": description,
    }
    return client.create_or_update(network_manager_connection_name=network_manager_connection_name,
                                   parameters=payload)
def network_manager_connection_subscription_update(instance,
                                                   description=None):
    """Patch a subscription connection; only a non-None description is applied."""
    if description is None:
        return instance
    instance.description = description
    return instance
def network_manager_connection_subscription_show(client,
                                                 network_manager_connection_name):
    """Fetch one subscription-scoped network manager connection by name."""
    return client.get(**{"network_manager_connection_name": network_manager_connection_name})
def network_manager_connection_subscription_delete(client,
                                                   network_manager_connection_name):
    """Delete a subscription-scoped network manager connection."""
    return client.delete(**{"network_manager_connection_name": network_manager_connection_name})
# def network_manager_connection_management_group_list(client,
# top=None,
# skip_token=None):
# return client.list(top=top,
# skip_token=skip_token)
#
#
# def network_manager_connection_management_group_create(client,
# resource_group_name,
# network_manager_connection_name,
# management_group_id,
# network_manager_id,
# description=None):
# parameters = {}
# parameters['description'] = description
# parameters['network_manager_id'] = network_manager_id
# return client.create_or_update(resource_group_name=resource_group_name,
# network_manager_connection_name=network_manager_connection_name,
# management_group_id=management_group_id,
# parameters=parameters)
#
#
# def network_manager_connection_management_group_update(instance,
# management_group_id,
# description=None):
# if description is not None:
# instance.description = description
# if management_group_id is not None:
# instance.management_group_id = management_group_id
# return instance
#
#
# def network_manager_connection_management_group_show(client,
# resource_group_name,
# network_manager_connection_name,
# management_group_id):
# return client.get(resource_group_name=resource_group_name,
# network_manager_connection_name=network_manager_connection_name,
# management_group_id=management_group_id)
#
#
# def network_manager_connection_management_group_delete(client,
# resource_group_name,
# network_manager_connection_name,
# management_group_id):
# return client.delete(resource_group_name=resource_group_name,
# network_manager_connection_name=network_manager_connection_name,
# management_group_id=management_group_id)
def network_manager_scope_connection_list(client,
                                          resource_group_name,
                                          network_manager_name,
                                          top=None,
                                          skip_token=None):
    """List scope connections of a network manager (optionally paged)."""
    query = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "top": top,
        "skip_token": skip_token,
    }
    return client.list(**query)
def network_manager_scope_connection_create(client,
                                            resource_group_name,
                                            network_manager_name,
                                            scope_connection_name,
                                            tenant_id,
                                            resource_id,
                                            description=None):
    """Create or update a cross-tenant scope connection on a network manager."""
    payload = {
        "tenant_id": tenant_id,
        "resource_id": resource_id,
        "description": description,
    }
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_manager_name=network_manager_name,
                                   scope_connection_name=scope_connection_name,
                                   parameters=payload)
def network_manager_scope_connection_update(instance,
                                            description=None):
    """Patch a scope connection; only a non-None description is applied."""
    if description is None:
        return instance
    instance.description = description
    return instance
def network_manager_scope_connection_show(client,
                                          resource_group_name,
                                          network_manager_name,
                                          scope_connection_name):
    """Fetch one scope connection by name."""
    lookup = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "scope_connection_name": scope_connection_name,
    }
    return client.get(**lookup)
def network_manager_scope_connection_delete(client,
                                            resource_group_name,
                                            network_manager_name,
                                            scope_connection_name):
    """Delete a scope connection from a network manager."""
    target = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "scope_connection_name": scope_connection_name,
    }
    return client.delete(**target)
def network_manager_group_static_member_list(client,
                                             resource_group_name,
                                             network_manager_name,
                                             network_group_name,
                                             top=None,
                                             skip_token=None):
    """List static members of a network group (optionally paged)."""
    query = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "network_group_name": network_group_name,
        "top": top,
        "skip_token": skip_token,
    }
    return client.list(**query)
def network_manager_group_static_member_create(client,
                                               resource_group_name,
                                               network_manager_name,
                                               network_group_name,
                                               static_member_name,
                                               resource_id):
    """Add (or update) a static member pointing at a resource inside a network group."""
    payload = {"resource_id": resource_id}
    return client.create_or_update(resource_group_name=resource_group_name,
                                   network_manager_name=network_manager_name,
                                   network_group_name=network_group_name,
                                   static_member_name=static_member_name,
                                   parameters=payload)
# def network_manager_group_static_member_update(instance, resource_id):
# if resource_id is not None:
# instance.resource_id = resource_id
# return instance
def network_manager_group_static_member_show(client,
                                             resource_group_name,
                                             network_manager_name,
                                             network_group_name,
                                             static_member_name):
    """Fetch one static member of a network group by name."""
    lookup = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "network_group_name": network_group_name,
        "static_member_name": static_member_name,
    }
    return client.get(**lookup)
def network_manager_group_static_member_delete(client,
                                               resource_group_name,
                                               network_manager_name,
                                               network_group_name,
                                               static_member_name):
    """Remove a static member from a network group."""
    target = {
        "resource_group_name": resource_group_name,
        "network_manager_name": network_manager_name,
        "network_group_name": network_group_name,
        "static_member_name": static_member_name,
    }
    return client.delete(**target)
| 48.962036 | 119 | 0.495938 |
14889bcb476954e7d14379f76a47622447c41f12 | 449 | py | Python | glocaltokens/utils/types.py | samapriya/glocaltokens | 989dcec7f0b68da41bf8e4de29c63a20525964d7 | [
"MIT"
] | null | null | null | glocaltokens/utils/types.py | samapriya/glocaltokens | 989dcec7f0b68da41bf8e4de29c63a20525964d7 | [
"MIT"
] | null | null | null | glocaltokens/utils/types.py | samapriya/glocaltokens | 989dcec7f0b68da41bf8e4de29c63a20525964d7 | [
"MIT"
] | null | null | null | def is_numeric(variable) -> bool:
"""Checks if a variable is numeric"""
return is_integer(variable) or is_float(variable)
def is_integer(variable):
    """Checks if a variable is an integer value (bools are excluded on purpose)."""
    # `type(x) is int` keeps the exact-type semantics of the original
    # `type(x) == int` (so True/False do not count) while using the
    # idiomatic identity comparison for types (flake8 E721).
    return type(variable) is int
def is_float(variable):
    """Checks if a variable is a floating point value (exact type, no subclasses)."""
    # Identity comparison keeps the original exact-type behavior (E721 fix).
    return type(variable) is float
class Struct:
    """Lightweight object whose attributes come from the keyword arguments."""

    def __init__(self, **entries):
        for name, value in entries.items():
            setattr(self, name, value)
| 23.631579 | 56 | 0.681514 |
dcfc4249b685f8d00cb5db85845f691a3f1e6126 | 1,656 | py | Python | test/functional/feature_blocksdir.py | BlackHatCoin/BlackHatWallet | 0f19b0ffafbf80ca97448f6ec243fb61218fd2c6 | [
"MIT"
] | 5 | 2021-05-01T18:24:06.000Z | 2022-01-12T22:05:16.000Z | test/functional/feature_blocksdir.py | BlackHatCoin/BlackHatWallet | 0f19b0ffafbf80ca97448f6ec243fb61218fd2c6 | [
"MIT"
] | null | null | null | test/functional/feature_blocksdir.py | BlackHatCoin/BlackHatWallet | 0f19b0ffafbf80ca97448f6ec243fb61218fd2c6 | [
"MIT"
] | 2 | 2021-05-02T01:48:22.000Z | 2022-02-23T21:44:07.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option."""
import os
import shutil
from test_framework.test_framework import BlackHatTestFramework, initialize_datadir
class BlocksdirTest(BlackHatTestFramework):
    """Functional test for the -blocksdir option: blocks must be stored in the
    user-specified directory, and startup must fail if that directory is missing."""

    def set_test_params(self):
        # Single node on a fresh (empty) regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        self.stop_node(0)
        # By default, block files live under <datadir>/regtest/blocks.
        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks"))
        assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "blocks"))
        # Wipe the datadir and re-create it so the node starts from scratch.
        shutil.rmtree(self.nodes[0].datadir)
        initialize_datadir(self.options.tmpdir, 0)
        self.log.info("Starting with nonexistent blocksdir ...")
        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
        # The node must refuse to start when -blocksdir points nowhere.
        expected_err = 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path)
        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], expected_err)
        os.mkdir(blocksdir_path)
        self.log.info("Starting with existing blocksdir ...")
        self.start_node(0, ["-blocksdir=" + blocksdir_path])
        self.log.info("mining blocks..")
        self.nodes[0].generate(10)
        # Block data goes to the custom blocksdir; the block index stays in the datadir.
        assert(os.path.isfile(os.path.join(self.options.tmpdir, "blocksdir", "regtest", "blocks", "blk00000.dat")))
        assert(os.path.isdir(os.path.join(self.options.tmpdir, "node0", "regtest", "blocks", "index")))
# Run the functional test directly when invoked as a script.
if __name__ == '__main__':
    BlocksdirTest().main()
| 43.578947 | 115 | 0.693841 |
fae88690028930509b0ff873ec8c96c4a4e10cd4 | 2,440 | py | Python | codewriter.py | 42ip/animatedStickersDB | 861162907e67a4deae54c92c47f97fbaef7fd6ee | [
"MIT"
] | 4 | 2021-09-17T06:36:35.000Z | 2021-10-30T08:07:47.000Z | codewriter.py | 42ip/animatedStickersDB | 861162907e67a4deae54c92c47f97fbaef7fd6ee | [
"MIT"
] | null | null | null | codewriter.py | 42ip/animatedStickersDB | 861162907e67a4deae54c92c47f97fbaef7fd6ee | [
"MIT"
] | 3 | 2021-09-18T15:34:58.000Z | 2022-02-13T18:27:53.000Z | # DEPRECIATED
# loop to add emojis to the .json file.
import json,sys,os
# `d` maps sticker keys to lists of alias names (last element is the payload).
d = dict()
# The custom code will appear on the output.txt file. just copy and paste into the place
with open(sys.path[0] + '/stickers.json', 'r+') as openfile:
    # Reading from json file
    json_object = json.load(openfile)
    d = json_object
# Template pieces emitted at the start/end of every generated output file.
startfile = "{{$a := index .CmdArgs 0 }} \n"
endfile = "{{end}}"
fileno = 1
def updatePath(i):
    """Return the output file path for the i-th generated command chunk."""
    return f"{sys.path[0]}/outputs/output{i}.txt"
# Open the first output chunk and emit the command preamble.
PATH = sys.path[0] + "/outputs/output{}.txt".format(fileno)
f = open(PATH, "w")
f.write(startfile)
f.write("{{if or (eq $a \"stickers\") (eq $a \"gifs\") (eq $a \"gif\") }}");f.write("\n")
f.write("{{deleteTrigger 0 }}");f.write("\n")
# todo: need to create a failsafe where the search gifs , if it ever exceeds 8000 characters, must overflow into the next command properly.
# This section will allow you to see the gifs with the first letter as index.
f.write("{{if eq (len .Args) 1}}")
# First alias of every entry, sorted, forms the browsable index.
arr = sorted([d[key][0] for key in d])
print(arr)
f.write(" ".join(arr) + "\n")
f.write("{{else if eq (len .Args) 2}}\n")
f.write("{{$b := index .CmdArgs 1}}\n")
# One else-if branch per starting letter so users can filter by first letter.
for letter in "abcdefghijklmnopqrstuvwxyz":
    listed = " ".join(list(filter(lambda string: string[0].lower() == letter,arr)))
    f.write("{" + "{{{next} if eq (lower $b) \"{l}\"}}".format(next="else" if letter != 'a' else "" ,l=letter) + "}\n" + (listed if len(listed) > 0 else "none found :(") + "\n")
f.write("{{end}}{{end}}\n")
f.write(r"{{ deleteResponse 30 }}");f.write("\n")
# this section ends here
start = False
def giveString(arr,start):
    """Render one template branch for a sticker entry.

    arr[:-1] are the alias names matched against $a, arr[-1] is the payload
    written on its own (tab-indented) line.  `start` selects `{{if ...}}`
    for the very first branch, `{{else if ...}}` otherwise.
    """
    if len(arr) == 2:
        alias = arr[0]
        if alias != 'waa':
            keyword = "{{if" if start else "{{else if"
            branch = keyword + " eq $a \"" + alias + "\" }}\n"
        else:
            # Special case: any run of a's ("wa", "waaa", ...) matches via regex.
            branch = "{{ else if reFind \"^wa*a$\" $a }\n"
        branch += "\t"
    else:
        branch = "{{if or " if start else "{{ else if or "
        for alias in arr[:-1]:
            branch += "(eq $a \"" + alias + "\" ) "
        branch += "}}\n\t"
    branch += arr[-1]
    branch += "\n"
    return branch
# Emit one branch per sticker; roll over to a new output file once the
# current chunk approaches Discord's command size limit.
for key in d:
    f.write(giveString(d[key],start))
    start = False
    currsize = os.path.getsize(PATH)
    if currsize > 7500:
        print("new file created")
        # Next chunk begins with a fresh `{{if}}` branch.
        start = True
        f.write(endfile)
        fileno += 1
        PATH = updatePath(fileno)
        f = open(PATH,"w")
        f.write(startfile)
f.write(endfile)
f.close()
| 34.366197 | 212 | 0.562295 |
3e57b7c5ebab9146913ac69009664acd049ec3e6 | 3,784 | py | Python | MuPythonLibrary/TPM/Tpm2Stream.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 8 | 2019-10-05T09:06:39.000Z | 2022-03-11T10:45:12.000Z | MuPythonLibrary/TPM/Tpm2Stream.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 3 | 2018-12-14T21:14:17.000Z | 2019-04-18T20:26:55.000Z | MuPythonLibrary/TPM/Tpm2Stream.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 8 | 2019-05-10T19:18:39.000Z | 2022-03-11T10:45:09.000Z | # @file Tpm2Stream.py
# This file contains utility classes to help marshal and unmarshal data to/from the TPM.
#
##
# Copyright (c) 2017, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import struct
class Tpm2StreamElement(object):
    """Base class for items that can be marshalled into a TPM 2.0 byte stream."""

    def __init__(self):
        # struct format string; subclasses override this with their layout.
        self.pack_string = ""

    def get_size(self):
        """Return the size in bytes of this structure when marshalled."""
        return struct.calcsize(self.pack_string)
class Tpm2StreamPrimitive(Tpm2StreamElement):
    """A fixed-width big-endian unsigned integer (1, 2, 4, or 8 bytes)."""

    def __init__(self, size, value):
        super(Tpm2StreamPrimitive, self).__init__()
        pack_strings = {1: ">B", 2: ">H", 4: ">L", 8: ">Q"}
        if size not in pack_strings:
            raise ValueError("Size must be 1, 2, 4, or 8 bytes!")
        self.pack_string = pack_strings[size]
        self.value = value

    def marshal(self):
        """Pack the value as a big-endian integer of the configured width."""
        return struct.pack(self.pack_string, self.value)
class TPM2_COMMAND_HEADER(Tpm2StreamElement):
    """TPM 2.0 command header: tag (u16), total command size (u32), command code (u32)."""

    def __init__(self, tag, size, code):
        super(TPM2_COMMAND_HEADER, self).__init__()
        self.pack_string = ">HLL"
        self.tag = tag
        self.size = size
        self.code = code

    def update_size(self, size):
        """Record the size of the whole command (header plus payload)."""
        self.size = size

    def marshal(self):
        """Serialize tag, size, and code as big-endian fields."""
        return struct.pack(self.pack_string, self.tag, self.size, self.code)
class TPM2B(Tpm2StreamElement):
    """TPM2B sized buffer: a big-endian u16 length followed by the raw bytes."""

    def __init__(self, data):
        super(TPM2B, self).__init__()
        # Delegate to update_data() so size/pack_string are computed in one place
        # instead of duplicating the three assignments here.
        self.update_data(data)

    def update_data(self, data):
        """Replace the buffer contents and recompute size and pack string."""
        self.data = data
        self.size = len(data)
        self.pack_string = ">H%ds" % self.size

    def marshal(self):
        """Serialize as length-prefixed bytes."""
        return struct.pack(self.pack_string, self.size, self.data)
class Tpm2CommandStream(object):
    """Builds a complete TPM 2.0 command: a header followed by stream elements."""

    def __init__(self, tag, size, code):
        super(Tpm2CommandStream, self).__init__()
        self.header = TPM2_COMMAND_HEADER(tag, size, code)
        self.stream_size = self.header.get_size()
        self.header.update_size(self.stream_size)
        self.stream_elements = []

    def get_size(self):
        """Total marshalled size of the command so far."""
        return self.stream_size

    def add_element(self, element):
        """Append an element and keep the header's total size in sync."""
        self.stream_elements.append(element)
        self.stream_size += element.get_size()
        self.header.update_size(self.stream_size)

    def get_stream(self):
        """Marshal the header plus every element into one byte string."""
        parts = [self.header.marshal()]
        parts.extend(item.marshal() for item in self.stream_elements)
        return b''.join(parts)
| 33.785714 | 102 | 0.686839 |
0b30d226460ed8ef2b4bb8579974c11c798bafa3 | 9,406 | py | Python | game_map/cellular_automata.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | game_map/cellular_automata.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | game_map/cellular_automata.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | import arcade
import random
import math
from functools import wraps
import time
from constants import *
def stop_watch(func):
    """Decorator that times each call to *func* and prints the elapsed seconds
    (message text is in Japanese: "<name> took <t> seconds")."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        began = time.time()
        outcome = func(*args, **kwargs)
        elapsed_time = time.time() - began
        print(f"{func.__name__}は{elapsed_time}秒かかりました")
        return outcome
    return wrapper
class CellularAutomata:
    def __init__(self):
        """Set up the tuning parameters for the cellular-automata cave generator."""
        self.level = []  # 2D grid, filled by generate_level (1 = wall, 0 = floor)
        self.iterations = 15550  # random cells sampled during smoothing in create_caves
        self.neighbors = 4 # number of walls adjacent to a cell (automaton threshold)
        self.wall_probability = 0.455 # initial probability that a cell is a wall; .35-.55 recommended
        self.ROOM_MIN_SIZE = 4 # minimum cave size, in total cells
        self.ROOM_MAX_SIZE = 15 # maximum cave size, in total cells
        self.smooth_edges = True  # whether clean_up_map smooths jagged edges
        self.smoothing = 1  # max adjacent walls a wall cell may have before it is removed
    @stop_watch
    def generate_level(self, map_width, map_height):
        """Build and return a map_width x map_height grid (1 = wall, 0 = floor)."""
        # Create a new 2D array, or clear the existing one; start as all walls.
        self.caves = []
        self.level = [[1 for y in range(map_height)] for x in range(map_width)]
        self.random_fillmap(map_width, map_height)
        self.create_caves(map_width, map_height)
        self.get_caves(map_width, map_height)
        self.connot_caves(map_width, map_height)
        self.clean_up_map(map_width, map_height)
        return self.level
def random_fillmap(self, map_width, map_height):
for y in range(1, map_height-1):
for x in range(1, map_width-1):
if random.random() >= self.wall_probability:
self.level[x][y] = 0
    def create_caves(self, map_width, map_height):
        """Shape caves by applying the automaton rule at randomly sampled cells."""
        for i in range(0, self.iterations):
            # Pick a random interior tile.
            tile_x = random.randint(1, map_width-2)
            tile_y = random.randint(1, map_height-2)
            # If the cell has more adjacent walls than self.neighbors, set it to 1 (wall);
            # with fewer, set it to 0 (floor). Exactly self.neighbors leaves it unchanged.
            if self.get_adjacent_walls(tile_x, tile_y) > self.neighbors:
                self.level[tile_x][tile_y] = 1
            elif self.get_adjacent_walls(tile_x, tile_y) < self.neighbors:
                self.level[tile_x][tile_y] = 0
        self.clean_up_map(map_width, map_height)
def clean_up_map(self, map_width, map_height):
if (self.smooth_edges):
for i in range(0, 5):
# 各セルを個別に見て、滑らかさを確認します
for x in range(1, map_width-1):
for y in range(1, map_height-1):
if (self.level[x][y] == 1) and (self.get_adjacent_walls_simple(x, y) <= self.smoothing):
self.level[x][y] = 0
def create_tunnel(self, point_1, point_2, current_cave, map_width, map_height):
# randomwalkをpoint_1からpoint_2に実行する
drunker_dx = point_2[0]
drunker_dy = point_2[1]
while (drunker_dx, drunker_dy) not in current_cave:
north = 1.0
south = 1.0
east = 1.0
west = 1.0
weight = 1
# ランダムウォークをエッジに対して重み付けします
if drunker_dx < point_1[0]:
east += weight
elif drunker_dx > point_1[0]:
west += weight
if drunker_dy < point_1[1]:
south += weight
elif drunker_dy > point_1[1]:
north += weight
# 確率を正規化して、0から1の範囲を形成します
total = north+south+east+west
north /= total
south /= total
east /= total
west /= total
# choose the direction
choice = random.random()
if 0 <= choice < north:
dx = 0
dy = -1
elif north <= choice < (north+south):
dx = 0
dy = 1
elif (north+south) <= choice < (north+south+east):
dx = 1
dy = 0
else:
dx = -1
dy = 0
# =walk=
# edgesの衝突をチェックする
if (0 < drunker_dx+dx < map_width-1) and (0 < drunker_dy+dy < map_height-1):
drunker_dx += dx
drunker_dy += dy
if self.level[drunker_dx][drunker_dy] == 1:
self.level[drunker_dx][drunker_dy] = 0
def get_adjacent_walls_simple(self, x, y):
wall_counter = 0
if (self.level[x][y-1] == 1):
wall_counter += 1
if (self.level[x][y+1] == 1):
wall_counter += 1
if (self.level[x-1][y] == 1):
wall_counter += 1
if (self.level[x+1][y] == 1):
wall_counter += 1
return wall_counter
# 8方向の壁をチェックする
def get_adjacent_walls(self, tile_x, tile_y):
# pass
wall_counter = 0
for x in range(tile_x-1, tile_x+2):
for y in range(tile_y-1, tile_y+2):
if (self.level[x][y] ==1):
if (x != tile_x) or (y != tile_y):
wall_counter += 1
return wall_counter
def get_caves(self, map_width, map_height):
for x in range(0, map_width):
for y in range(0, map_height):
if self.level[x][y] == 0:
self.flood_fill(x, y)
for set in self.caves:
for tile in set:
self.level[tile[0]][tile[1]] = 0
def flood_fill(self, x, y):
#レベルの独立した領域をフラッドで埋め、最小サイズよりも小さい領域は破棄し
#最小限のサイズよりも小さいリージョンを捨てて, 残りの領域のリファレンスを作成します。
cave = set()
tile = (x, y)
to_be_filled = set([tile])
while to_be_filled:
tile = to_be_filled.pop()
if tile not in cave:
cave.add(tile)
self.level[tile[0]][tile[1]] = 1
# 隣接するセルをチェックする
x = tile[0]
y = tile[1]
north = (x, y-1)
south = (x, y+1)
east = (x+1, y)
west = (x-1, y)
for dist in [north, south, east, west]:
if self.level[dist[0]][dist[1]] == 0:
if dist not in to_be_filled and dist not in cave:
to_be_filled.add(dist)
if len(cave) >= self.ROOM_MIN_SIZE:
self.caves.append(cave)
def connot_caves(self, map_width, map_height):
# 現在のcaveに最も近いcaveを探す
for cur_cave in self.caves:
for point_1 in cur_cave:# cave1から要素を取得します
break
else:
return None
point_2 = None
distance = None
for next_cave in self.caves:
if next_cave != cur_cave and not self.check_connectivity(cur_cave, next_cave):
# nextCaveからランダムなポイントを選択します
for next_point in next_cave:# cave1から要素を取得します
break
# ポイント1と新旧のポイント2の距離を比較します
new_distance = self.distance_formula(point_1, next_point)
if distance == None or (new_distance < distance):
point_2 = next_point
distance = new_distance
if point_2:# すべてのトンネルが接続されている場合、point2 == None
print(point_2, "point_2")
self.create_tunnel(point_1, point_2, cur_cave, map_width, map_height)
def distance_formula(self, point_1, point_2):
d = math.sqrt((point_2[0]-point_1[0])**2 + (point_2[1]-point_1[1])**2)
return d
def check_connectivity(self, cave_1, cave_2):
# 洞窟1を浸水させた後、洞窟2のある地点をチェックして浸水させる
connected_region = set()
for start in cave_1:
break# cave1から要素を取得します
else:
return None
to_be_filled = set([start])
while to_be_filled:
tile = to_be_filled.pop()
if tile not in connected_region:
connected_region.add(tile)
# 隣接するセルを確認する
x = tile[0]
y = tile[1]
north = (x, y-1)
south = (x, y+1)
east = (x+1, y)
west = (x-1, y)
for dist in [north, south, east, west]:
if self.level[dist[0]][dist[1]] == 0:
if dist not in to_be_filled and dist not in connected_region:
to_be_filled.add(dist)
for end in cave_2:#cave2から要素を取得します
break
if end in connected_region:
return True
else:
return False
# cell = CellularAutomata().generate_level(map_height=40, map_width=40)
class MG(arcade.Window):
    """Small arcade window that renders the generated cave map for inspection."""
    def __init__(self, width, height, title="cell"):
        super().__init__(width, height, title)
        self._cell = CellularAutomata()
        self.cell = self._cell.generate_level(map_height=40, map_width=40)
        # TILE is presumably provided by `from constants import *` — confirm.
        self.tiles = [[TILE.WALL for y in range(40)] for x in range(40)]
        arcade.set_background_color((200,200,200))
    def on_draw(self):
        """Draw one filled square per wall cell (level value 1)."""
        arcade.start_render()
        for x in range(len(self.tiles)):
            for y in range(len(self.tiles)):
                if self.cell[x][y] == 1:
                    arcade.draw_rectangle_filled(x*10, y*10, 9, 9, arcade.color.BABY_POWDER)
    def on_update(self, delta_time):
        # No per-frame simulation; the map is generated once in __init__.
        pass
    def on_key_press(self, symbol: int, modifiers: int):
        # Allow quitting with the Escape key.
        if symbol == arcade.key.ESCAPE:
            arcade.close_window()
def main():
    """Create the demo window and start the arcade event loop."""
    gam = MG(600, 600)
    arcade.run()
if __name__ == "__main__":
    main()
| 30.738562 | 112 | 0.520625 |
45fb289da7e440d74d682b50cbe5f6281a25523c | 2,275 | py | Python | tools/sentencesplit.py | UCLA-BD2K/BRAT | 675e81bfcb1b504678a32d70a560c44ee5bfff1d | [
"CC-BY-3.0"
] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | tools/sentencesplit.py | UCLA-BD2K/BRAT | 675e81bfcb1b504678a32d70a560c44ee5bfff1d | [
"CC-BY-3.0"
] | 7 | 2020-06-18T15:21:48.000Z | 2022-03-02T05:02:45.000Z | tools/sentencesplit.py | UCLA-BD2K/BRAT | 675e81bfcb1b504678a32d70a560c44ee5bfff1d | [
"CC-BY-3.0"
] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z | #!/usr/bin/env python
'''
Basic sentence splitter using brat segmentation to add newlines to
input text at likely sentence boundaries.
'''
import sys
from os.path import join as path_join
from os.path import dirname
# Assuming this script is found in the brat tools/ directory ...
from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../server/src'))
# import brat sentence boundary generator
from ssplit import en_sentence_boundary_gen
def _text_by_offsets_gen(text, offsets):
for start, end in offsets:
yield text[start:end]
def _normspace(s):
import re
return re.sub(r'\s', ' ', s)
def sentencebreaks_to_newlines(text):
    """Return *text* with a newline substituted for the space at each
    likely sentence boundary.

    Boundaries come from en_sentence_boundary_gen; the result has exactly
    the same length and the same non-whitespace content as the input (both
    properties are asserted below).
    """
    offsets = [o for o in en_sentence_boundary_gen(text)]
    # break into sentences
    sentences = [s for s in _text_by_offsets_gen(text, offsets)]
    # join up, adding a newline for space where possible
    orig_parts = []
    new_parts = []
    sentnum = len(sentences)
    for i in range(sentnum):
        sent = sentences[i]
        orig_parts.append(sent)
        new_parts.append(sent)
        if i < sentnum-1:
            # Inter-sentence gap text (whatever lies between the offsets).
            orig_parts.append(text[offsets[i][1]:offsets[i+1][0]])
            if (offsets[i][1] < offsets[i+1][0] and
                text[offsets[i][1]].isspace()):
                # intervening space; can add newline
                new_parts.append('\n'+text[offsets[i][1]+1:offsets[i+1][0]])
            else:
                new_parts.append(text[offsets[i][1]:offsets[i+1][0]])
    if len(offsets) and offsets[-1][1] < len(text):
        # Trailing material after the last sentence is preserved verbatim.
        orig_parts.append(text[offsets[-1][1]:])
        new_parts.append(text[offsets[-1][1]:])
    # sanity check
    assert text == ''.join(orig_parts), "INTERNAL ERROR:\n '%s'\nvs\n '%s'" % (text, ''.join(orig_parts))
    splittext = ''.join(new_parts)
    # sanity
    assert len(text) == len(splittext), "INTERNAL ERROR"
    assert _normspace(text) == _normspace(splittext), "INTERNAL ERROR:\n '%s'\nvs\n '%s'" % (_normspace(text), _normspace(splittext))
    return splittext
def main(argv):
    """Read lines from stdin and write each with sentence-break newlines added."""
    while True:
        text = sys.stdin.readline()
        if len(text) == 0:
            # readline() returns an empty string only at EOF.
            break
        sys.stdout.write(sentencebreaks_to_newlines(text))
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| 29.934211 | 139 | 0.632088 |
37091d7c1519a80fa780d38185774d3d1f1229c6 | 196 | py | Python | gym_grid_driving_ray/__init__.py | thefatbandit/multi-agent-driving | 0f5787672b2e8d4aa5ac0f88003a46ab3c1a0fb2 | [
"MIT"
] | null | null | null | gym_grid_driving_ray/__init__.py | thefatbandit/multi-agent-driving | 0f5787672b2e8d4aa5ac0f88003a46ab3c1a0fb2 | [
"MIT"
] | null | null | null | gym_grid_driving_ray/__init__.py | thefatbandit/multi-agent-driving | 0f5787672b2e8d4aa5ac0f88003a46ab3c1a0fb2 | [
"MIT"
] | null | null | null | import logging
from gym.envs.registration import register
# Module-level logger for the package.
logger = logging.getLogger(__name__)
# Register the environment with Gym so it can be instantiated via
# gym.make('GridDriving-ray-v0').
register(
    id='GridDriving-ray-v0',
    entry_point='gym_grid_driving_ray.envs:GridDrivingEnv'
)
2ef28f2f31edc647e8f11ed01e0053773398ebb7 | 81 | py | Python | src/sage/tests/french_book/__init__.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 5 | 2015-01-04T07:15:06.000Z | 2022-03-04T15:15:18.000Z | src/sage/geometry/riemannian_manifolds/__init__.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/geometry/riemannian_manifolds/__init__.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | [
"BSL-1.0"
] | 10 | 2016-09-28T13:12:40.000Z | 2022-02-12T09:28:34.000Z | # This comment is here so the file is non-empty (so Mercurial will check it in).
| 40.5 | 80 | 0.740741 |
e5943201a2d5341fee99c319274a355207960ef3 | 527 | py | Python | app.py | AbstractMonkey/flask_test | 84a983c204234f471420a5041c28400c1193f762 | [
"MIT"
] | null | null | null | app.py | AbstractMonkey/flask_test | 84a983c204234f471420a5041c28400c1193f762 | [
"MIT"
] | null | null | null | app.py | AbstractMonkey/flask_test | 84a983c204234f471420a5041c28400c1193f762 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello():
    """Root endpoint returning a plain-text greeting."""
    greeting = 'Hello, World!'
    return greeting
def _weather_message(temp):
    # Map a Fahrenheit temperature to the user-facing conditions message.
    if 65 < temp < 90:
        return "It's nice outside"
    if 45 < temp < 66:
        return "It's ok outside"
    return "It's miserable outside"


@app.route('/weather')
def weather():
    """Weather endpoint: render weather.html with a canned conditions message.

    The temperature is currently hard-coded (78 with a 10-degree offset);
    presumably a real reading will replace it eventually — TODO confirm.
    """
    w = 78
    w -= 10
    data = {"message": _weather_message(w)}
    return render_template("weather.html", title="WeatherApp", data=data)
if __name__ == '__main__':
    # Run the Flask development server (debug=True is for local use only).
    app.run(debug=True)
| 20.269231 | 73 | 0.58444 |
0b866120ca665569c0528f33e690c8ff1c85c15a | 6,810 | py | Python | env/lib/python3.8/site-packages/social_core/backends/azuread_b2c.py | Vee245/dukapepedj | 8525166b38c8f74dc8d62a2fdf1905c32793efcd | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/social_core/backends/azuread_b2c.py | Vee245/dukapepedj | 8525166b38c8f74dc8d62a2fdf1905c32793efcd | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/social_core/backends/azuread_b2c.py | Vee245/dukapepedj | 8525166b38c8f74dc8d62a2fdf1905c32793efcd | [
"MIT"
] | null | null | null | """
Copyright (c) 2018 Noderabbit Inc., d.b.a. Appsembler
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
See https://nicksnettravels.builttoroam.com/post/2017/01/24/Verifying-Azure-Active-Directory-JWT-Tokens.aspx
for verifying JWT tokens.
"""
import json
from cryptography.hazmat.primitives import serialization
from jwt import DecodeError, ExpiredSignatureError
from jwt import decode as jwt_decode
from jwt import get_unverified_header
# RSAAlgorithm is only present when PyJWT was installed with cryptography
# support; fail early with an actionable message otherwise.
try:
    from jwt.algorithms import RSAAlgorithm
except ImportError:
    raise Exception(
        # Python 3.3 is not supported because of compatibility in
        # Cryptography package in Python3.3 You are welcome to patch
        # and open a pull request.
        'Cryptography library is required for this backend ' \
        '(AzureADB2COAuth2) to work. Note that this backend is only ' \
        'supported on Python 2 and Python 3.4+.'
    )
from ..exceptions import AuthException, AuthTokenError
from .azuread import AzureADOAuth2
class AzureADB2COAuth2(AzureADOAuth2):
    """Azure Active Directory B2C OAuth2 backend.

    Extends the plain AzureAD backend with B2C "policy" (user-flow)
    support: endpoint URLs carry the ``p=<policy>`` query parameter and
    id_tokens are verified against the policy-specific JWKS keys.
    """
    name = 'azuread-b2c-oauth2'

    BASE_URL = 'https://login.microsoftonline.com/{tenant_id}'
    AUTHORIZATION_URL = '{base_url}/oauth2/v2.0/authorize'
    OPENID_CONFIGURATION_URL = '{base_url}/v2.0/.well-known/openid-configuration?p={policy}'
    ACCESS_TOKEN_URL = '{base_url}/oauth2/v2.0/token?p={policy}'
    JWKS_URL = '{base_url}/discovery/v2.0/keys?p={policy}'
    DEFAULT_SCOPE = ['openid', 'email']
    # (token field, extra-data key) pairs persisted on the social auth entry.
    EXTRA_DATA = [
        ('access_token', 'access_token'),
        ('id_token', 'id_token'),
        ('refresh_token', 'refresh_token'),
        ('id_token_expires_in', 'expires'),
        ('exp', 'expires_on'),
        ('not_before', 'not_before'),
        ('given_name', 'first_name'),
        ('family_name', 'last_name'),
        ('tfp', 'policy'),
        ('token_type', 'token_type')
    ]

    @property
    def tenant_id(self):
        """Tenant id setting; defaults to the multi-tenant 'common' endpoint."""
        return self.setting('TENANT_ID', 'common')

    @property
    def policy(self):
        """The configured B2C policy (user flow); must start with `b2c_`."""
        policy = self.setting('POLICY')
        if not policy or not policy.lower().startswith('b2c_'):
            raise AuthException('SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_POLICY is '
                                'required and should start with `b2c_`')
        return policy

    @property
    def base_url(self):
        """Tenant-specific login.microsoftonline.com base URL."""
        return self.BASE_URL.format(tenant_id=self.tenant_id)

    def openid_configuration_url(self):
        return self.OPENID_CONFIGURATION_URL.format(base_url=self.base_url,
                                                    policy=self.policy)

    def authorization_url(self):
        # Policy is required, but added later by `auth_extra_arguments()`
        return self.AUTHORIZATION_URL.format(base_url=self.base_url)

    def access_token_url(self):
        return self.ACCESS_TOKEN_URL.format(base_url=self.base_url,
                                            policy=self.policy)

    def jwks_url(self):
        return self.JWKS_URL.format(base_url=self.base_url,
                                    policy=self.policy)

    def request_access_token(self, *args, **kwargs):
        """
        This is probably a hack, but otherwise AzureADOAuth2 expects
        `access_token`.

        However, B2C backends provides `id_token`.
        """
        response = super().request_access_token(
            *args,
            **kwargs
        )
        if 'access_token' not in response:
            response['access_token'] = response['id_token']
        return response

    def auth_extra_arguments(self):
        """
        Return extra arguments needed on auth process.

        The defaults can be overridden by GET parameters.
        """
        extra_arguments = super().auth_extra_arguments()
        extra_arguments['p'] = self.policy or self.data.get('p')
        return extra_arguments

    def jwt_key_to_pem(self, key_json_dict):
        """
        Builds a PEM formatted key string from a JWT public key dict.
        """
        pub_key = RSAAlgorithm.from_jwk(json.dumps(key_json_dict))
        return pub_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )

    def get_user_id(self, details, response):
        """Use subject (sub) claim as unique id."""
        return response.get('sub')

    def get_user_details(self, response):
        """
        Email address is returned on a different attribute for AzureAD
        B2C backends.
        """
        details = super().get_user_details(response)
        if not details['email'] and response.get('emails'):
            details['email'] = response['emails']
        if isinstance(details.get('email'), (list, tuple)):
            # B2C may return a list of addresses; keep the first one.
            details['email'] = details['email'][0]
        return details

    def get_public_key(self, kid):
        """
        Retrieve JWT keys from the URL.
        """
        resp = self.request(self.jwks_url(), method='GET')
        resp.raise_for_status()
        # find the proper key for the kid
        for key in resp.json()['keys']:
            if key['kid'] == kid:
                return self.jwt_key_to_pem(key)
        raise DecodeError(f'Cannot find kid={kid}')

    def user_data(self, access_token, *args, **kwargs):
        """Decode and verify the id_token, returning its claims as user data."""
        response = kwargs.get('response')
        id_token = response.get('id_token')
        # `kid` is short for key id
        kid = get_unverified_header(id_token)['kid']
        key = self.get_public_key(kid)
        try:
            return jwt_decode(
                id_token,
                key=key,
                algorithms=['RS256'],
                audience=self.setting('KEY'),
                leeway=self.setting('JWT_LEEWAY', default=0),
            )
        except (DecodeError, ExpiredSignatureError) as error:
            raise AuthTokenError(self, error)
1abfa8181f8832e12a236c522b43a4f566cdde82 | 2,232 | py | Python | plotly/validators/scattercarpet/marker/colorbar/_tickformatstops.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scattercarpet/marker/colorbar/_tickformatstops.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | null | null | null | plotly/validators/scattercarpet/marker/colorbar/_tickformatstops.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class TickformatstopsValidator(
    _plotly_utils.basevalidators.CompoundArrayValidator
):
    """Validator for the ``tickformatstops`` compound-array property of
    ``scattercarpet.marker.colorbar`` (auto-generated plotly code).
    """

    def __init__(
        self,
        plotly_name='tickformatstops',
        parent_name='scattercarpet.marker.colorbar',
        **kwargs
    ):
        # data_docs below is user-facing help text rendered by plotly at
        # runtime — it is a string literal, not a comment.
        super(TickformatstopsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Tickformatstop',
            data_docs="""
            dtickrange
                range [*min*, *max*], where "min", "max" -
                dtick values which describe some zoom level, it
                is possible to omit "min" or "max" value by
                passing "null"
            enabled
                Determines whether or not this stop is used. If
                `false`, this stop is ignored even within its
                `dtickrange`.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            value
                string - dtickformat for described zoom level,
                the same as "tickformat"
""",
            **kwargs
        )
edb1bf68dfb5ebd1e098715abee7d66de24823bc | 5,650 | py | Python | isi_sdk_8_2_0/isi_sdk_8_2_0/models/drives_drive_firmware_update.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/drives_drive_firmware_update.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/drives_drive_firmware_update.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_0.models.drives_drive_firmware_update_node import DrivesDriveFirmwareUpdateNode # noqa: F401,E501
from isi_sdk_8_2_0.models.node_drives_purposelist_error import NodeDrivesPurposelistError # noqa: F401,E501
class DrivesDriveFirmwareUpdate(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'errors': 'list[NodeDrivesPurposelistError]',
        'nodes': 'list[DrivesDriveFirmwareUpdateNode]',
        'total': 'int'
    }

    attribute_map = {
        'errors': 'errors',
        'nodes': 'nodes',
        'total': 'total'
    }

    def __init__(self, errors=None, nodes=None, total=None):  # noqa: E501
        """DrivesDriveFirmwareUpdate - a model defined in Swagger"""  # noqa: E501

        self._errors = None
        self._nodes = None
        self._total = None
        self.discriminator = None

        if errors is not None:
            self.errors = errors
        if nodes is not None:
            self.nodes = nodes
        if total is not None:
            self.total = total

    @property
    def errors(self):
        """Gets the errors of this DrivesDriveFirmwareUpdate.  # noqa: E501

        A list of errors encountered by the individual nodes involved in this request, or an empty list if there were no errors.  # noqa: E501

        :return: The errors of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :rtype: list[NodeDrivesPurposelistError]
        """
        return self._errors

    @errors.setter
    def errors(self, errors):
        """Sets the errors of this DrivesDriveFirmwareUpdate.

        A list of errors encountered by the individual nodes involved in this request, or an empty list if there were no errors.  # noqa: E501

        :param errors: The errors of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :type: list[NodeDrivesPurposelistError]
        """
        self._errors = errors

    @property
    def nodes(self):
        """Gets the nodes of this DrivesDriveFirmwareUpdate.  # noqa: E501

        The responses from the individual nodes involved in this request.  # noqa: E501

        :return: The nodes of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :rtype: list[DrivesDriveFirmwareUpdateNode]
        """
        return self._nodes

    @nodes.setter
    def nodes(self, nodes):
        """Sets the nodes of this DrivesDriveFirmwareUpdate.

        The responses from the individual nodes involved in this request.  # noqa: E501

        :param nodes: The nodes of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :type: list[DrivesDriveFirmwareUpdateNode]
        """
        self._nodes = nodes

    @property
    def total(self):
        """Gets the total of this DrivesDriveFirmwareUpdate.  # noqa: E501

        The total number of nodes responding.  # noqa: E501

        :return: The total of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this DrivesDriveFirmwareUpdate.

        The total number of nodes responding.  # noqa: E501

        :param total: The total of this DrivesDriveFirmwareUpdate.  # noqa: E501
        :type: int
        """
        # API constraint: total must fit in a non-negative signed 32-bit int.
        if total is not None and total > 2147483647:  # noqa: E501
            raise ValueError("Invalid value for `total`, must be a value less than or equal to `2147483647`")  # noqa: E501
        if total is not None and total < 0:  # noqa: E501
            raise ValueError("Invalid value for `total`, must be a value greater than or equal to `0`")  # noqa: E501

        self._total = total

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DrivesDriveFirmwareUpdate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
096db180792048d5636dd260aefd96ef77c52d40 | 6,398 | py | Python | misc/send-umb-iib-notifier.py | ppitonak/umb | 89b0d32d3867f9f489ee8a442a1faf7e04c52f45 | [
"Apache-2.0"
] | null | null | null | misc/send-umb-iib-notifier.py | ppitonak/umb | 89b0d32d3867f9f489ee8a442a1faf7e04c52f45 | [
"Apache-2.0"
] | null | null | null | misc/send-umb-iib-notifier.py | ppitonak/umb | 89b0d32d3867f9f489ee8a442a1faf7e04c52f45 | [
"Apache-2.0"
] | 2 | 2021-08-16T14:18:59.000Z | 2021-08-25T14:52:41.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to send a umb message to be plugged in a finally task"""
import argparse
import json
import os
import subprocess
import sys
import typing
import urllib.request
import datetime
import uuid
class UMBNotificationError(Exception):
    """Raised when gathering pipeline state or sending the UMB message fails."""


def get_openshift_console_url(namespace: str) -> str:
    """Return the OpenShift console base URL for PipelineRuns in *namespace*.

    Shells out to ``oc`` to read the console route hostname.

    Raises:
        UMBNotificationError: if the ``oc`` command exits non-zero.
    """
    cmd = "oc get route -n openshift-console console -o jsonpath='{.spec.host}'"
    # check=False so a failure reaches our explicit returncode test below;
    # with the original check=True, subprocess raised CalledProcessError
    # first and the UMBNotificationError branch was dead code.
    ret = subprocess.run(cmd, shell=True, check=False, capture_output=True)
    if ret.returncode != 0:
        # BUGFIX: the original message lacked the f-prefix, so the command
        # output was never interpolated into it.
        raise UMBNotificationError(
            f"Could not detect the location of openshift console url: {ret.stdout.decode()}"
        )
    return f"https://{ret.stdout.decode()}/k8s/ns/{namespace}/tekton.dev~v1beta1~PipelineRun/"
def get_json_of_pipelinerun(pipelinerun: str) -> typing.Dict[str, typing.Dict]:
    """Fetch and decode ``oc get pipelinerun <name> -o json``.

    The namespace is whatever ``oc``'s current context points at.

    Raises:
        UMBNotificationError: if the ``oc`` command exits non-zero.
    """
    cmd = f"oc get pipelinerun {pipelinerun} -o json"
    # check=False: raise our own domain error instead of CalledProcessError
    # (the original used check=True, which made the branch below unreachable).
    ret = subprocess.run(cmd, shell=True, check=False, capture_output=True)
    if ret.returncode != 0:
        raise UMBNotificationError(f"Could not run command: {cmd}")
    return json.loads(ret.stdout)
def check_status_of_pipelinerun(
        jeez: typing.Dict[str, typing.Dict]) -> typing.List[str]:
    """Return the short names of taskruns that finished with a failure.

    Runs from a `finally` task, so taskruns still in Running state are not
    counted as failures — only conditions whose status is the string 'False'.
    """
    pipeline_name = jeez['metadata']['name']
    failed_tasks = []
    for taskrun_name, taskrun in jeez['status']['taskRuns'].items():
        # Strip the pipelinerun prefix and the random suffix from the
        # taskrun name to recover the task's short name.
        short_name = taskrun_name.replace(pipeline_name + "-", '')
        short_name = short_name.replace("-" + short_name.split("-")[-1], '')
        failing = [
            cond['message'] for cond in taskrun['status']['conditions']
            if cond['status'] != 'Running' and cond['status'] == 'False'
        ]
        if failing:
            failed_tasks.append(short_name)
    return failed_tasks
def send_iib_test_complete_msg(webhook_url: str, iib: str,ocp_version: str,uid: str,pipelinerun: str,log_url: str) -> str:
    """Send an "index image test complete" CVP message to the UMB webhook.

    Posts a JSON payload to *webhook_url* targeting the
    VirtualTopic.eng.ci.product-build.test.complete topic and returns the
    decoded HTTP response body.
    """
    # Payload shape follows the CI messages "test complete" schema; most
    # fields are static placeholders for this pipeline.
    msg = {
        "artifact": {
            "advisory_id": "N/A",
            "brew_build_tag": "Undefined Brew Tag Name",
            "brew_build_target": "Undefined Brew Target Name",
            "component": "cvp-teamredhatopenshiftcontainerplatform",
            "full_name": "Undefined Artifact Image Full Name",
            "id": "1584936",
            "image_tag": "Undefined Artifact Image Tag",
            "issuer": "contra/pipeline",
            "name": "Undefined Artifact Image Name",
            "namespace": "Undefined Artifact Image Namespace",
            "nvr": "openshift-pipelines-operator-bundle-container-"+iib,
            "registry_url": "Undefined Artifact Image Registry URL",
            "scratch": "false",
            "type": "cvp"
        },
        "contact": {
            "docs": "",
            "email": "psi-pipelines@redhat.com",
            "name": "openshift-pipelines",
            "team": "openshift-pipelines",
            "url": "https://master-jenkins-csb-cnfqe.cloud.paas.psi.redhat.com/"
        },
        "generated_at": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f"),
        "pipeline": {
            "build": uid,
            "id": pipelinerun,
            "name": "cnf-dev-jenkins-tests-pipeline"
        },
        "run": {
            "log": log_url,
            "url": "cvp-redhat-operator-bundle-image-validation-test"+"/console",
        },
        "system": [
            {
                "architecture": "x86_64",
                "os": "registry.ci.openshift.org/ocp/release:"+ocp_version,
                "provider": "openshift"
            }
        ],
        "test": {
            "category": "integration,functional,validation",
            "namespace": "cnf-ci",
            "result": "passed",
            "type": "smoke-test"
        },
        "version": "0.2.1"
    }
    data={"topic": "topic://VirtualTopic.eng.ci.product-build.test.complete", "message": msg}
    req = urllib.request.Request(webhook_url,
                                 data=json.dumps(data).encode(),
                                 headers={"Content-type": "application/json"},
                                 method="POST")
    # TODO: Handle error?
    return urllib.request.urlopen(req).read().decode()
def main() -> int:
    """Parse arguments, inspect the pipelinerun and send the UMB message.

    Returns a process exit code: 0 on success, 1 when a required argument
    (or its environment-variable fallback) is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--log-url",
                        default=os.environ.get("LOG_URL"),
                        help="Link to the log url")
    parser.add_argument("--pipelinerun",
                        default=os.environ.get("PIPELINERUN"),
                        help="The pipelinerun to check the status on")
    parser.add_argument("--ocp-version",
                        default=os.environ.get("OCP_VERSION"),
                        help="The OCP version")
    parser.add_argument("--umb-webhook-url",
                        default=os.environ.get("UMB_WEBHOOK_URL"),
                        help="UMB webhook URL")
    parser.add_argument("--iib",
                        default=os.environ.get("IIB"),
                        help="The index image number")
    args = parser.parse_args()

    # Validate required inputs up front; each may come from CLI or env.
    if not args.pipelinerun:
        print(
            "error --pipelinerun need to be set via env variable or other means."
        )
        return 1
    # BUGFIX: this check previously read args.version, which does not exist
    # (argparse stores --ocp-version as args.ocp_version) and always raised
    # AttributeError when reached.
    if not args.ocp_version:
        print(
            "error --ocp-version need to be set via env variable or other means."
        )
        return 1
    if not args.iib:
        # iib is required to build the NVR in the outgoing message.
        print(
            "error --iib need to be set via env variable or other means."
        )
        return 1
    if not args.umb_webhook_url:
        print(
            "error --umb-webhook-url need to be set via env variable or other means."
        )
        return 1

    jeez = get_json_of_pipelinerun(args.pipelinerun)
    failures = check_status_of_pipelinerun(jeez)
    if args.log_url and args.log_url == "openshift":
        # TODO: Add tekton dashboard if we can find this automatically
        args.log_url = get_openshift_console_url(jeez['metadata']['namespace']) + \
            args.pipelinerun + "/logs"
    if failures:
        error_msg = f"""• *Failed Tasks*: {", ".join(failures)}\n"""
        print(error_msg)
        # TODO: implement sending a failed message to the right topic (if one exists).
    else:
        ret = send_iib_test_complete_msg(args.umb_webhook_url, args.iib,
                                         args.ocp_version, str(uuid.uuid4()),
                                         args.pipelinerun, args.log_url)
        if ret:
            print(ret)
    return 0


if __name__ == '__main__':
    sys.exit(main())
sys.exit(main()) | 34.961749 | 137 | 0.59503 |
2d237afa98db4b9a0cae184889d5e486b3b12991 | 3,832 | py | Python | cohesity_management_sdk/models/sqlaag_host_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/sqlaag_host_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/sqlaag_host_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.aag_and_databases
import cohesity_management_sdk.models.node_in_a_protection_sources_tree
import cohesity_management_sdk.models.protection_source
class SQLAAGHostAndDatabases(object):
    """Implementation of the 'SQL AAG Host and Databases.' model.

    Specifies AAGs and databases information on an SQL server. If AAGs exist
    on the server, specifies information about the AAG and databases in the
    group for each AAG found on the server.

    Attributes:
        aag_databases (list of AAGAndDatabases): Specifies a list of AAGs and
            database members in each AAG.
        application_node (NodeInAProtectionSourcesTree): Many different node
            types are supported such as 'kComputeResource' and
            'kResourcePool'.
        databases (list of ProtectionSource): Specifies all database entities
            found on the server. Database may or may not be in an AAG.
        error_message (string): Specifies an error message when the host is
            not registered as an SQL host.
        unknown_host_name (string): Specifies the name of the host that is not
            registered as an SQL server on Cohesity Cluser.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "aag_databases":'aagDatabases',
        "application_node":'applicationNode',
        "databases":'databases',
        "error_message":'errorMessage',
        "unknown_host_name":'unknownHostName'
    }

    def __init__(self,
                 aag_databases=None,
                 application_node=None,
                 databases=None,
                 error_message=None,
                 unknown_host_name=None):
        """Constructor for the SQLAAGHostAndDatabases class"""

        # Initialize members of the class
        self.aag_databases = aag_databases
        self.application_node = application_node
        self.databases = databases
        self.error_message = error_message
        self.unknown_host_name = unknown_host_name

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary, deserializing nested models
        # through their own from_dictionary constructors.
        aag_databases = None
        if dictionary.get('aagDatabases') != None:
            aag_databases = list()
            for structure in dictionary.get('aagDatabases'):
                aag_databases.append(cohesity_management_sdk.models.aag_and_databases.AAGAndDatabases.from_dictionary(structure))
        application_node = cohesity_management_sdk.models.node_in_a_protection_sources_tree.NodeInAProtectionSourcesTree.from_dictionary(dictionary.get('applicationNode')) if dictionary.get('applicationNode') else None
        databases = None
        if dictionary.get('databases') != None:
            databases = list()
            for structure in dictionary.get('databases'):
                databases.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))
        error_message = dictionary.get('errorMessage')
        unknown_host_name = dictionary.get('unknownHostName')

        # Return an object of this model
        return cls(aag_databases,
                   application_node,
                   databases,
                   error_message,
                   unknown_host_name)
afc9b9bfb9f5106de308fdcb52fcce23527d7e57 | 4,982 | py | Python | angr/exploration_techniques/__init__.py | ecoppa/angr | 91ff545eb19d09ed6196ec3bf7ad4e10e0a37ca5 | [
"BSD-2-Clause"
] | null | null | null | angr/exploration_techniques/__init__.py | ecoppa/angr | 91ff545eb19d09ed6196ec3bf7ad4e10e0a37ca5 | [
"BSD-2-Clause"
] | null | null | null | angr/exploration_techniques/__init__.py | ecoppa/angr | 91ff545eb19d09ed6196ec3bf7ad4e10e0a37ca5 | [
"BSD-2-Clause"
] | 1 | 2022-02-03T20:06:20.000Z | 2022-02-03T20:06:20.000Z | from ..errors import SimError
class ExplorationTechnique(object):
"""
An otiegnqwvk is a set of hooks for a simulation manager that assists in the implementation of new techniques in
symbolic exploration.
TODO: choose actual name for the functionality (techniques? strategies?)
Any number of these methods may be overridden by a subclass.
To use an exploration technique, call ``simgr.use_technique`` with an *instance* of the technique.
"""
# pylint: disable=unused-argument, no-self-use
def __init__(self):
# this attribute will be set from above by the manager
self.project = None
def setup(self, simgr):
"""
Perform any initialization on this manager you might need to do.
"""
pass
def step_state(self, state, **kwargs):
"""
Perform the process of stepping a state forward.
If the stepping fails, return None to fall back to a default stepping procedure.
Otherwise, return a dict of stashes to merge into the simulation manager. All the states
will be added to the PathGroup's stashes based on the mapping in the returned dict.
"""
return None
def step(self, simgr, stash, **kwargs):
"""
Step this stash of this manager forward. Should call ``simgr.step(stash, **kwargs)`` in order to do the actual
processing.
Return the stepped manager.
"""
return simgr.step(stash=stash, **kwargs)
def filter(self, state):
"""
Perform filtering on a state.
If the state should not be filtered, return None.
If the state should be filtered, return the name of the stash to move the state to.
If you want to modify the state before filtering it, return a tuple of the stash to move the state to and the
modified state.
"""
return None
def complete(self, simgr):
"""
Return whether or not this manager has reached a "completed" state, i.e. ``SimulationManager.run()`` should halt.
"""
return False
def _condition_to_lambda(self, condition, default=False):
"""
Translates an integer, set, list or lambda into a lambda that checks a state address against the given addresses, and the
other ones from the same basic block
:param condition: An integer, set, list or lambda to convert to a lambda.
:param default: The default return value of the lambda (in case condition is None). Default: false.
:returns: A lambda that takes a state and returns the set of addresses that it matched from the condition
The lambda has an `.addrs` attribute that contains the full set of the addresses at which it matches if that
can be determined statically.
"""
if condition is None:
condition_function = lambda p: default
condition_function.addrs = set()
elif isinstance(condition, (int, long)):
return self._condition_to_lambda((condition,))
elif isinstance(condition, (tuple, set, list)):
addrs = set(condition)
def condition_function(p):
if p.addr in addrs:
# returning {p.addr} instead of True to properly handle find/avoid conflicts
return {p.addr}
try:
# If the address is not in the set (which could mean it is
# not at the top of a block), check directly in the blocks
# (Blocks are repeatedly created for every check, but with
# the IRSB cache in angr lifter it should be OK.)
return addrs.intersection(set(self.project.factory.block(p.addr).instruction_addrs))
except (AngrError, SimError):
return False
condition_function.addrs = addrs
elif hasattr(condition, '__call__'):
condition_function = condition
else:
raise AngrExplorationTechniqueError("ExplorationTechnique is unable to convert given type (%s) to a callable condition function." % condition.__class__)
return condition_function
#registered_actions = {}
#registered_surveyors = {}
#
#def register_action(name, strat):
# registered_actions[name] = strat
#
#def register_surveyor(name, strat):
# registered_surveyors[name] = strat
from .driller_core import DrillerCore
from .crash_monitor import CrashMonitor
from .tracer import Tracer
from .explorer import Explorer
from .threading import Threading
from .dfs import DFS
from .looplimiter import LoopLimiter
from .lengthlimiter import LengthLimiter
from .veritesting import Veritesting
from .oppologist import Oppologist
from .director import Director, ExecuteAddressGoal, CallFunctionGoal
from .spiller import Spiller
from ..errors import AngrError, AngrExplorationTechniqueError
| 40.177419 | 164 | 0.655159 |
48a6d7ff3d9a1147b54a47ec45a19e0ca3c3da16 | 3,702 | py | Python | source/hsicbt/task/task_varieddim.py | choasma/HSIC-Bottleneck | 9f1fe2447592d61c0ba524aad0ff0820ae2ba9cb | [
"MIT"
] | 49 | 2019-12-05T03:03:43.000Z | 2022-02-23T13:12:02.000Z | source/hsicbt/task/task_varieddim.py | choasma/HSIC-Bottleneck | 9f1fe2447592d61c0ba524aad0ff0820ae2ba9cb | [
"MIT"
] | 10 | 2020-04-02T22:22:02.000Z | 2020-12-19T04:36:27.000Z | source/hsicbt/task/task_varieddim.py | choasma/HSIC-Bottleneck | 9f1fe2447592d61c0ba524aad0ff0820ae2ba9cb | [
"MIT"
] | 18 | 2020-01-18T00:17:11.000Z | 2022-01-13T10:07:55.000Z | from . import *
def plot_varieddim_result(config_dict):
try:
out_standard_batch_001 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 1))['batch_log_list']
out_standard_batch_005 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 2))['batch_log_list']
out_standard_batch_010 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 3))['batch_log_list']
out_standard_epoch_001 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 1))['epoch_log_dict']
out_standard_epoch_005 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 2))['epoch_log_dict']
out_standard_epoch_010 = load_logs(get_log_filepath(
config_dict['task'], TTYPE_FORMAT, config_dict['data_code'], 3))['epoch_log_dict']
except IOError as e:
print_highlight("{}.\nPlease do training by setting do_training key to True in config. Program quits.".format(e), 'red')
quit()
input_batch_list = [out_standard_batch_001, out_standard_batch_005, out_standard_batch_010]
input_epoch_list = [out_standard_epoch_001, out_standard_epoch_005, out_standard_epoch_010]
label_list = ['dim-8', 'dim-32', 'dim-64']
# metadata = {
# #'title':'HSIC(X, Z_L) of Varied-dim',
# 'title':'',
# 'xlabel': 'epochs',
# 'ylabel': 'HSIC(X, Z_L)',
# 'label': label_list
# }
# plot.plot_batches_log(input_batch_list, 'batch_hsic_hx', metadata)
# plot.save_figure(get_exp_path("varied-dim-hsic_xz-{}.{}".format(
# config_dict['data_code'], config_dict['ext'])))
# metadata = {
# #'title':'HSIC(Y, Z_L) of Varied-dim',
# 'title': '',
# 'xlabel': 'epochs',
# 'ylabel': 'HSIC(Y, Z_L)',
# 'label': label_list
# }
# plot.plot_batches_log(input_batch_list, 'batch_hsic_hy', metadata)
# plot.save_figure(get_exp_path("varied-dim-hsic_yz-{}.{}".format(
# config_dict['data_code'], config_dict['ext'])))
metadata = {
#'title':'format-train of Varied-dim',
'title': '',
'xlabel': 'epoch',
'ylabel': 'train acc',
'label': label_list
}
plot.plot_batches_log(input_batch_list, 'batch_acc', metadata)
filepath = get_exp_path("fig6a-varied-dim-acc-{}.{}".format( config_dict['data_code'], config_dict['ext']))
save_experiment_fig(filepath)
# metadata = {
# #'title':'format-train of Varied-dim',
# 'title': '',
# 'xlabel': 'epochs',
# 'ylabel': 'training loss',
# 'label': label_list
# }
# plot.plot_batches_log(input_batch_list, 'batch_loss', metadata)
# plot.save_figure(get_exp_path("varied-dim-loss-{}.{}".format(
# config_dict['data_code'], config_dict['ext'])))
metadata = {
#'title':'{} test performance of Varied-dim'.format(config_dict['data_code']),
'title': '',
'xlabel': 'epoch',
'ylabel': 'test acc',
'label': label_list
}
plot.plot_epoch_log(input_epoch_list, 'test_acc', metadata)
plot.save_figure(get_exp_path("fig6a-{}-epoch-test-acc.{}".format(
get_plot_filename(config_dict), config_dict['ext'])))
def task_varieddim_func(config_dict):
model_filename = config_dict['model_file']
config_dict['model_file'] = "{}-{:04d}.pt".format(
os.path.splitext(model_filename)[0], config_dict['exp_index'])
func = task_assigner(config_dict['training_type'])
func(config_dict)
| 41.133333 | 128 | 0.634522 |
edf7f7ddb08e00f4c72a9556b4eed346dd5a96ac | 10,888 | py | Python | Team7/parser.py | paralysedforce/Recipe-Project | 35336252f0629c1879e1c55238b6be451f4cf603 | [
"MIT"
] | 1 | 2016-03-15T01:49:33.000Z | 2016-03-15T01:49:33.000Z | Team7/parser.py | paralysedforce/Recipe-Project | 35336252f0629c1879e1c55238b6be451f4cf603 | [
"MIT"
] | null | null | null | Team7/parser.py | paralysedforce/Recipe-Project | 35336252f0629c1879e1c55238b6be451f4cf603 | [
"MIT"
] | null | null | null | #!usr/bin/python
from data import *
import scrape
import sys
#from collections import namedtuple
from string import punctuation
import re
import nltk
from pymongo import MongoClient
import recipe_classes
import reconstruction
import transform
client = MongoClient()
db = client["k_base"]
# Quantity = namedtuple("Quantity", ["value", "unit"])
# Ingredient = namedtuple("Ingredient", ['name', 'quantity', 'descriptors'])
# Step = namedtuple("Step", ["ingredients", "processes", "cookware"])
#TODO: unit tests for recipes
#TODO: improve recognize_descriptors
def parse_ingredient(ingredients):
"""ingredients is a string scraped from the web site. This function
processes the string and returns an ingredient object"""
number = recognize_number(ingredients)
unit = recognize_unit(ingredients)
ingredients = ingredients.replace(unit, '')
ingredients = ingredients.replace(str(number), '')
ingredient_name = recognize_ingredient(ingredients)
# descriptors = recognize_descriptors(ingredients)
ingredient = recipe_classes.Ingredient(ingredient_name, number, unit)
return ingredient
def recognize_number(ingredients):
number_pattern = re.compile("^(\.?\d+)([ \/\.]\d+)?([ \/\.]\d+)?")
matches = re.findall(number_pattern, ingredients)
if not matches: return 1
match = matches[0]
if match[1] == '':
return int(match[0])
elif match[1][0] == '/':
return float(match[0]) / float(match[1][1:])
elif match[1][0] == ' ' and match[2][0] == '/':
return int(match[0]) + (float(match[1][1:]) / float(match[2][1:]))
else:
match = ''
return match
def recognize_unit(ingredients):
for unit in UNITS:
variations = UNITS[unit]
for variation in variations:
if variation in ingredients:
return unit
return 'unit'
def recognize_ingredient(ingredients):
# preprocessing
ingredient = _strip_punctuation((ingredients.replace('-', ' ')).lower())
all_ing_cursor = db.ingredients.find()
longest_match = ''
for doc in all_ing_cursor:
if doc['name'] in ingredient:
if len(doc['name']) > len(longest_match):
longest_match = doc['name']
if longest_match:
return longest_match
else:
return ingredient
def recognize_descriptors(ingredients, data = None):
stopwords = nltk.corpus.stopwords.words('english')
descriptors = []
common_ingredients = db.ingredients.find()
for word in ingredients.split():
stripped = _strip_punctuation(word).lower()
no_numerals = all(map(lambda c: c not in '123456789', word))
no_stopwords = not word.lower() in stopwords
no_ingredients = not (stripped in common_ingredients or word.lower() in
common_ingredients)
no_units = all([stripped not in UNITS[unit] for unit in UNITS])
if no_numerals and no_stopwords and no_ingredients and no_units:
descriptors.append(word.lower())
return descriptors
# Step parsing
def parse_step(step):
step_procedures = []
step_ingredients = []
step_cookware = []
# for direction in DIRECTIONS:
# if direction in step:
# step_directions.append(direction)
# for ingredient in ingredients:
# if ingredient.name in step:
# step_ingredients.append(ingredient.name)
# for cookware in COOKWARE:
# if cookware in step:
# step_cookware.append(cookware)
# return Step(step_ingredients, step_directions, step_cookware)
ing_cursor = db.ingredients.find()
proc_cursor = db.procedures.find()
step = _strip_punctuation(step.lower())
longest_match = ''
for document in ing_cursor:
if isinstance(document['name'], basestring):
if document['name'] in step:
step_ingredients.append(document['name'])
for document in proc_cursor:
if isinstance(document['name'], basestring):
if document['name'] in step:
if len(document['name']) > len(longest_match):
longest_match = document['name']
step_procedures.append(document['name'])
cookware_set = set()
for cookware in COOKWARE:
for variation in COOKWARE[cookware]:
if variation in step:
cookware_set.add(unicode(cookware))
step_cookware = list(cookware_set)
#GET TIME AND TEMP FROM STEP
time = recognize_time(step)
temp = recognize_temp(step)
if not step_procedures:
step_procedures = ['placeholder proc']
proc = recipe_classes.Procedure(step_procedures[0], step_ingredients, step_cookware, time, temp)
return proc
def double_action(step):
i = 0
proc_step = _strip_punctuation(step.lower()).split()
ing = []
proc1 = ''
proc2 = ''
flag = ''
ret = []
for word in proc_step:
if word is 'and' or word is 'then':
if word is 'and':
flag = 'a'
elif word is 'then':
flag = 't'
proc1 = proc_step[i-1]
proc2 = proc_step[i+1]
c1 = db.procedures.find({"name":proc1})
c2 = db.procedures.find({"name":proc2})
try:
c1[0]
c2[0]
for ing in proc_step:
cursor = db.ingredients.find({"name":ing})
try:
doc = cursor[0]
ings.append(ing)
except:
pass
except:
pass
i += 1
if flag is 'a':
split_step = step.split('and')
for s in split_step:
for ing in ings:
s += ' '.join(ing)
ret.append(s)
elif flag is 't':
split_step = step.split('then')
for s in split_step:
for ing in ings:
s += ' '.join(ing)
ret.append(s)
return ret
def recognize_time(step):
processed_step = _strip_punctuation(step.lower()).split()
time = ''
for i in xrange(len(processed_step)):
word = processed_step[i]
if word in TIME and i > 0:
prev_word = processed_step[i-1]
if all(map(lambda c: c in '1234567890' or c in punctuation, prev_word)):
time += prev_word + ' ' + word
return time
def recognize_temp(step):
lower_step = step.lower()
if 'degrees' in lower_step:
ind = lower_step.split().index('degrees')
return " ".join(i for i in step.split()[ind-1: ind+2])
elif 'low heat' in lower_step:
return 'low heat'
elif 'medium heat' in lower_step:
return 'medium heat'
elif 'high heat' in lower_step:
return 'high heat'
return ''
## Helper
def _strip_punctuation(string):
return "".join(char for char in string if char not in punctuation)
def contains_procedure(step):
count = 0
step = _strip_punctuation(step.lower()).split()
for w in step:
cursor = db.procedures.find({"name":w})
try:
doc = cursor[0]
count += 1
except:
pass
return count
def main(original_recipe):
# urls = ['http://allrecipes.com/recipe/easy-meatloaf/',
# 'http://allrecipes.com/Recipe/Easy-Garlic-Broiled-Chicken/',
# 'http://allrecipes.com/Recipe/Baked-Lemon-Chicken-with-Mushroom-Sauce/',
# 'http://allrecipes.com/Recipe/Meatball-Nirvana/']
if original_recipe.url:
scraped_ing, scraped_steps = scrape.scrape(original_recipe.url)
# parse ingredient info, create objects
ingredients = []
for ingredient in scraped_ing:
new_ing = parse_ingredient(ingredient)
cursor = db.ingredients.find({"name":new_ing.name})
i = 0
for document in cursor:
i += 1
if i == 0:
# add to DB
db.ingredients.insert({"name":new_ing.name, "category":"????", "flag":"none"})
ingredients.append(new_ing)
steps = []
for step in scraped_steps:
#SPLIT STEP CONTENTS BEFORE PARSING
if not step:
continue # HANDLE EMPTY
# for new_parser
# parsed_steps = parse_step(step)
# for p in parsed_steps:
# steps.append(p)
#for new_parser
step_sent = nltk.sent_tokenize(step)
for sent in step_sent:
if contains_procedure(sent) == 1:
new_proc = parse_step(sent)
steps.append(new_proc)
elif contains_procedure(sent) > 1:
actions = double_action(sent)
if actions:
for a in actions:
new_proc = parse_step(a)
steps.append(new_proc)
if contains_procedure(sent) == 2:
break
clause = sent.split(';')
for c in clause:
if contains_procedure(c) == 1:
new_proc = parse_step(c)
steps.append(new_proc)
elif contains_procedure(c) > 1:
more_clause = c.split(',')
for more_c in more_clause:
if contains_procedure(more_c) == 1:
new_proc = parse_step(more_c)
steps.append(new_proc)
elif contains_procedure(more_c) > 1:
actions = double_action(more_c)
if actions:
for a in actions:
new_proc = parse_step(a)
steps.append(new_proc)
if contains_procedure(more_c) == 2:
break
else:
new_proc = parse_step(more_c)
steps.append(new_proc)
original_recipe.in_list = ingredients
original_recipe.pr_list = steps
#call transform etc
reconstruction.reconstruct(original_recipe)
r = original_recipe
try:
transformed_recipe = transform.transform(r)
except RuntimeError:
return [original_recipe, Recipe()]
#if transformed_recipe == original_recipe:
# print "There are no changes to be made"
#else:
reconstruction.reconstruct(transformed_recipe)
return [original_recipe, transformed_recipe]
# if __name__ == "__main__":
# main()
| 34.675159 | 100 | 0.558597 |
832e7208be27188e8ce63f2cc78843b68612bb20 | 414 | py | Python | neurospyke/__init__.py | RebeccaClarkson/NeuroSpyke | 750090062939738add8bc20a1ee3cae768a66d58 | [
"FSFAP"
] | null | null | null | neurospyke/__init__.py | RebeccaClarkson/NeuroSpyke | 750090062939738add8bc20a1ee3cae768a66d58 | [
"FSFAP"
] | null | null | null | neurospyke/__init__.py | RebeccaClarkson/NeuroSpyke | 750090062939738add8bc20a1ee3cae768a66d58 | [
"FSFAP"
] | null | null | null | # set up module defaults across project
import numpy as np
print("Setting defaults for numpy")
np.set_printoptions(precision=2, linewidth=40, suppress=True)
import matplotlib as mpl
print("Setting defaults for matplotlib")
mpl.use('TkAgg')
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
import pandas as pd
print("Settings defaults for pandas")
pd.set_option('precision', 3)
| 25.875 | 61 | 0.770531 |
7941033c88397f8adf0c96551952397941d27dd0 | 495 | py | Python | registration/forms.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 1,758 | 2017-04-21T08:42:59.000Z | 2022-03-09T22:58:53.000Z | registration/forms.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 172 | 2017-04-23T21:30:03.000Z | 2022-02-10T20:10:06.000Z | registration/forms.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 138 | 2017-04-23T23:02:16.000Z | 2022-03-25T04:44:19.000Z | from django.contrib.auth.forms import PasswordResetForm
from conf.models import Site
from conf.utils import current_site_id
class TimestrapPasswordResetForm(PasswordResetForm):
"""
Override the 'domain' and 'site_name' email context variables to use the
current site.
"""
def save(self, **kwargs):
site = Site.objects.get(id=current_site_id())
kwargs["extra_email_context"] = {"domain": site.domain, "site_name": site.name}
super().save(**kwargs)
| 29.117647 | 87 | 0.705051 |
d83ebc8d48c8223b3cc422c5a57fc1c8d5c90505 | 2,927 | py | Python | tests/integration/frameworks/test_torchscript_impl.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | tests/integration/frameworks/test_torchscript_impl.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | tests/integration/frameworks/test_torchscript_impl.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import pytest
import torch.nn as nn
import bentoml
from tests.utils.helpers import assert_have_file_extension
from tests.utils.frameworks.pytorch_utils import test_df
from tests.utils.frameworks.pytorch_utils import predict_df
from tests.utils.frameworks.pytorch_utils import LinearModel
@pytest.fixture(scope="module")
def models():
def _(test_type, labels=None, custom_objects=None):
_model: nn.Module = LinearModel()
if "trace" in test_type:
tracing_inp = torch.ones(5)
model = torch.jit.trace(_model, tracing_inp)
else:
model = torch.jit.script(_model)
tag = bentoml.torchscript.save(
"torchscript_test", model, labels=labels, custom_objects=custom_objects
)
return tag
return _
@pytest.mark.parametrize("test_type", ["tracedmodel", "scriptedmodel"])
def test_torchscript_save_load(test_type, models):
labels = {"stage": "dev"}
def custom_f(x: int) -> int:
return x + 1
tag = models(test_type, labels=labels, custom_objects={"func": custom_f})
bentomodel = bentoml.models.get(tag)
assert_have_file_extension(bentomodel.path, ".pt")
assert bentomodel.info.context.get("model_format") == "torchscript:v1"
for k in labels.keys():
assert labels[k] == bentomodel.info.labels[k]
assert bentomodel.custom_objects["func"](3) == custom_f(3)
torchscript_loaded: nn.Module = bentoml.torchscript.load(tag)
assert predict_df(torchscript_loaded, test_df) == 5.0
@pytest.mark.gpus
@pytest.mark.parametrize("dev", ["cpu", "cuda", "cuda:0"])
@pytest.mark.parametrize("test_type", ["tracedmodel", "scriptedmodel"])
def test_torchscript_save_load_across_devices(dev, test_type, models):
def is_cuda(model):
return next(model.parameters()).is_cuda
tag = models(test_type)
loaded = bentoml.torchscript.load(tag)
if dev == "cpu":
assert not is_cuda(loaded)
else:
assert is_cuda(loaded)
@pytest.mark.parametrize(
"input_data",
[
test_df.to_numpy().astype(np.float32),
torch.from_numpy(test_df.to_numpy().astype(np.float32)),
],
)
@pytest.mark.parametrize("test_type", ["tracedmodel", "scriptedmodel"])
def test_torchscript_runner_setup_run_batch(input_data, models, test_type):
tag = models(test_type)
runner = bentoml.torchscript.load_runner(tag)
assert tag in runner.required_models
assert runner.num_replica == 1
res = runner.run_batch(input_data)
assert res.unsqueeze(dim=0).item() == 5.0
@pytest.mark.gpus
@pytest.mark.parametrize("dev", ["cuda", "cuda:0"])
@pytest.mark.parametrize("test_type", ["tracedmodel", "scriptedmodel"])
def test_torchscript_runner_setup_on_gpu(dev, models, test_type):
tag = models(test_type)
runner = bentoml.torchscript.load_runner(tag)
assert torch.cuda.device_count() == runner.num_replica
| 31.815217 | 83 | 0.703109 |
2d4a5fe0c8e888a47da9ae2365c8adb59f34216c | 1,072 | py | Python | test/test_tmep_integration.py | Josef-Friedrich/mutagen-renamer | 3df494f04dc74b4ed5a70150502756ba80a81cd4 | [
"MIT"
] | 6 | 2020-03-09T10:23:32.000Z | 2022-03-14T21:29:15.000Z | test/test_tmep_integration.py | Josef-Friedrich/mutagen-renamer | 3df494f04dc74b4ed5a70150502756ba80a81cd4 | [
"MIT"
] | 10 | 2022-02-02T17:09:08.000Z | 2022-03-23T22:19:47.000Z | test/test_tmep_integration.py | Josef-Friedrich/mutagen-renamer | 3df494f04dc74b4ed5a70150502756ba80a81cd4 | [
"MIT"
] | 1 | 2022-02-06T14:06:30.000Z | 2022-02-06T14:06:30.000Z | """Test the integration of the python package “tmep”."""
import unittest
import helper
class TestFunctions(unittest.TestCase):
def test_ifdefempty_empty_existent_field(self):
out = helper.call_bin('--dry-run', '--format',
'%ifdefempty{mb_workid,_empty_,_notempty_}',
helper.get_testfile('files', 'album.mp3'))
self.assertTrue('_empty_' in str(out))
def test_ifdefempty_empty_nonexistent_field(self):
out = helper.call_bin('--dry-run', '--format',
'%ifdefempty{xxx,_empty_,_notempty_}',
helper.get_testfile('files', 'album.mp3'))
self.assertTrue('_empty_' in str(out))
def test_ifdefempty_notempty(self):
out = helper.call_bin('--dry-run', '--format',
'%ifdefempty{title,_empty_,_notempty_}',
helper.get_testfile('files', 'album.mp3'))
self.assertTrue('_notempty_' in str(out))
if __name__ == '__main__':
unittest.main()
| 35.733333 | 74 | 0.576493 |
c8a2793e61fe3a7d19043419ada42edc90cb5228 | 7,448 | py | Python | setup.py | jawaff/python-sfml | 135d95d7da45478b352771073bf73789997e9047 | [
"Zlib"
] | 2 | 2018-10-24T15:40:41.000Z | 2018-12-16T12:34:24.000Z | setup.py | jawaff/python-sfml | 135d95d7da45478b352771073bf73789997e9047 | [
"Zlib"
] | null | null | null | setup.py | jawaff/python-sfml | 135d95d7da45478b352771073bf73789997e9047 | [
"Zlib"
] | null | null | null | import sys, os, platform
import os.path, shutil
from glob import glob
import versioneer
from subprocess import call
from setuptools import setup, Command, Extension
try:
from Cython.Distutils import build_ext
except ImportError:
print("Please install cython and try again.")
raise SystemExit
if platform.architecture()[0] == "32bit":
arch = "x86"
elif platform.architecture()[0] == "64bit":
arch = "x64"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
class CythonBuildExt(build_ext):
""" Updated version of cython build_ext command to deal with the
generated API headers. C/C++ header files are all moved to the
temporary build directory before being properly installed on
the system.
"""
def cython_sources(self, sources, extension):
# cythonize .pxd source files
ret = build_ext.cython_sources(self, sources, extension)
# should result the module name; e.g, graphics[.pyx]
module = os.path.basename(sources[0])[:-4]
# prepare a list with all header files related to the module (*.hpp, *_api.h, *.h)
header_files = glob(os.path.join(SCRIPT_DIR, 'src', 'sfml', module, '*.hpp'))
header_files.append(os.path.join(SCRIPT_DIR, 'src', 'sfml', module, module + '.h'))
header_files.append(os.path.join(SCRIPT_DIR, 'src', 'sfml', module, module + '_api.h'))
# deal with exceptions
if module == "network":
header_files.remove(os.path.join(SCRIPT_DIR, 'src', 'sfml', module, module + '.h'))
header_files.remove(os.path.join(SCRIPT_DIR, 'src', 'sfml', module, module + '_api.h'))
# create the temporary destination in the build directory
destination = os.path.join(self.build_temp, 'include', 'pysfml', module)
if not os.path.exists(destination):
os.makedirs(destination)
# move all header files to the build directory
for header_file in header_files:
if os.path.isfile(header_file):
try:
shutil.copy(header_file, destination)
except shutil.Error:
pass
# add the temporary header directory to compilation options
self.compiler.include_dirs.append(os.path.join(self.build_temp, 'include'))
# update data_files to install the files on the system
# On Windows: C:\Python27\include\pysfml\*_api.h
# On Unix: /usr/local/include/pysfml/*_api.h
install_directory = os.path.join(sys.exec_prefix, 'include', 'pysfml', module)
files_to_install = [os.path.join(self.build_temp, 'include', 'pysfml', module, os.path.basename(header_file)) for header_file in header_files]
data_files.append((install_directory, files_to_install))
return ret
modules = ['system', 'window', 'graphics', 'audio', 'network']
extension = lambda name, files, libs: Extension(
name='sfml.' + name,
sources= [os.path.join(SCRIPT_DIR, 'src', 'sfml', name, filename) for filename in files],
include_dirs=[os.path.join(SCRIPT_DIR, 'include', 'Includes'), os.path.join(SCRIPT_DIR, 'extlibs', 'SFML-2.3.2', 'include')],
library_dirs=[os.path.join(SCRIPT_DIR, 'extlibs', 'SFML-2.3.2', 'lib'), os.path.join(SCRIPT_DIR, 'extlibs', 'libs-msvc-universal', arch)] if sys.hexversion >= 0x03050000 else [os.path.join(SCRIPT_DIR, 'extlibs', 'SFML-2.3.2', 'lib')],
language='c++',
libraries=libs,
define_macros=[('SFML_STATIC', '1')] if platform.system() == 'Windows' else [])
if platform.system() == 'Windows':
system_libs = ['winmm', 'sfml-system-s']
window_libs = ['user32', 'advapi32', 'winmm', 'sfml-system-s', 'gdi32', 'opengl32', 'sfml-window-s']
graphics_libs = ['user32', 'advapi32', 'winmm', 'sfml-system-s', 'gdi32', 'opengl32', 'sfml-window-s', 'freetype', 'jpeg', 'sfml-graphics-s']
audio_libs = ['winmm', 'sfml-system-s', 'flac', 'vorbisenc', 'vorbisfile', 'vorbis', 'ogg', 'openal32', 'sfml-audio-s']
network_libs = ['ws2_32', 'sfml-system-s', 'sfml-network-s']
else:
system_libs = ['sfml-system']
window_libs = ['sfml-system', 'sfml-window']
graphics_libs = ['sfml-system', 'sfml-window', 'sfml-graphics']
audio_libs = ['sfml-system', 'sfml-audio']
network_libs = ['sfml-system', 'sfml-network']
system = extension(
'system',
['system.pyx', 'error.cpp', 'hacks.cpp', 'NumericObject.cpp'],
system_libs)
window = extension(
'window',
['window.pyx', 'DerivableWindow.cpp'],
window_libs)
graphics = extension(
'graphics',
['graphics.pyx', 'DerivableRenderWindow.cpp', 'DerivableDrawable.cpp', 'NumericObject.cpp'],
graphics_libs)
audio = extension(
'audio',
['audio.pyx', 'DerivableSoundRecorder.cpp', 'DerivableSoundStream.cpp'],
audio_libs)
network = extension(
'network',
['network.pyx'],
network_libs)
major, minor, _, _ , _ = sys.version_info
with open('README.md', 'r') as f:
long_description = f.read()
ext_modules=[system, window, graphics, audio, network]
install_requires = []
data_files = []
if sys.version_info < (3, 4):
install_requires.append('enum34')
# Sets GCC environment variables for assumed SFML location -- scripts/sfml_install.sh uses that location.
# This is required for deployment from Travis.
if platform.system() == "Linux":
os.environ["CPPFLAGS"]="-I{}".format(os.path.join(SCRIPT_DIR, 'extlibs/SFML-2.3.2/include'))
os.environ["LIBRARY_PATH"]=os.path.join(SCRIPT_DIR, "extlibs/SFML-2.3.2/lib")
print("CPPFLAGS:", os.environ["CPPFLAGS"])
print("LIBRARY_PATH:", os.environ["LIBRARY_PATH"])
print("ls $ROOT_DIR/", os.listdir(SCRIPT_DIR))
print("ls $ROOT_DIR/extlibs/", os.listdir(os.path.join(SCRIPT_DIR, 'extlibs')))
kwargs = dict(
name='python-sfml',
ext_modules=ext_modules,
package_dir={'': 'src'},
packages=['sfml'],
data_files=data_files,
version=versioneer.get_version(),
description='Python bindings for SFML',
long_description=long_description,
author='Jonathan de Wachter',
author_email='dewachter.jonathan@gmail.com',
url='http://python-sfml.org',
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: zlib/libpng License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Cython',
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Games/Entertainment',
'Topic :: Multimedia',
'Topic :: Software Development :: Libraries :: Python Modules'],
keywords='sfml SFML simple fast multimedia system window graphics audio network pySFML PySFML python-sfml',
install_requires=install_requires,
cmdclass={
'build_ext': CythonBuildExt,
'version': versioneer.get_cmdclass(),
})
setup(**kwargs)
| 41.377778 | 238 | 0.62312 |
203ceca59e5438cfaaba24a8cca3330f82881a39 | 4,463 | py | Python | gjnn/lda.py | gnebbia/gj-deep-forecasts | ce8cfa74ee3e2b8b77fa5fb3e674070b5a0c5423 | [
"BSD-3-Clause"
] | null | null | null | gjnn/lda.py | gnebbia/gj-deep-forecasts | ce8cfa74ee3e2b8b77fa5fb3e674070b5a0c5423 | [
"BSD-3-Clause"
] | null | null | null | gjnn/lda.py | gnebbia/gj-deep-forecasts | ce8cfa74ee3e2b8b77fa5fb3e674070b5a0c5423 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import re
import gensim
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import logging
logger = logging.getLogger(__name__)
# create sample documents
def get_questions_text(questions_df):
    """Return one merged string per question (``q_text`` followed by ``q_desc``).

    :param questions_df: pandas DataFrame holding ``q_text`` and ``q_desc`` columns.
    :return: list with the concatenated text + description of every question.
    """
    texts = questions_df.q_text.values.tolist()
    descriptions = questions_df.q_desc.values.tolist()
    return [text + desc for text, desc in zip(texts, descriptions)]
def preprocess_text(doc_set):
    """Strip URLs, punctuation-like substrings and a tiny stopword list from each question.

    :param doc_set: list of raw question strings; cleaned in place.
    :return: the same list, with every entry cleaned and re-joined on single spaces.
    """
    punct_substrings = ['\'s', 'e.g.', '(', ')', '-', '_', ',', ';', ':', 'i.e.', '*', '.', '\'']
    url_patterns = [r"http\S+", r"Http\S+", r"HTTP\S+", r"www\S+", r"WWW\S+"]
    stopwords = ['will', '\'s', 'e.g.,', 'i.e.,']

    for idx, raw in enumerate(doc_set):
        # punctuation-like substrings become spaces, URL-looking runs vanish
        for token in punct_substrings:
            raw = raw.replace(token, " ")
        for pattern in url_patterns:
            raw = re.sub(pattern, "", raw)
        # drop stopwords (case-insensitively) and normalise whitespace
        kept = [word for word in raw.split() if word.lower() not in stopwords]
        doc_set[idx] = ' '.join(kept)
    return doc_set
def get_corpus(doc_set):
    """Tokenize, stopword-filter and stem documents, then build a bag-of-words corpus.

    :param doc_set: list of preprocessed document strings.
    :return: tuple ``(corpus, dictionary)`` -- the list of doc2bow vectors and
        the gensim id <-> term dictionary built from the documents.
    """
    tokenizer = RegexpTokenizer(r'\w+')
    en_stop = get_stop_words('en')   # English stop word list
    p_stemmer = PorterStemmer()

    texts = []
    for document in doc_set:
        # lowercase, split into word tokens, drop stopwords, then stem
        tokens = tokenizer.tokenize(document.lower())
        texts.append([p_stemmer.stem(tok) for tok in tokens if tok not in en_stop])

    # id <-> term mapping and document-term (bag-of-words) matrix
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    return corpus, dictionary
def generate_lda_model(num_topics, corpus, passes, dictionary):
    """Train a gensim LDA topic model on the given corpus.

    BUG FIX: the previous implementation overwrote ``num_topics`` with a
    hard-coded 6 and ignored ``passes`` (hard-coded 20); both arguments are
    now honoured.

    :param num_topics: number of latent topics to fit.
    :param corpus: bag-of-words corpus, as produced by ``get_corpus``.
    :param passes: number of training passes over the corpus.
    :param dictionary: gensim id <-> term dictionary for the corpus.
    :return: the trained ``gensim.models.ldamodel.LdaModel``.
    """
    return gensim.models.ldamodel.LdaModel(
        corpus, num_topics=num_topics, id2word=dictionary, passes=passes)
def save_topics_to_file(filename, num_words=20, model=None):
    """Append the model's topic descriptions to a text file, one topic per line.

    BUG FIX: ``num_words`` was previously ignored (hard-coded to 20); it is
    now forwarded to ``print_topics``.  A backward-compatible ``model``
    parameter is added; when ``None`` the module-level ``ldamodel`` global is
    used, exactly as before.

    :param filename: path of the file to append to.
    :param num_words: number of words to print per topic.
    :param model: LDA model to dump; defaults to the global ``ldamodel``.
    """
    if model is None:
        model = ldamodel  # fall back to the module-level global, as before
    with open(filename, 'a') as out:
        for topic in model.print_topics(num_words=num_words):
            out.write(str(topic) + '\n')
if __name__ == "__main__":
    # Input question dump and output file for the learned topic descriptions.
    filename_in = "data/ifps.csv"
    filename_out_topics = "data/topics_structure.txt"
    # sep=None lets pandas sniff the delimiter (requires the python engine).
    questions = pd.read_csv(filename_in, sep=None, engine='python')
    # Merge question text/description, clean it, and build the BoW corpus.
    doc_set = get_questions_text(questions)
    doc_set = preprocess_text(doc_set)
    corpus, dictionary = get_corpus(doc_set)
    num_topics = 7
    passes = 20
    # NOTE(review): confirm generate_lda_model actually honours these two
    # arguments -- the trained model should use num_topics=7 and passes=20.
    ldamodel = generate_lda_model(num_topics, corpus, passes, dictionary)
    save_topics_to_file(filename_out_topics)
    # Add one zero-initialised column per topic to the questions table.
    for i in range(num_topics):
        questions['topic_' + str(i)] = 0
    # Let's see how the model assigns topic on a new question string
    doc_lda = []
    for i in range(len(doc_set)):
        #print("The question is {}".format(doc_set[i]))
        doc_lda.append(ldamodel[corpus[i]])
    #print("The result is {}".format(doc_lda))
    # Write each document's (topic_id, weight) pairs into its topic columns.
    for index, row in questions.iterrows():
        for i in doc_lda[index]:
            questions.loc[index,'topic_' + str(i[0])] = i[1]
    questions.to_csv('data/questions_w_topics.csv', index=False)
79ab1e99e2e577b818c358facb36fb94379026a2 | 801 | py | Python | python/code_challenges/class_30/test_hash.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | null | null | null | python/code_challenges/class_30/test_hash.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | 10 | 2021-07-29T18:56:48.000Z | 2021-09-11T19:11:00.000Z | python/code_challenges/class_30/test_hash.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | 3 | 2021-08-16T06:16:37.000Z | 2021-12-05T14:29:51.000Z | import pytest
from hash_tabel import *
@pytest.fixture
def test_hashtable():
    """Fixture: a HashTable pre-populated with two known key/value pairs.

    BUG FIX: this was a plain function, so the tests requesting it as a
    fixture failed with "fixture 'test_hashtable' not found", and pytest
    additionally collected it as a test that returned a value.  The
    ``@pytest.fixture`` decorator fixes both problems.
    """
    test = HashTable()
    test.add("hash", 3333)
    test.add("tabel", 4444)
    return test


def test_add():
    # Adding a key makes contains() report it.
    test = HashTable()
    test.add("map", 5555)
    assert test.contains('map')


def test_hash():
    # The hash function is deterministic for a known key.
    test = HashTable()
    assert test.hash("leen") == 700


def test_git(test_hashtable):
    # Stored values are retrievable by key.
    test = test_hashtable
    assert test.git("hash") == 3333
    assert test.git("tabel") == 4444


def test_not_git(test_hashtable):
    # Missing keys yield None.
    test = test_hashtable
    assert test.git("test") is None


def test_contains(test_hashtable):
    test = test_hashtable
    assert test.contains("hash")
    assert test.contains("tabel")


def test_retrieve():
    # A small table (size 3) forces collisions; values must still come back.
    test = HashTable(3)
    test.add("map", 5555)
    test.add("leen", 7777)
    assert test.git("leen") == 7777
cee989bc2bb9eefb24a90e6640f54a1389214631 | 3,642 | py | Python | dogpile/cache/backends/memory.py | SandySalvatore/dogpile_cache_release | 863b35becbf741bba89ddbf2700b7667298bf3d3 | [
"BSD-3-Clause"
] | 152 | 2015-01-06T00:56:19.000Z | 2022-03-11T21:08:32.000Z | dogpile/cache/backends/memory.py | SandySalvatore/dogpile_cache_release | 863b35becbf741bba89ddbf2700b7667298bf3d3 | [
"BSD-3-Clause"
] | 25 | 2015-01-19T15:57:43.000Z | 2020-06-29T08:51:22.000Z | dogpile/cache/backends/memory.py | SandySalvatore/dogpile_cache_release | 863b35becbf741bba89ddbf2700b7667298bf3d3 | [
"BSD-3-Clause"
] | 34 | 2015-02-10T01:45:14.000Z | 2022-01-03T13:52:35.000Z | """
Memory Backends
---------------
Provides simple dictionary-based backends.
The two backends are :class:`.MemoryBackend` and :class:`.MemoryPickleBackend`;
the latter applies a serialization step to cached values while the former
places the value as given into the dictionary.
"""
from dogpile.cache.api import CacheBackend, NO_VALUE
from dogpile.cache.compat import pickle
class MemoryBackend(CacheBackend):
    """A backend that stores values in a plain Python dictionary.

    No size management is performed: entries remain in the dictionary
    until explicitly removed.  Note that Dogpile expires items based on
    timestamps and does not evict them from the cache.

    E.g.::

        from dogpile.cache import make_region

        region = make_region().configure(
            'dogpile.cache.memory'
        )

    A dictionary of your own choosing may be supplied via the
    ``cache_dict`` argument::

        my_dictionary = {}
        region = make_region().configure(
            'dogpile.cache.memory',
            arguments={
                "cache_dict":my_dictionary
            }
        )

    """

    # Subclasses flip this to route values through pickle on get/set.
    pickle_values = False

    def __init__(self, arguments):
        # Use the caller-provided dict if present, else a fresh one.
        self._cache = arguments.pop("cache_dict", {})

    def get(self, key):
        stored = self._cache.get(key, NO_VALUE)
        if self.pickle_values and stored is not NO_VALUE:
            stored = pickle.loads(stored)
        return stored

    def get_multi(self, keys):
        stored = [self._cache.get(key, NO_VALUE) for key in keys]
        if not self.pickle_values:
            return stored
        return [
            value if value is NO_VALUE else pickle.loads(value)
            for value in stored
        ]

    def set(self, key, value):
        if self.pickle_values:
            value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        self._cache[key] = value

    def set_multi(self, mapping):
        serialize = self.pickle_values
        for key, value in mapping.items():
            if serialize:
                value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
            self._cache[key] = value

    def delete(self, key):
        self._cache.pop(key, None)

    def delete_multi(self, keys):
        for key in keys:
            self._cache.pop(key, None)
class MemoryPickleBackend(MemoryBackend):
    """A dictionary backend that pickles values on ``set`` and unpickles on ``get``.

    E.g.::

        from dogpile.cache import make_region

        region = make_region().configure(
            'dogpile.cache.memory_pickle'
        )

    Serializing through pickle means the cached value is a *copy* of the
    object handed in, so later mutations of the original object are not
    reflected in the cached value -- matching the semantics of the other
    backends, which always make use of serialization.

    The serialization step costs what it does in the other backends, so
    :class:`.MemoryPickleBackend` performance sits somewhere between the
    pure :class:`.MemoryBackend` and the remote, server-oriented backends
    such as Memcached or Redis.

    Pickle behavior is the same as that of the Redis backend: either
    ``cPickle`` or ``pickle``, with ``HIGHEST_PROTOCOL`` upon serialize.

    .. versionadded:: 0.5.3

    """
    # Enables the pickle path implemented in MemoryBackend.
    pickle_values = True
| 29.609756 | 79 | 0.644701 |
7b6aafb9ee7cd5ec4f3dc385c93e4f53fdd99ca3 | 145 | py | Python | pycs/misc/types.py | sfarrens/cosmostat | a475315cda06dca346095a1e83cb6ad23979acae | [
"MIT"
] | 3 | 2021-02-09T05:03:24.000Z | 2021-11-26T10:20:02.000Z | pycs/misc/types.py | sfarrens/cosmostat | a475315cda06dca346095a1e83cb6ad23979acae | [
"MIT"
] | 8 | 2020-04-28T17:09:50.000Z | 2022-02-01T16:24:43.000Z | pycs/misc/types.py | sfarrens/cosmostat | a475315cda06dca346095a1e83cb6ad23979acae | [
"MIT"
] | 3 | 2020-06-22T07:53:00.000Z | 2021-02-10T19:59:53.000Z | # -*- coding: utf-8 -*-
"""TYPE HANDLING ROUTINES
This module contains methods for handing object types.
"""
from modopt.base.types import *
| 14.5 | 54 | 0.696552 |
fce135424f62df8a659f9049811ce07a6bd607ea | 5,753 | py | Python | tests/rough_clustering_tests.py | geofizx/rough-clustering | 9ab5da8bbc26fc062a15fe9732c3fadac9e77fdf | [
"MIT"
] | 6 | 2019-10-28T12:14:42.000Z | 2021-11-28T18:17:00.000Z | tests/rough_clustering_tests.py | geofizx/rough-clustering | 9ab5da8bbc26fc062a15fe9732c3fadac9e77fdf | [
"MIT"
] | null | null | null | tests/rough_clustering_tests.py | geofizx/rough-clustering | 9ab5da8bbc26fc062a15fe9732c3fadac9e77fdf | [
"MIT"
] | 3 | 2020-12-28T01:54:14.000Z | 2021-11-28T18:17:13.000Z | #!/usr/bin/env python2.7
# encoding: utf-8
"""
Some unit tests and usage examples for rough set clustering class
@data UCI Statlog Data Set:
Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA:
University of California, School of Information and Computer Science.
@author Michael Tompkins
@copyright 2016
"""
# Externals
import time
import json
from collections import Counter
import matplotlib.pyplot as plt
import numpy as npy
from scipy.cluster.vq import kmeans2
# Package level imports from /code
from code import RoughCluster,RoughKMeans
# Set some rough clustering parameters
maxD = 20 # if None, maxD will be determined by algorithm
max_clusters = 2 # Number of clusters to return
# Load some data
# NOTE(review): Python 2 script (print statements); file handle is never closed.
file2 = open("german_all.json","r")
data = json.load(file2)
print data.keys()
# Do some numerical encoding for input payload
# Numeric features are cast to int; the 'amount' feature is additionally
# re-binned into 4 buckets; non-numeric (categorical) features fall into the
# except-branch and get an arbitrary integer label per distinct value.
header = []
data2 = {}
for key in data["payload"].keys():
    header.append(key)
    try:
        data2[key] = [int(data["payload"][key][m]) for m in range(0,len(data["payload"][key]))]
        if key == "amount":
            data2[key] = []
            for n in range(len(data["payload"][key])):
                bins = [0,1500,3000,8000,20000]
                for i,val in enumerate(bins[0:-1]):
                    if (int(data["payload"][key][n])) >= val and (int(data["payload"][key][n]) < bins[i+1]):
                        data2[key].append(i+1)
    except:
        # Categorical feature: map each distinct value to a small integer.
        data2[key] = []
        encoding = {key : m for m,key in enumerate(Counter(data["payload"][key]).keys())}
        for n in range(len(data["payload"][key])):
            data2[key].append(encoding[data["payload"][key][n]])
# Instantiate and run rough clustering
t1 = time.time()
clust = RoughCluster(data2,max_clusters,"ratio",maxD)
clust.get_entity_distances()
clust.enumerate_clusters()
clust.prune_clusters(optimize=True)
t2 = time.time()
print "Rough Set Clustering Took: ",t2-t1," secs"
# Run rough kmeans as well
clstrk = RoughKMeans(data2,2,0.75,0.25,1.2)
clstrk.get_rough_clusters()
t3 = time.time()
print "Rough Kmeans Clustering Took: ",t3-t2," secs"
print "rough kmeans",clstrk.centroids
# Compare results with known centroid mean and std deviations as well as those from k-means
# Print stats for members of clusters
# Determine labels from known classes for "good" and "bad" credit risk
list1 = [i for i in range(len(data["response"])) if data["response"][i] == '1']
list2 = [i for i in range(len(data["response"])) if data["response"][i] == '2']
# Build feature matrices: all entities, class-1 entities, class-2 entities.
tableau_lists = []
tableau_1 = []
tableau_2 = []
for key in header:
    tableau_lists.append(data2[key][:])
    tableau_1.append(npy.asarray(data2[key])[list1].tolist())
    tableau_2.append(npy.asarray(data2[key])[list2].tolist())
datav = npy.asfarray(tableau_lists).T
data1 = npy.asarray(tableau_1).T
data3 = npy.asarray(tableau_2).T
# Per-feature mean/std for the two known classes.
mean1 = npy.mean(data1,axis=0)
mean2 = npy.mean(data3,axis=0)
std1 = npy.std(data1,axis=0)
std2 = npy.std(data3,axis=0)
# Just run k-means to compare centroid mean and std deviations
[centroids,groups] = kmeans2(datav,2,iter=20)
print "kmeans",centroids
meank = [[] for g in range(2)]
val = [[] for n in range(len(groups))]
# Collect the true response label and feature row for each k-means group.
# NOTE(review): meank indexes datav by the *response label* just appended
# (val[n][-1]), not by the entity index m -- confirm this is intentional.
for m in range(len(groups)):
    for n in range(2):
        if groups[m] == n:
            val[n].append(data["response"][m])
            meank[n].append(datav[int(val[n][-1]),:])
meankp = []
stddevk = []
for n in range(2):
    meankp.append(npy.mean(meank[n],axis=0))
    stddevk.append(npy.std(meank[n],axis=0))
# Compile stats for rough clusters and plot results with known class statistics and kmeans results
resultsm = []
resultss = []
# Slightly offset x-positions so the error bars do not overlap in the plot.
rangek = [l+.2 for l in range(20)]
ranger = [l+.1 for l in range(20)]
print "total instances",Counter(data["response"])
key1 = clust.opt_d # Optimal distance D to plot
fig, axs = plt.subplots(nrows=1,ncols=1)
axs.errorbar(range(20),mean2,fmt='ro',yerr=std2,label="True Good Mean+-Std Dev")
plt.hold(True)
plt.title("Comparison of Mean +- Std Deviation for 'Good' and 'Bad' Credit Risk Clustering")
axs.errorbar(range(20),mean1,fmt='bo',yerr=std1,label="True Bad Mean+-Std Dev")
axs.errorbar(rangek,meankp[1],fmt='r+',yerr=stddevk[0],label="Kmeans 0 Class")
axs.errorbar(rangek,meankp[0],fmt='b+',yerr=stddevk[1],label="Kmeans 1 Class")
print "Optimal Clusters",clust.pruned[key1]
print "Optimal Intra-Entity Distance",clust.opt_d
markers = ['bv','rv','gv','kv']
ct = 0
# Per rough cluster: gather member rows, plot mean +- std, print class counts.
for key in clust.pruned[key1]["cluster_list"][max_clusters]:
    datav2 = []
    meant = []
    stdt = []
    for val in clust.pruned[key1]["cluster_list"][max_clusters][key]:
        meant.append(data["response"][int(val)])
        datav2.append(datav[int(val),:])
    tmp = npy.mean(npy.asarray(datav2),axis=0)
    tmp2 = npy.std(npy.asarray(datav2),axis=0)
    axs.errorbar(ranger,tmp,fmt=markers[ct],yerr=tmp2,label="Rough Cluster D="+str(key1)+" Cluster:"+str(key))
    resultsm.append(npy.mean(npy.asarray(datav2),axis=0))
    resultss.append(npy.std((npy.asarray(datav2)),axis=0))
    print key1,key,len(meant),Counter(meant)
    ct += 1
# Overlap counts between cluster memberships and the known target classes.
print "kmeans groups",len([i for i in groups if i == 0]),len([i for i in groups if i == 1])
print "Rough kmeans groups",len(clstrk.clusters['0']["upper"]),len(clstrk.clusters['1']["upper"])
print "Cluster 0 vs Target 0",len(set(clstrk.clusters['0']["upper"]).intersection(set(list1)))
print "Cluster 1 vs Target 1",len(set(clstrk.clusters['1']["upper"]).intersection(set(list2)))
print "Cluster 0 vs Target 1",len(set(clstrk.clusters['0']["upper"]).intersection(set(list2)))
print "Cluster 1 vs Target 0",len(set(clstrk.clusters['1']["upper"]).intersection(set(list1)))
plt.axis([0,20,-2,7])
plt.xlabel("Feature Number",fontsize=14)
plt.ylabel("Centroid Mean +- Std Dev.",fontsize=14)
plt.legend()
plt.show()
47647450eb011c1188dc4a11fa1f2dcfe3a2f996 | 2,588 | py | Python | utils/boolmask.py | rdjdejong/attention-learn-to-route | 3b6bbdad677a36df53eabad98b48f436be298ac8 | [
"MIT"
] | 540 | 2019-02-07T13:52:30.000Z | 2022-03-31T12:51:46.000Z | utils/boolmask.py | rdjdejong/attention-learn-to-route | 3b6bbdad677a36df53eabad98b48f436be298ac8 | [
"MIT"
] | 40 | 2019-02-06T17:57:11.000Z | 2022-03-18T12:18:48.000Z | utils/boolmask.py | rdjdejong/attention-learn-to-route | 3b6bbdad677a36df53eabad98b48f436be298ac8 | [
"MIT"
] | 227 | 2019-02-15T09:25:02.000Z | 2022-03-27T10:42:21.000Z | import torch
import torch.nn.functional as F
def _pad_mask(mask):
# By taking -size % 8, we get 0 if exactly divisible by 8
# and required padding otherwise (i.e. -1 % 8 = 7 pad)
pad = -mask.size(-1) % 8
if pad != 0:
mask = F.pad(mask, [0, pad])
return mask, mask.size(-1) // 8
def _mask_bool2byte(mask):
    """Pack a 0/1 uint8 mask into bytes, 8 bits per byte, LSB first."""
    assert mask.dtype == torch.uint8
    # assert (mask <= 1).all() # Precondition, disabled for efficiency
    padded, n_bytes = _pad_mask(mask)
    bits = padded.view(*padded.size()[:-1], n_bytes, 8)
    # shift bit i left by i places, then sum the group into one byte
    shifts = torch.arange(8, out=padded.new())
    return (bits << shifts).sum(-1, dtype=torch.uint8)
def _mask_byte2long(mask):
    """Pack a byte mask into int64 words, 8 bytes per word, little-endian."""
    assert mask.dtype == torch.uint8
    padded, n_words = _pad_mask(mask)
    # Converting to long before summing costs a temporary 8x memory
    # overhead; an explicit loop would trade that for speed.
    grouped = padded.view(*padded.size()[:-1], n_words, 8).long()
    shifts = torch.arange(8, dtype=torch.int64, device=padded.device) * 8
    return (grouped << shifts).sum(-1)
def mask_bool2long(mask):
    """Compress a 0/1 uint8 mask into packed int64 words (bits -> bytes -> longs)."""
    assert mask.dtype == torch.uint8
    packed_bytes = _mask_bool2byte(mask)
    return _mask_byte2long(packed_bytes)
def _mask_long2byte(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] >> (torch.arange(8, out=mask.new()) * 8))[..., :n].to(torch.uint8).view(*mask.size()[:-1], -1)[..., :n]
def _mask_byte2bool(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] & (mask.new_ones(8) << torch.arange(8, out=mask.new()) * 1)).view(*mask.size()[:-1], -1)[..., :n] > 0
def mask_long2bool(mask, n=None):
    """Expand packed int64 words back into a boolean mask of length ``n``."""
    assert mask.dtype == torch.int64
    as_bytes = _mask_long2byte(mask)
    return _mask_byte2bool(as_bytes, n=n)
def mask_long_scatter(mask, values, check_unset=True):
    """Set one bit per batch entry in a long-packed mask.

    ``values`` holds, for each batch element, the bit index to set; entries
    outside the valid range (e.g. -1) set nothing.  Supports arbitrary batch
    dimensions, but unlike a regular scatter only a single value per entry.
    """
    assert mask.size()[:-1] == values.size()
    word_idx = torch.arange(mask.size(-1), out=mask.new())
    values_ = values[..., None]  # broadcast against the word dimension
    # flag the word whose 64-bit range contains the requested bit index
    where = (values_ >= (word_idx * 64)) & (values_ < ((word_idx + 1) * 64))
    # Optional: verify the target bit is not already set
    assert not (check_unset and ((mask & (where.long() << (values_ % 64))) > 0).any())
    # Shift a 1 into position; % 64 keeps the shift amount in range (the
    # bitshift is cyclic anyway), and `where` zeroes out invalid entries.
    return mask | (where.long() << (values_ % 64))
| 37.507246 | 131 | 0.633694 |
60079817ffc58cc52c07c3f012e1d15ed0823580 | 37,718 | py | Python | kornia/geometry/transform/affwarp.py | NickleDave/kornia | 5392651d0bc268da577fa0a49aa50f957289c7dd | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/geometry/transform/affwarp.py | NickleDave/kornia | 5392651d0bc268da577fa0a49aa50f957289c7dd | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/geometry/transform/affwarp.py | NickleDave/kornia | 5392651d0bc268da577fa0a49aa50f957289c7dd | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Union, Tuple, Optional
import torch
import torch.nn as nn
from kornia.geometry.transform.imgwarp import (
warp_affine, get_rotation_matrix2d, get_affine_matrix2d
)
from kornia.geometry.transform.projwarp import (
warp_affine3d, get_projective_transform
)
from kornia.utils import _extract_device_dtype
__all__ = [
"affine",
"affine3d",
"scale",
"rotate",
"rotate3d",
"translate",
"shear",
"resize",
"rescale",
"Scale",
"Rotate",
"Translate",
"Shear",
"Resize",
"Rescale",
"Affine",
]
# utilities to compute affine matrices
def _compute_tensor_center(tensor: torch.Tensor) -> torch.Tensor:
"""Computes the center of tensor plane for (H, W), (C, H, W) and (B, C, H, W)."""
assert 2 <= len(tensor.shape) <= 4, f"Must be a 3D tensor as HW, CHW and BCHW. Got {tensor.shape}."
height, width = tensor.shape[-2:]
center_x: float = float(width - 1) / 2
center_y: float = float(height - 1) / 2
center: torch.Tensor = torch.tensor(
[center_x, center_y],
device=tensor.device, dtype=tensor.dtype)
return center
def _compute_tensor_center3d(tensor: torch.Tensor) -> torch.Tensor:
"""Computes the center of tensor plane for (D, H, W), (C, D, H, W) and (B, C, D, H, W)."""
assert 3 <= len(tensor.shape) <= 5, f"Must be a 3D tensor as DHW, CDHW and BCDHW. Got {tensor.shape}."
depth, height, width = tensor.shape[-3:]
center_x: float = float(width - 1) / 2
center_y: float = float(height - 1) / 2
center_z: float = float(depth - 1) / 2
center: torch.Tensor = torch.tensor(
[center_x, center_y, center_z],
device=tensor.device, dtype=tensor.dtype)
return center
def _compute_rotation_matrix(angle: torch.Tensor,
                             center: torch.Tensor) -> torch.Tensor:
    """Build a pure 2D affine rotation matrix about ``center`` (unit scale)."""
    unit_scale: torch.Tensor = torch.ones_like(center)
    return get_rotation_matrix2d(center, angle, unit_scale)
def _compute_rotation_matrix3d(yaw: torch.Tensor, pitch: torch.Tensor, roll: torch.Tensor,
                               center: torch.Tensor) -> torch.Tensor:
    """Build a pure 3D projective rotation matrix about ``center`` (unit scale)."""
    # Promote scalar (0-d) and vector (B,) angles to the (B, 1) shape
    # expected by get_projective_transform.
    angle_list = [yaw, pitch, roll]
    if all(a.dim() == 0 for a in angle_list):
        angle_list = [a.unsqueeze(dim=0) for a in angle_list]
    if all(a.dim() == 1 for a in angle_list):
        angle_list = [a.unsqueeze(dim=1) for a in angle_list]
    yaw, pitch, roll = angle_list

    assert len(yaw.shape) == len(pitch.shape) == len(roll.shape) == 2, \
        f"Expected yaw, pitch, roll to be (B, 1). Got {yaw.shape}, {pitch.shape}, {roll.shape}."

    angles: torch.Tensor = torch.cat([yaw, pitch, roll], dim=1)
    scales: torch.Tensor = torch.ones_like(yaw)
    return get_projective_transform(center, angles, scales)
def _compute_translation_matrix(translation: torch.Tensor) -> torch.Tensor:
"""Computes affine matrix for translation."""
matrix: torch.Tensor = torch.eye(
3, device=translation.device, dtype=translation.dtype)
matrix = matrix.repeat(translation.shape[0], 1, 1)
dx, dy = torch.chunk(translation, chunks=2, dim=-1)
matrix[..., 0, 2:3] += dx
matrix[..., 1, 2:3] += dy
return matrix
def _compute_scaling_matrix(scale: torch.Tensor,
                            center: torch.Tensor) -> torch.Tensor:
    """Build a pure affine scaling matrix about ``center`` (zero rotation)."""
    zero_angle: torch.Tensor = torch.zeros(
        scale.shape[:1], device=scale.device, dtype=scale.dtype)
    return get_rotation_matrix2d(center, zero_angle, scale)
def _compute_shear_matrix(shear: torch.Tensor) -> torch.Tensor:
"""Computes affine matrix for shearing."""
matrix: torch.Tensor = torch.eye(3, device=shear.device, dtype=shear.dtype)
matrix = matrix.repeat(shear.shape[0], 1, 1)
shx, shy = torch.chunk(shear, chunks=2, dim=-1)
matrix[..., 0, 1:2] += shx
matrix[..., 1, 0:1] += shy
return matrix
# based on:
# https://github.com/anibali/tvl/blob/master/src/tvl/transforms.py#L166
def affine(tensor: torch.Tensor, matrix: torch.Tensor,
           mode: str = 'bilinear', padding_mode: str = 'zeros',
           align_corners: Optional[bool] = None) -> torch.Tensor:
    r"""Apply an affine transformation to the image.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of
            :math:`(H, W)`, :math:`(D, H, W)` and :math:`(B, C, H, W)`.
        matrix (torch.Tensor): The 2x3 affine transformation matrix.
        mode (str): interpolation mode to calculate output values
          'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
          'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The warped image with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 2, 3, 5)
        >>> aff = torch.eye(2, 3)[None]
        >>> out = affine(img, aff)
        >>> print(out.shape)
        torch.Size([1, 2, 3, 5])
    """
    # warp_affine needs batched BCHW input, so temporarily promote a
    # CHW tensor to a batch of one
    was_unbatched: bool = tensor.ndimension() == 3
    if was_unbatched:
        tensor = torch.unsqueeze(tensor, dim=0)

    # grid_sample gives no support for broadcasting the transform, so
    # expand it explicitly over the batch dimension
    matrix = matrix.expand(tensor.shape[0], -1, -1)

    # warp at the input resolution
    out_h: int = tensor.shape[-2]
    out_w: int = tensor.shape[-1]
    warped: torch.Tensor = warp_affine(tensor, matrix, (out_h, out_w),
                                       mode, padding_mode, align_corners)

    # restore the original (unbatched) layout if needed
    if was_unbatched:
        warped = torch.squeeze(warped, dim=0)
    return warped
def affine3d(tensor: torch.Tensor, matrix: torch.Tensor,
             mode: str = 'bilinear', padding_mode: str = 'zeros',
             align_corners: bool = False) -> torch.Tensor:
    r"""Apply an affine transformation to the 3d volume.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of
            :math:`(D, H, W)`, :math:`(C, D, H, W)` and :math:`(B, C, D, H, W)`.
        matrix (torch.Tensor): The affine transformation matrix with shape :math:`(B, 3, 4)`.
        mode (str): interpolation mode to calculate output values
          'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
          'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: False.

    Returns:
        torch.Tensor: The warped image.

    Example:
        >>> img = torch.rand(1, 2, 4, 3, 5)
        >>> aff = torch.eye(3, 4)[None]
        >>> out = affine3d(img, aff)
        >>> print(out.shape)
        torch.Size([1, 2, 4, 3, 5])
    """
    # warp_affine3d needs batched BCDHW input, so temporarily promote a
    # CDHW tensor to a batch of one
    was_unbatched: bool = tensor.ndimension() == 4
    if was_unbatched:
        tensor = torch.unsqueeze(tensor, dim=0)

    # grid_sample gives no support for broadcasting the transform, so
    # expand it explicitly over the batch dimension
    matrix = matrix.expand(tensor.shape[0], -1, -1)

    # warp at the input resolution
    out_d: int = tensor.shape[-3]
    out_h: int = tensor.shape[-2]
    out_w: int = tensor.shape[-1]
    warped: torch.Tensor = warp_affine3d(tensor, matrix, (out_d, out_h, out_w),
                                         mode, padding_mode, align_corners)

    # restore the original (unbatched) layout if needed
    if was_unbatched:
        warped = torch.squeeze(warped, dim=0)
    return warped
# based on:
# https://github.com/anibali/tvl/blob/master/src/tvl/transforms.py#L185
def rotate(tensor: torch.Tensor, angle: torch.Tensor,
           center: Union[None, torch.Tensor] = None,
           mode: str = 'bilinear', padding_mode: str = 'zeros',
           align_corners: Optional[bool] = None) -> torch.Tensor:
    r"""Rotate the tensor anti-clockwise about the centre.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of :math:`(B, C, H, W)`.
        angle (torch.Tensor): The angle through which to rotate, shape (B).
        center (torch.Tensor): The center through which to rotate, shape (B, 2)
            holding cx and cy; defaults to the image centre when ``None``.
        mode (str): interpolation mode to calculate output values
          'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
          'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The rotated tensor with shape as input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> angle = torch.tensor([90.])
        >>> out = rotate(img, angle)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """
    # validate input types and shape up front
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    if not isinstance(angle, torch.Tensor):
        raise TypeError("Input angle type is not a torch.Tensor. Got {}"
                        .format(type(angle)))
    if center is not None and not isinstance(center, torch.Tensor):
        raise TypeError("Input center type is not a torch.Tensor. Got {}"
                        .format(type(center)))
    if len(tensor.shape) not in (3, 4,):
        raise ValueError("Invalid tensor shape, we expect CxHxW or BxCxHxW. "
                         "Got: {}".format(tensor.shape))

    # default to rotating about the image centre
    if center is None:
        center = _compute_tensor_center(tensor)

    # broadcast angle/center over the batch and build the rotation matrix
    # TODO: add broadcasting to get_rotation_matrix2d for center
    batch_size: int = tensor.shape[0]
    rotation_matrix: torch.Tensor = _compute_rotation_matrix(
        angle.expand(batch_size), center.expand(batch_size, -1))

    # apply the affine transform to the image
    return affine(tensor, rotation_matrix[..., :2, :3], mode, padding_mode, align_corners)
def rotate3d(tensor: torch.Tensor, yaw: torch.Tensor,
             pitch: torch.Tensor, roll: torch.Tensor,
             center: Union[None, torch.Tensor] = None,
             mode: str = 'bilinear', padding_mode: str = 'zeros',
             align_corners: bool = False) -> torch.Tensor:
    r"""Rotate 3D the tensor anti-clockwise about the centre.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of :math:`(B, C, D, H, W)`.
        yaw(torch.Tensor): The yaw angle through which to rotate, shape (B).
        pitch (torch.Tensor): The pitch angle through which to rotate, shape (B).
        roll (torch.Tensor): The roll angle through which to rotate, shape (B).
        center (torch.Tensor): The center through which to rotate, shape (B, 2);
            defaults to the volume centre when ``None``.
        mode (str): interpolation mode to calculate output values
          'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
          'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: False.

    Returns:
        torch.Tensor: The rotated tensor with shape as input.
    """
    # validate input types and shape up front (mirrors rotate())
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    if not isinstance(yaw, torch.Tensor):
        raise TypeError("yaw is not a torch.Tensor. Got {}".format(type(yaw)))
    if not isinstance(pitch, torch.Tensor):
        raise TypeError("pitch is not a torch.Tensor. Got {}".format(type(pitch)))
    if not isinstance(roll, torch.Tensor):
        raise TypeError("roll is not a torch.Tensor. Got {}".format(type(roll)))
    if center is not None and not isinstance(center, torch.Tensor):
        raise TypeError("Input center type is not a torch.Tensor. Got {}"
                        .format(type(center)))
    if len(tensor.shape) not in (4, 5,):
        raise ValueError("Invalid tensor shape, we expect CxDxHxW or BxCxDxHxW. "
                         "Got: {}".format(tensor.shape))

    # default to rotating about the geometric centre of the volume
    if center is None:
        center = _compute_tensor_center3d(tensor)

    # broadcast angles/centre over the batch and build the projective matrix
    # TODO: add broadcasting to get_rotation_matrix2d for center
    batch_size: int = tensor.shape[0]
    rotation_matrix: torch.Tensor = _compute_rotation_matrix3d(
        yaw.expand(batch_size), pitch.expand(batch_size),
        roll.expand(batch_size), center.expand(batch_size, -1))

    # warp with the 3d affine transform
    return affine3d(tensor, rotation_matrix[..., :3, :4], mode, padding_mode, align_corners)
def translate(tensor: torch.Tensor, translation: torch.Tensor,
              mode: str = 'bilinear', padding_mode: str = 'zeros',
              align_corners: Optional[bool] = None) -> torch.Tensor:
    r"""Translate the tensor in pixel units.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of :math:`(B, C, H, W)`.
        translation (torch.Tensor): tensor of shape (B, 2) holding the amount
            of pixels to translate in the x and y direction (dx, dy).
        mode (str): interpolation mode to calculate output values
          'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
          'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The translated tensor with shape as input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> translation = torch.tensor([[1., 0.]])
        >>> out = translate(img, translation)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """
    # validate input types and shape up front
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    if not isinstance(translation, torch.Tensor):
        raise TypeError("Input translation type is not a torch.Tensor. Got {}"
                        .format(type(translation)))
    if len(tensor.shape) not in (3, 4,):
        raise ValueError("Invalid tensor shape, we expect CxHxW or BxCxHxW. "
                         "Got: {}".format(tensor.shape))

    # build the affine translation matrix and warp with it
    translation_matrix: torch.Tensor = _compute_translation_matrix(translation)
    return affine(tensor, translation_matrix[..., :2, :3], mode, padding_mode, align_corners)
def scale(tensor: torch.Tensor, scale_factor: torch.Tensor,
          center: Union[None, torch.Tensor] = None,
          mode: str = 'bilinear', padding_mode: str = 'zeros',
          align_corners: Optional[bool] = None) -> torch.Tensor:
    r"""Scale the tensor by a factor.

    Args:
        tensor (torch.Tensor): The image tensor to be warped in shapes of :math:`(B, C, H, W)`.
        scale_factor (torch.Tensor): The scale factor apply. The tensor
            must have a shape of (B) or (B, 2), where B is batch size.
            If (B), isotropic scaling will perform.
            If (B, 2), x-y-direction specific scaling will perform.
        center (torch.Tensor): The center through which to scale. The tensor
            must have a shape of (B, 2), where B is batch size and last
            dimension contains cx and cy. Defaults to the tensor center.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The scaled tensor with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> scale_factor = torch.tensor([[2., 2.]])
        >>> out = scale(img, scale_factor)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}".format(type(tensor)))
    if not isinstance(scale_factor, torch.Tensor):
        raise TypeError("Input scale_factor type is not a torch.Tensor. Got {}".format(type(scale_factor)))
    if scale_factor.dim() == 1:
        # Isotropic scaling: replicate the factor for the x and y directions.
        scale_factor = scale_factor.repeat(1, 2)
    if center is None:
        # Default pivot: the image center.
        center = _compute_tensor_center(tensor)
    # TODO: add broadcasting to get_rotation_matrix2d for center
    batch = tensor.shape[0]
    center = center.expand(batch, -1)
    scale_factor = scale_factor.expand(batch, 2)
    scaling_matrix: torch.Tensor = _compute_scaling_matrix(scale_factor, center)
    # Warp using the generic affine transform.
    return affine(tensor, scaling_matrix[..., :2, :3], mode, padding_mode, align_corners)
def shear(tensor: torch.Tensor, shear: torch.Tensor,
          mode: str = 'bilinear', padding_mode: str = 'zeros',
          align_corners: bool = False) -> torch.Tensor:
    r"""Shear the tensor.

    Args:
        tensor (torch.Tensor): The image tensor to be skewed with shape of :math:`(B, C, H, W)`.
        shear (torch.Tensor): tensor of shape (B, 2), where B is batch size and
            the last dimension holds the shx / shy shear angles.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: False.

    Returns:
        torch.Tensor: The skewed tensor with shape same as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> shear_factor = torch.tensor([[0.5, 0.0]])
        >>> out = shear(img, shear_factor)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}".format(type(tensor)))
    if not isinstance(shear, torch.Tensor):
        raise TypeError("Input shear type is not a torch.Tensor. Got {}".format(type(shear)))
    if tensor.dim() not in (3, 4):
        raise ValueError("Invalid tensor shape, we expect CxHxW or BxCxHxW. Got: {}".format(tensor.shape))
    # Build the shear matrix, then warp with the generic affine transform.
    shear_matrix: torch.Tensor = _compute_shear_matrix(shear)
    return affine(tensor, shear_matrix[..., :2, :3], mode, padding_mode, align_corners)
def _side_to_image_size(
side_size: int, aspect_ratio: float, side: str = "short"
) -> Tuple[int, int]:
if side not in ("short", "long", "vert", "horz"):
raise ValueError(f"side can be one of 'short', 'long', 'vert', and 'horz'. Got '{side}'")
if side == "vert":
return side_size, int(side_size * aspect_ratio)
elif side == "horz":
return int(side_size / aspect_ratio), side_size
elif (side == "short") ^ (aspect_ratio < 1.0):
return side_size, int(side_size * aspect_ratio)
else:
return int(side_size / aspect_ratio), side_size
def resize(input: torch.Tensor, size: Union[int, Tuple[int, int]],
           interpolation: str = 'bilinear', align_corners: Optional[bool] = None,
           side: str = "short") -> torch.Tensor:
    r"""Resize the input torch.Tensor to the given size.

    Args:
        input (torch.Tensor): The image tensor to be resized with shape of :math:`(B, C, H, W)`.
        size (int, tuple(int, int)): Desired output size. A pair (h, w) is used
            directly; a single int resizes the side selected by ``side`` while
            preserving the aspect ratio.
        interpolation (str): algorithm used for upsampling: 'nearest' | 'linear' | 'bilinear' |
            'bicubic' | 'trilinear' | 'area'. Default: 'bilinear'.
        align_corners(bool): interpolation flag. Default: None. See
        https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for detail
        side (str): Corresponding side if ``size`` is an integer. Can be one of ``"short"``, ``"long"``, ``"vert"``,
            or ``"horz"``. Defaults to ``"short"``.

    Returns:
        torch.Tensor: The resized tensor with the shape as the specified size.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> out = resize(img, (6, 8))
        >>> print(out.shape)
        torch.Size([1, 3, 6, 8])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError("Input tensor type is not a torch.Tensor. Got {}".format(type(input)))
    input_size = h, w = input.shape[-2:]
    if isinstance(size, int):
        # Derive the full (h, w) target from the single side length.
        size = _side_to_image_size(size, w / h, side)
    if size == input_size:
        # Already the requested size: hand back the input untouched.
        return input
    return torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners)
def rescale(
    input: torch.Tensor,
    factor: Union[float, Tuple[float, float]],
    interpolation: str = "bilinear",
    align_corners: Optional[bool] = None,
) -> torch.Tensor:
    r"""Rescale the input torch.Tensor with the given factor.

    Args:
        input (torch.Tensor): The image tensor to be scaled with shape of :math:`(B, C, H, W)`.
        factor (float, tuple(float, float)): Desired scaling factor for each
            direction. If scalar, the value is used for both the vertical and
            horizontal direction; if a pair, it is read as (vert, horz).
        interpolation (str): algorithm used for upsampling: 'nearest' | 'linear' | 'bilinear' |
            'bicubic' | 'trilinear' | 'area'. Default: 'bilinear'.
        align_corners(bool): interpolation flag. Default: None. See
        https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for detail

    Returns:
        torch.Tensor: The rescaled tensor with the shape according to the given factor.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> out = rescale(img, (2, 3))
        >>> print(out.shape)
        torch.Size([1, 3, 8, 12])
    """
    # Fix: the previous docstring documented a nonexistent `side` parameter and
    # omitted `factor`; behavior is unchanged.
    if isinstance(factor, float):
        factor_vert = factor_horz = factor
    else:
        factor_vert, factor_horz = factor
    height, width = input.size()[-2:]
    # Target size is truncated to whole pixels, matching int() semantics.
    size = (int(height * factor_vert), int(width * factor_horz))
    return resize(input, size, interpolation=interpolation, align_corners=align_corners)
class Resize(nn.Module):
    r"""Module wrapper around :func:`resize`.

    Args:
        size (int, tuple(int, int)): Desired output size. A pair (h, w) is used
            directly; a single int resizes the side selected by ``side`` while
            preserving the aspect ratio.
        interpolation (str): algorithm used for upsampling: 'nearest' | 'linear' | 'bilinear' |
            'bicubic' | 'trilinear' | 'area'. Default: 'bilinear'.
        align_corners(bool): interpolation flag. Default: None. See
        https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for detail
        side (str): Corresponding side if ``size`` is an integer. Can be one of ``"short"``, ``"long"``, ``"vert"``,
            or ``"horz"``. Defaults to ``"short"``.

    Returns:
        torch.Tensor: The resized tensor with the shape of the given size.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> out = Resize((6, 8))(img)
        >>> print(out.shape)
        torch.Size([1, 3, 6, 8])
    """

    def __init__(self, size: Union[int, Tuple[int, int]], interpolation: str = 'bilinear',
                 align_corners: Optional[bool] = None, side: str = "short") -> None:
        super().__init__()
        # All configuration is captured here and forwarded on every call.
        self.size: Union[int, Tuple[int, int]] = size
        self.interpolation: str = interpolation
        self.align_corners: Optional[bool] = align_corners
        self.side: str = side

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return resize(input, self.size, self.interpolation, align_corners=self.align_corners, side=self.side)
class Affine(nn.Module):
    r"""Apply multiple elementary affine transforms simultaneously.

    Args:
        angle (torch.Tensor, optional): Angle in degrees for counter-clockwise rotation around the center. The tensor
            must have a shape of (B), where B is the batch size.
        translation (torch.Tensor, optional): Amount of pixels for translation in x- and y-direction. The tensor must
            have a shape of (B, 2), where B is the batch size and the last dimension contains dx and dy.
        scale_factor (torch.Tensor, optional): Factor for scaling. The tensor must have a shape of (B), where B is the
            batch size.
        shear (torch.Tensor, optional): Angles in degrees for shearing in x- and y-direction around the center. The
            tensor must have a shape of (B, 2), where B is the batch size and the last dimension contains sx and sy.
        center (torch.Tensor, optional): Transformation center in pixels. The tensor must have a shape of (B, 2), where
            B is the batch size and the last dimension contains cx and cy. Defaults to the center of image to be
            transformed.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Raises:
        RuntimeError: If not one of ``angle``, ``translation``, ``scale_factor``, or ``shear`` is set.

    Returns:
        torch.Tensor: The transformed tensor with same shape as input.

    Example:
        >>> img = torch.rand(1, 2, 3, 5)
        >>> angle = 90. * torch.rand(1)
        >>> out = Affine(angle)(img)
        >>> print(out.shape)
        torch.Size([1, 2, 3, 5])
    """

    def __init__(
        self,
        angle: Optional[torch.Tensor] = None,
        translation: Optional[torch.Tensor] = None,
        scale_factor: Optional[torch.Tensor] = None,
        shear: Optional[torch.Tensor] = None,
        center: Optional[torch.Tensor] = None,
        mode: str = 'bilinear',
        padding_mode: str = 'zeros',
        align_corners: Optional[bool] = None
    ) -> None:
        # Collect the batch sizes of every provided transform parameter; they
        # must all agree and at least one parameter must be given.
        batch_sizes = [arg.size()[0] for arg in (angle, translation, scale_factor, shear) if arg is not None]
        if not batch_sizes:
            msg = (
                "Affine was created without any affine parameter. At least one of angle, translation, scale_factor, or "
                "shear has to be set."
            )
            raise RuntimeError(msg)
        batch_size = batch_sizes[0]
        if any(other != batch_size for other in batch_sizes[1:]):
            raise RuntimeError(f"The batch sizes of the affine parameters mismatch: {batch_sizes}")
        self._batch_size = batch_size
        super().__init__()
        # Missing parameters default to identity transforms on the shared
        # device/dtype of the given ones.
        device, dtype = _extract_device_dtype([angle, translation, scale_factor])
        self.angle = torch.zeros(batch_size, device=device, dtype=dtype) if angle is None else angle
        self.translation = (torch.zeros(batch_size, 2, device=device, dtype=dtype)
                            if translation is None else translation)
        self.scale_factor = (torch.ones(batch_size, 2, device=device, dtype=dtype)
                             if scale_factor is None else scale_factor)
        self.shear = shear
        self.center = center
        self.mode = mode
        self.padding_mode = padding_mode
        self.align_corners = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if self.shear is None:
            sx = sy = None
        else:
            sx, sy = self.shear[..., 0], self.shear[..., 1]
        center = (self.center if self.center is not None
                  else _compute_tensor_center(input).expand(input.size()[0], -1))
        # Note the negated angle: matches the anti-clockwise convention.
        matrix = get_affine_matrix2d(self.translation, center, self.scale_factor, -self.angle, sx=sx, sy=sy)
        return affine(input, matrix[..., :2, :3], self.mode, self.padding_mode, self.align_corners)
class Rescale(nn.Module):
    r"""Module wrapper around :func:`rescale`.

    Args:
        factor (float, tuple(float, float)): Desired scaling factor in each direction. If scalar, the value is used
            for both the x- and y-direction.
        interpolation (str): Algorithm used for upsampling. Can be one of ``"nearest"``, ``"linear"``, ``"bilinear"``,
            ``"bicubic"``, ``"trilinear"``, or ``"area"``. Default: ``"bilinear"``.
        align_corners(bool): Interpolation flag. Default: None. See :func:`~torch.nn.functional.interpolate` for
            details.

    Returns:
        torch.Tensor: The rescaled tensor with the shape according to the given factor.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> out = Rescale((2, 3))(img)
        >>> print(out.shape)
        torch.Size([1, 3, 8, 12])
    """

    def __init__(
        self,
        factor: Union[float, Tuple[float, float]],
        interpolation: str = "bilinear",
        align_corners: Optional[bool] = None
    ) -> None:
        super().__init__()
        # Configuration is stored verbatim and forwarded on every call.
        self.factor: Union[float, Tuple[float, float]] = factor
        self.interpolation: str = interpolation
        self.align_corners: Optional[bool] = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return rescale(input, self.factor, self.interpolation, align_corners=self.align_corners)
class Rotate(nn.Module):
    r"""Module wrapper around :func:`rotate`: rotate anti-clockwise about the centre.

    Args:
        angle (torch.Tensor): The angle through which to rotate. The tensor
            must have a shape of (B), where B is batch size.
        center (torch.Tensor): The center through which to rotate. The tensor
            must have a shape of (B, 2), where B is batch size and last
            dimension contains cx and cy. Defaults to the image centre.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The rotated tensor with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> angle = torch.tensor([90.])
        >>> out = Rotate(angle)(img)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """

    def __init__(self, angle: torch.Tensor,
                 center: Union[None, torch.Tensor] = None,
                 mode: str = 'bilinear',
                 padding_mode: str = 'zeros',
                 align_corners: Optional[bool] = None) -> None:
        super().__init__()
        # Configuration is stored verbatim and forwarded on every call.
        self.angle: torch.Tensor = angle
        self.center: Union[None, torch.Tensor] = center
        self.mode: str = mode
        self.padding_mode: str = padding_mode
        self.align_corners: Optional[bool] = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return rotate(input, self.angle, self.center, self.mode, self.padding_mode, self.align_corners)
class Translate(nn.Module):
    r"""Module wrapper around :func:`translate`.

    Args:
        translation (torch.Tensor): tensor containing the amount of pixels to
            translate in the x and y direction. The tensor must have a shape of
            (B, 2), where B is batch size, last dimension contains dx dy.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The translated tensor with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> translation = torch.tensor([[1., 0.]])
        >>> out = Translate(translation)(img)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """

    def __init__(self, translation: torch.Tensor,
                 mode: str = 'bilinear',
                 padding_mode: str = 'zeros',
                 align_corners: Optional[bool] = None) -> None:
        super().__init__()
        # Configuration is stored verbatim and forwarded on every call.
        self.translation: torch.Tensor = translation
        self.mode: str = mode
        self.padding_mode: str = padding_mode
        self.align_corners: Optional[bool] = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return translate(input, self.translation, self.mode, self.padding_mode, self.align_corners)
class Scale(nn.Module):
    r"""Module wrapper around :func:`scale`.

    Args:
        scale_factor (torch.Tensor): The scale factor apply. The tensor
            must have a shape of (B) or (B, 2), where B is batch size.
            If (B), isotropic scaling will perform.
            If (B, 2), x-y-direction specific scaling will perform.
        center (torch.Tensor): The center through which to scale. The tensor
            must have a shape of (B, 2), where B is batch size and last
            dimension contains cx and cy. Defaults to the image centre.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: None.

    Returns:
        torch.Tensor: The scaled tensor with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> scale_factor = torch.tensor([[2., 2.]])
        >>> out = Scale(scale_factor)(img)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """

    def __init__(self, scale_factor: torch.Tensor,
                 center: Union[None, torch.Tensor] = None,
                 mode: str = 'bilinear',
                 padding_mode: str = 'zeros',
                 align_corners: Optional[bool] = None) -> None:
        super().__init__()
        # Configuration is stored verbatim and forwarded on every call.
        self.scale_factor: torch.Tensor = scale_factor
        self.center: Union[None, torch.Tensor] = center
        self.mode: str = mode
        self.padding_mode: str = padding_mode
        self.align_corners: Optional[bool] = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return scale(input, self.scale_factor, self.center, self.mode, self.padding_mode, self.align_corners)
class Shear(nn.Module):
    r"""Shear the tensor.

    Fixed docstring: the previous version documented a ``tensor`` constructor
    argument that does not exist (the image is passed to ``forward`` instead).

    Args:
        shear (torch.Tensor): tensor containing the angle to shear
            in the x and y direction. The tensor must have a shape of
            (B, 2), where B is batch size, last dimension contains shx shy.
        mode (str): interpolation mode to calculate output values
            'bilinear' | 'nearest'. Default: 'bilinear'.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border' | 'reflection'. Default: 'zeros'.
        align_corners(bool, optional): interpolation flag. Default: False.

    Returns:
        torch.Tensor: The skewed tensor with the same shape as the input.

    Example:
        >>> img = torch.rand(1, 3, 4, 4)
        >>> shear_factor = torch.tensor([[0.5, 0.0]])
        >>> out = Shear(shear_factor)(img)
        >>> print(out.shape)
        torch.Size([1, 3, 4, 4])
    """

    def __init__(self, shear: torch.Tensor,
                 mode: str = 'bilinear',
                 padding_mode: str = 'zeros',
                 align_corners: bool = False) -> None:
        super(Shear, self).__init__()
        # Configuration is stored verbatim and forwarded on every call.
        self.shear: torch.Tensor = shear
        self.mode: str = mode
        self.padding_mode: str = padding_mode
        self.align_corners: bool = align_corners

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return shear(input, self.shear, self.mode, self.padding_mode, self.align_corners)
| 41.176856 | 120 | 0.619147 |
5202583833fbf8d07924e1f7355781ee7957dd0e | 182,169 | py | Python | tensorflow/python/framework/ops.py | DHsLc/test | f286c78b619b81ca95ba9f738cc0de4e14440e44 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops.py | DHsLc/test | f286c78b619b81ca95ba9f738cc0de4e14440e44 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops.py | DHsLc/test | f286c78b619b81ca95ba9f738cc0de4e14440e44 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import linecache
import re
import sys
import threading
from autograd import core as ag_core
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
# Temporary global switch determining if we should enable the work-in-progress
# calls to the C API. Currently disabled by default but can be manually enabled
# e.g. in tests. This will be removed once all functionality is supported and
# there's no performance penalty with it enabled.
#
# TODO(skyewm) before we can remove this:
# - functions
# - import_graph_def() incrementally adds inputs to ops (i.e. creates an
#   Operation and then calls _add_input()). The current code requires that all
#   inputs be specified when creating the Operation (since we call
#   TF_FinishOperation()).
# - ops_test.py (and others?) create unregistered op types
# - while loop
# - performance (e.g. delete/refactor redundant Python functionality, switch to
#   new session API)
# NOTE(review): presumably consulted at op-construction time, so flipping it
# after graph building has started may have no effect on existing ops — verify.
_USE_C_API = False
def tensor_id(t):
  """Returns a unique identifier for this Tensor."""
  # Unwrap any autograd tracing wrapper before reading the private id.
  return ag_core.getval(t)._id  # pylint: disable=protected-access
def _in_gpu_device():
  """Returns True iff the current eager context device is a GPU."""
  return context.context().device_spec.device_type == "GPU"
@tf_contextlib.contextmanager
def _null_contextmanager():
  """A context manager that does nothing (yields immediately, no cleanup)."""
  yield
def _override_helper(clazz_object, operator, func):
  """Overrides (string) operator on Tensors to call func.

  Args:
    clazz_object: the class to override for; either Tensor or SparseTensor.
    operator: the string name of the operator to override.
    func: the function that replaces the overridden operator.

  Raises:
    ValueError: If operator has already been overwritten,
      or if operator is not allowed to be overwritten.
  """
  existing = getattr(clazz_object, operator, None)
  # Default comparison operators show up as method-wrapper/slot-wrapper
  # objects; any other pre-existing attribute means it was already overridden.
  if existing is not None and not isinstance(existing, type(object.__lt__)):
    raise ValueError("operator %s cannot be overwritten again on class %s." %
                     (operator, clazz_object))
  if operator not in Tensor.OVERLOADABLE_OPERATORS:
    raise ValueError("Overriding %s is disallowed" % operator)
  setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Tuple of registered "tensor-like" types; grown only via
# register_dense_tensor_like_type() below.
_TENSOR_LIKE_TYPES = tuple()


def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name` and `dtype` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  # Bug fix: the original messages contained a literal, unformatted "%s";
  # they now include the offending type. getattr() on the *class* returns the
  # property object itself, so one isinstance check covers both the
  # missing-attribute and wrong-kind cases.
  for prop_name in ("name", "dtype"):
    if not isinstance(getattr(tensor_type, prop_name, None), property):
      raise TypeError("Type %s does not define a `%s` property" %
                      (tensor_type, prop_name))
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
# Process-wide monotonically increasing counter, guarded by a lock so that
# uid() is safe to call from multiple threads.
_uid_counter = 0
_uid_lock = threading.Lock()


def uid():
  """A unique (within this program execution) integer."""
  global _uid_counter
  with _uid_lock:
    _uid_counter += 1
    return _uid_counter
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
  """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
  # Intentionally empty: exists solely as a common base for isinstance checks.
  pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
  def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    # Normalize whatever the caller passed into a canonical DType.
    self._dtype = dtypes.as_dtype(dtype)
    # Shape starts fully unknown; it is refined later via set_shape() and the
    # shape inference machinery.
    self._shape = tensor_shape.unknown_shape()
    # List of operations that use this Tensor as input. We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []
    # Attributes used for C++ shape inference. Not inspected, only forwarded.
    # If set, will be a HandleData object from cpp_shape_inference.proto.
    self._handle_data = None
    # Process-unique integer id (note: __hash__/__eq__ use object identity,
    # not this id).
    self._id = uid()
  @property
  def op(self):
    """The `Operation` that produces this tensor as an output."""
    # Read-only view of the producing op assigned in the constructor.
    return self._op
  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    # Already normalized via dtypes.as_dtype() in the constructor.
    return self._dtype
  @property
  def graph(self):
    """The `Graph` that contains this tensor."""
    # Delegates to the producing Operation's graph.
    return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
  @property
  def device(self):
    """The name of the device on which this tensor will be produced, or None."""
    # Placement is a property of the producing op, so simply forward it.
    return self._op.device
  @property
  def shape(self):
    """Returns the `TensorShape` that represents the shape of this tensor.

    The shape is computed using shape inference functions that are
    registered in the Op for each `Operation`. See
    @{tf.TensorShape}
    for more details of what a shape represents.

    The inferred shape of a tensor is used to provide shape
    information without having to launch the graph in a session. This
    can be used for debugging, and providing early error messages. For
    example:

    ```python
    c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    print(c.shape)
    ==> TensorShape([Dimension(2), Dimension(3)])

    d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
    print(d.shape)
    ==> TensorShape([Dimension(4), Dimension(2)])

    # Raises a ValueError, because `c` and `d` do not have compatible
    # inner dimensions.
    e = tf.matmul(c, d)

    f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
    print(f.shape)
    ==> TensorShape([Dimension(3), Dimension(4)])
    ```

    In some cases, the inferred shape may have unknown dimensions. If
    the caller has additional information about the values of these
    dimensions, `Tensor.set_shape()` can be used to augment the
    inferred shape.

    Returns:
      A `TensorShape` representing the shape of this tensor.
    """
    # _shape starts as unknown_shape() (set in __init__) and is refined by
    # set_shape() and shape inference.
    return self._shape
def _shape_as_list(self):
if self._shape.ndims is not None:
return [dim.value for dim in self._shape.dims]
else:
return None
  def get_shape(self):
    """Alias of Tensor.shape."""
    # Method form of the `shape` property; returns the same TensorShape.
    return self.shape
  def set_shape(self, shape):
    """Updates the shape of this tensor.

    This method can be called multiple times, and will merge the given
    `shape` with the current shape of this tensor. It can be used to
    provide additional information about the shape of this tensor that
    cannot be inferred from the graph alone. For example, this can be used
    to provide additional information about the shapes of images:

    ```python
    _, image_data = tf.TFRecordReader(...).read(...)
    image = tf.image.decode_png(image_data, channels=3)

    # The height and width dimensions of `image` are data dependent, and
    # cannot be computed without executing the op.
    print(image.shape)
    ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])

    # We know that each image in this dataset is 28 x 28 pixels.
    image.set_shape([28, 28, 3])
    print(image.shape)
    ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
    ```

    Args:
      shape: A `TensorShape` representing the shape of this tensor.

    Raises:
      ValueError: If `shape` is not compatible with the current shape of
        this tensor.
    """
    # merge_with() validates compatibility and raises ValueError on conflict.
    self._shape = self._shape.merge_with(shape)
  @property
  def value_index(self):
    """The index of this tensor in the outputs of its `Operation`."""
    # Fixed at construction time; 0 for single-output ops.
    return self._value_index
  def consumers(self):
    """Returns a list of `Operation`s that consume this tensor.

    Returns:
      A list of `Operation`s.
    """
    # Note: returns the internal list itself (not a copy); it grows as new
    # consumers are registered via _add_consumer().
    return self._consumers
  def _add_consumer(self, consumer):
    """Add a consumer to this tensor.

    Consumers are tracked so the computation graph can be navigated from a
    tensor to the operations that use it.

    Args:
      consumer: an Operation.

    Raises:
      TypeError: if the consumer is not an Operation.
    """
    if not isinstance(consumer, Operation):
      raise TypeError("Consumer must be an Operation: %s" % consumer)
    self._consumers.append(consumer)
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
  def _as_tf_output(self):
    """Returns a `c_api.TF_Output` (operation, output index) for this tensor."""
    # Only valid when the producing Operation is backed by a C-API op handle.
    assert self.op._c_op  # pylint: disable=protected-access
    tf_output = c_api.TF_Output()
    tf_output.oper = self.op._c_op  # pylint: disable=protected-access
    tf_output.index = self.value_index
    return tf_output
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name, (", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name)
if self._dtype else "", (", device=%s" % self.device)
if self.device else "")
  def __repr__(self):
    """Returns a debug string including name, static shape and dtype."""
    return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
                                                   self._dtype.name)
  def __hash__(self):
    # Necessary to support Python's collection membership operators.
    # Hashes by object identity, consistent with __eq__ below.
    return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
  @staticmethod
  def _override_operator(operator, func):
    """Registers `func` as the Tensor implementation of dunder `operator`."""
    _override_helper(Tensor, operator, func)
  def __iter__(self):
    """Dummy method to prevent iteration. Do not call.

    NOTE(mrry): If we register __getitem__ as an overloaded operator,
    Python will valiantly attempt to iterate over the Tensor from 0 to
    infinity. Declaring this method prevents this unintended
    behavior.

    Raises:
      TypeError: when invoked.
    """
    raise TypeError("'Tensor' object is not iterable.")
  def __bool__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This overload raises a `TypeError` when the user inadvertently
    treats a `Tensor` as a boolean (e.g. in an `if` statement). For
    example:

    ```python
    if tf.constant(True):  # Will raise.
      # ...

    if tf.constant(5) < tf.constant(7):  # Will raise.
      # ...
    ```

    This disallows ambiguities between testing the Python value vs testing the
    dynamic condition of the `Tensor`.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")
  def __nonzero__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This is the Python 2.x counterpart to `__bool__()` above.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")
  def eval(self, feed_dict=None, session=None):
    """Evaluates this tensor in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for the operation that produces this
    tensor.

    *N.B.* Before invoking `Tensor.eval()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run} for a
        description of the valid feed values.
      session: (Optional.) The `Session` to be used to evaluate this tensor. If
        none, the default session will be used.

    Returns:
      A numpy array corresponding to the value of this tensor.
    """
    # Delegates session resolution (explicit vs. default) to the module helper.
    return _eval_using_default_session(self, feed_dict, self.graph, session)
# TODO(apassos): unify this logic that in constant_op.py
def _maybe_modify_numpy_dtype_determination(np_array):
"""Tweak numpy dtype determination.
numpy prefers int64 and float64, we prefer int32 and float32.
(int32 is often used as the "shape" input to various operations,
many of which only support int32 shapes).
This preference is copied from tensor_util.make_tensor_proto
(https://goto.google.com/numpy_prefs_156503903)
Args:
np_array: A numpy ndarray
Returns:
A numpy ndarray whose dtype may have been modified.
"""
if np_array.dtype == np.float64:
return np_array.astype(np.float32)
if np_array.dtype == np.int64:
# Downcast iff there is no precision loss.
downcasted = np_array.astype(np.int32)
if np.array_equal(downcasted, np_array):
return downcasted
return np_array
# TODO(agarwal): rename to TensorHandle.
class EagerTensor(Tensor):
  """A TensorFlow Eager Tensor.

  Wraps a TFE_TensorHandle from the C API: a concrete, already-computed
  value, in contrast to the symbolic graph `Tensor` it subclasses. Graph-only
  members (`op`, `graph`, `name`, `eval`, ...) raise NotImplementedError.
  """

  def __init__(self, value, dtype=None):  # pylint: disable=super-init-not-called
    """Creates a Tensor object from a Python object or numpy array.

    May share storage with the numpy array, in which case changes to the numpy
    object will reflect
    in the Tensor.

    Arguments:
      value: A numpy.array or a Python object to create a Tensor for.
      dtype: TensorFlow dtype for the returned Tensor. If None, one will be
        automatically selected.
    """
    # TODO(ashankar): Evaluate if we can and perhaps share code with
    # tf.constant defined in
    # https://www.tensorflow.org/code/tensorflow/python/framework/constant_op.py
    self._id = uid()
    if not isinstance(value, np.ndarray):
      npt = None if dtype is None else dtype.as_numpy_dtype
      try:
        value = np.array(value, dtype=npt)
      except ValueError as e:
        raise ValueError(
            "Cannot convert %s to array. Error: %s" % (str(value), e))
      if dtype is None:
        # numpy chose the dtype; prefer int32/float32 over int64/float64.
        value = _maybe_modify_numpy_dtype_determination(value)
    elif dtype is not None:
      npt = dtype.as_numpy_dtype
      if npt != value.dtype:
        value = value.astype(npt)
    try:
      # A C-contiguous buffer is required before handing off to the C API.
      value = np.asarray(value, order="C")
      self._handle = c_api.TFE_Py_NumpyToTensorHandle(value)
    except core._NotOkStatusException as e:  # pylint: disable=protected-access
      raise core._status_to_exception(e.code, e.message)  # pylint: disable=protected-access

    # Almost all TensorFlow kernels for GPU devices keep int32 tensors in host
    # memory. This change approximates the same behavior for eager execution -
    # keeping int32 tensors in host memory.
    #
    # We do so to preclude the need for callers into such kernels from having to
    # explicitly place the int32 tensors in host memory. For example, prior to
    # this change one needed:
    #
    # with tfe.device('/gpu:0'):
    #   ... # code here
    #   with tfe.device('/cpu:0'):
    #     shape = tfe.Tensor(...)
    #   y = tfe.ops.random_uniform(.., shape)
    #
    # Without the CPU device block tfe.ops.random_uniform would fail since the
    # kernel expects the shape in host memory.
    #
    # After this change, we simplify the code:
    #
    # with tfe.device('/gpu:0'):
    #   y = tfe.ops.random_uniform(, tfe.Tensor(...))
    #
    # The approximation is not exact since if there are GPU kernels which do not
    # require host memory for int32 tensors, there will be a discrepancy between
    # eager execution and TensorFlow graphs. However, as of July 2017, there
    # were no known GPU kernels that kept int32 tensors in device memory.
    if _in_gpu_device() and value.dtype != np.int32:
      ctx = context.context()
      # pylint: disable=protected-access
      device_name = ctx.device_name
      with errors.raise_exception_on_not_ok_status() as status:
        self._handle = c_api.TFE_TensorHandleCopyToDevice(
            self._handle, ctx._handle, device_name, status)
      # pylint: enable=protected-access
    self._dtype = dtypes.as_dtype(c_api.TFE_TensorHandleDataType(self._handle))

    # This mirrors tensorflow.core.framework.ops.Tensor._handle_data which will
    # be None for tensors of type other than DT_RESOURCE. For DT_RESOURCE
    # tensors, this will contain a serialized HandleData proto with shape
    # inference metadata about shapes and dtypes of resources accessible from
    # this handle.
    self._handle_data = None
    if core.active_trace() is not None:
      core.active_trace().record_tensor("MANUAL",
                                        tensor_id(self), self.device,
                                        self.shape.num_elements())

  def __del__(self):
    # Releases the underlying C handle and updates the trace, best-effort.
    try:
      if c_api is not None and c_api.TFE_DeleteTensorHandle is not None:
        c_api.TFE_DeleteTensorHandle(self._handle)
      if core.active_trace() is not None:
        core.active_trace().delete_tensor(tensor_id(self))
    except (AttributeError, TypeError):
      # Sometimes deletion during program shutdown throws exception as other
      # modules are no longer available.
      pass

  def _numpy_text(self, is_repr=False):
    """Renders the tensor's value for use in __str__/__repr__."""
    if self.dtype.is_numpy_compatible:
      numpy_text = repr(self.numpy()) if is_repr else str(self.numpy())
    else:
      numpy_text = "<unprintable>"
    if "\n" in numpy_text:
      # Start multi-line values on a fresh line for readability.
      numpy_text = "\n" + numpy_text
    return numpy_text

  def __str__(self):
    return "tfe.Tensor(shape=%s, dtype=%s, numpy=%s)" % (self.shape,
                                                         self.dtype.name,
                                                         self._numpy_text())

  def __repr__(self):
    return "<tfe.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s)>" % (
        self._id, self.shape, self.dtype.name, self._numpy_text(is_repr=True))

  @staticmethod
  def _override_operator(name, func):
    """Registers `func` as the EagerTensor implementation of dunder `name`."""
    setattr(EagerTensor, name, func)

  def numpy(self):
    """Returns a numpy array with the same contents as the Tensor.

    The contents of the Tensor must be backed by host memory. The
    as_cpu_tensor() method can be used to ensure that this is true.

    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.

    Returns:
      A numpy array that may share memory with the Tensor object. Any changes
      to one may be reflected in the other.
    """
    # TODO(ashankar): This with status business seems expensive. Profile/avoid?
    cpu = self.as_cpu_tensor()
    with errors.raise_exception_on_not_ok_status() as status:
      return c_api.TFE_Py_TensorHandleToNumpy(cpu._handle, status)  # pylint: disable=protected-access

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    # pylint: disable=protected-access
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    with errors.raise_exception_on_not_ok_status() as status:
      h = c_api.TFE_TensorHandleCopyToDevice(self._handle, ctx._handle,
                                             device_name, status)
    new_tensor = _tensor_from_handle(h)
    if core.active_trace() is not None:
      core.active_trace().record_tensor("COPY",
                                        tensor_id(new_tensor),
                                        new_tensor.device,
                                        new_tensor.shape.num_elements())
    return new_tensor
    # pylint: enable=protected-access

  @property
  def device(self):
    # Name of the device holding this tensor's memory, from the C API.
    return c_api.TFE_TensorHandleDeviceName(self._handle)

  @property
  def dtype(self):
    return self._dtype

  @property
  def shape(self):
    """The shape of this Tensor as a TensorShape object."""
    n = c_api.TFE_TensorHandleNumDims(self._handle)
    # As of May 2017, TFE_TensorHandle objects were always backed by concrete
    # tensors (which have a valid, known shape).  There were vague plans to
    # change this so that the Tensor class can also represent Tensors that have
    # not yet been computed.
    # If that happens, handle that (e.g., if n < 0: return tensor_shape(None))
    # and also handle -1s returned by TFE_TensorHandleDim.
    assert n >= 0, "See comment in source code"
    return tensor_shape.TensorShape(
        [c_api.TFE_TensorHandleDim(self._handle, x) for x in range(n)])

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    n = c_api.TFE_TensorHandleNumDims(self._handle)
    # As of May 2017, TFE_TensorHandle objects were always backed by concrete
    # tensors (which have a valid, known shape).  There were vague plans to
    # change this so that the Tensor class can also represent Tensors that have
    # not yet been computed.
    # If that happens, handle that (e.g., if n < 0: return tensor_shape(None))
    # and also handle -1s returned by TFE_TensorHandleDim.
    assert n >= 0, "See comment in source code"
    return tuple(c_api.TFE_TensorHandleDim(self._handle, x) for x in range(n))

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  def as_cpu_tensor(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  def as_gpu_tensor(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def __bool__(self):
    # Unlike graph Tensors, a scalar boolean EagerTensor has a concrete value
    # and can therefore be used as a Python bool.
    if self._shape_tuple() != ():  # pylint: disable=g-explicit-bool-comparison
      raise ValueError(
          "Non-scalar tensor %s cannot be converted to boolean." % repr(self))
    if self.dtype != dtypes.bool:
      raise ValueError(
          "Non-boolean tensor %s cannot be converted to boolean." % repr(self))
    return bool(self.as_cpu_tensor().numpy())

  def __nonzero__(self):
    # Python 2.x counterpart to __bool__.
    return self.__bool__()

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise NotImplementedError("op not supported for Eager Tensors.")

  @property
  def graph(self):
    raise NotImplementedError("graph not supported for Eager Tensors.")

  @property
  def name(self):
    raise NotImplementedError("name not supported for Eager Tensors.")

  def set_shape(self, shape):
    raise NotImplementedError("set_shape not supported for Eager Tensors.")

  @property
  def value_index(self):
    raise NotImplementedError("value_index not supported for Eager Tensors.")

  def consumers(self):
    raise NotImplementedError("consumers not supported for Eager Tensors.")

  def _add_consumer(self, consumer):
    raise NotImplementedError("_add_consumer not supported for Eager Tensors.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported for Eager Tensors.")

  def _as_tf_output(self):
    raise NotImplementedError("_as_tf_output not supported for Eager Tensors.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError("eval not supported for Eager Tensors.")
def _tensor_from_handle(handle):
  """'Private' constructor for the Tensor object.

  The existence of a 'handle' is an implementation detail that should be hidden
  from users of this module.  Functions within this module do need to create a
  Tensor object from a handle though.

  One option would be to have an __init__(self, handle) method on the
  Tensor class, but that would make the existence and use of a handle
  'public'.

  Instead, this function avoids exposing a Tensor.__init__ that understands
  handles and yet allows functions within this module to create Tensor
  objects from a handle.

  Arguments:
    handle: A valid TFE_TensorHandle object.

  Returns:
    A Tensor object.
  """
  # pylint: disable=protected-access
  # __new__ deliberately bypasses EagerTensor.__init__ (no numpy round-trip).
  t = EagerTensor.__new__(EagerTensor)
  t._id = uid()
  t._handle = handle
  t._dtype = dtypes.as_dtype(c_api.TFE_TensorHandleDataType(handle))
  t._handle_data = None
  return t
  # pylint: enable=protected-access
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
# Registry mapping priority (int) -> list of (base_type, conversion_func)
# pairs; consumed in sorted-priority order by internal_convert_to_tensor and
# extended by register_tensor_conversion_function.
_tensor_conversion_func_registry = {
    0: [(Tensor, _TensorTensorConversionFunction)]
}
# Mark Tensor as a dense tensor-like type for isinstance-style checks.
register_dense_tensor_like_type(Tensor)
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  ```python
  import numpy as np

  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  # Thin public wrapper: same as internal_convert_to_tensor with as_ref=False.
  return internal_convert_to_tensor(
      value=value,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)
def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  This function can be useful when composing a new operation in Python
  All standard Python op constructors apply this function to each of their
  Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
  lists, and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    as_ref: True if we want the mutable view of Variables, if applicable.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  error_prefix = "" if name is None else "%s: " % name
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  # Conversion functions are tried in increasing priority order; within a
  # priority bucket, in registration order. The first one that matches
  # `value`'s type and does not return NotImplemented wins.
  for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):
    for base_type, conversion_func in funcs_at_priority:
      if isinstance(value, base_type):
        # If dtype is None but preferred_dtype is not None, we try to
        # cast to preferred_dtype first.
        ret = None
        if dtype is None and preferred_dtype is not None:
          try:
            ret = conversion_func(
                value, dtype=preferred_dtype, name=name, as_ref=as_ref)
          except (TypeError, ValueError):
            # Could not coerce the conversion to use the preferred dtype.
            ret = None

          if ret is not None and ret is not NotImplemented:
            # A conversion that claims success must honor preferred_dtype
            # exactly (up to base dtype).
            if (ret.dtype.base_dtype !=
                dtypes.as_dtype(preferred_dtype).base_dtype):
              raise TypeError("convert_to_tensor did not convert to "
                              "the preferred dtype: %s vs %s " %
                              (ret.dtype.base_dtype,
                               dtypes.as_dtype(preferred_dtype).base_dtype))

        if ret is None:
          ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)

        if ret is NotImplemented:
          # This converter declined; fall through to the next registered one.
          continue

        if not isinstance(ag_core.getval(ret), Tensor):
          raise RuntimeError(
              "%sConversion function %r for type %s returned non-Tensor: %r" %
              (error_prefix, conversion_func, base_type, ret))
        if dtype and not dtype.is_compatible_with(ret.dtype):
          raise RuntimeError(
              "%sConversion function %r for type %s returned incompatible "
              "dtype: requested = %s, actual = %s" %
              (error_prefix, conversion_func, base_type, dtype.name,
               ret.dtype.name))
        return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." % (error_prefix, value,
                                                          type(value)))
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created;
      element `i` is then named `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors, used as
      a soft preference when `dtype` is None. Has no effect when conversion
      to `preferred_dtype` is not possible.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If `values` is not a sequence, or if no conversion function is
      registered for an element in `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections.Sequence):
    raise TypeError("values must be a list.")
  return [
      internal_convert_to_tensor(
          value,
          dtype=dtype,
          name=None if name is None else "%s_%d" % (name, index),
          as_ref=as_ref,
          preferred_dtype=preferred_dtype)
      for index, value in enumerate(values)
  ]
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created;
      element `i` is then named `name + '_' + i`.
    preferred_dtype: Optional element type for the returned tensors, used as
      a soft preference when `dtype` is None. Has no effect when conversion
      to `preferred_dtype` is not possible.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Public, non-ref variant of internal_convert_n_to_tensor.
  return internal_convert_n_to_tensor(
      values, dtype=dtype, name=name, as_ref=False,
      preferred_dtype=preferred_dtype)
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs pass through unmodified; anything
  else goes through `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # Public, non-ref variant of the internal helper.
  return internal_convert_to_tensor_or_indexed_slices(
      value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
                                                 dtype=None,
                                                 name=None,
                                                 as_ref=False):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs are returned unmodified (after an
  optional dtype-compatibility check); everything else is routed through
  `internal_convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if not isinstance(value, _TensorLike):
    return internal_convert_to_tensor(
        value, dtype=dtype, name=name, as_ref=as_ref)
  if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
  return value
def internal_convert_n_to_tensor_or_indexed_slices(values,
                                                   dtype=None,
                                                   name=None,
                                                   as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  `None` entries and any `IndexedSlices` or `SparseTensor` objects in
  `values` are returned unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is created;
      element `i` is then named `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If `values` is not a sequence, or if no conversion function is
      registered for an element in `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections.Sequence):
    raise TypeError("values must be a list.")
  converted = []
  for index, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    element_name = None if name is None else "%s_%d" % (name, index)
    converted.append(
        internal_convert_to_tensor_or_indexed_slices(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Thin public wrapper: same as the internal variant with as_ref=False.
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)
def register_tensor_conversion_function(base_type,
                                        conversion_func,
                                        priority=100):
  """Registers a function for converting objects of `base_type` to `Tensor`.

  The conversion function must have the following signature:

  ```python
      def conversion_func(value, dtype=None, name=None, as_ref=False):
        # ...
  ```

  It must return a `Tensor` with the given `dtype` if specified. If the
  conversion function creates a new `Tensor`, it should use the given
  `name` if specified. All exceptions will be propagated to the caller.

  The conversion function may return `NotImplemented` for some
  inputs. In this case, the conversion process will continue to try
  subsequent conversion functions.

  If `as_ref` is true, the function must return a `Tensor` reference,
  such as a `Variable`.

  NOTE: The conversion functions will execute in order of priority,
  followed by order of registration. To ensure that a conversion function
  `F` runs before another conversion function `G`, ensure that `F` is
  registered with a smaller priority than `G`.

  Args:
    base_type: The base type or tuple of base types for all objects that
      `conversion_func` accepts.
    conversion_func: A function that converts instances of `base_type` to
      `Tensor`.
    priority: Optional integer that indicates the priority for applying this
      conversion function. Conversion functions with smaller priority values
      run earlier than conversion functions with larger priority values.
      Defaults to 100.

  Raises:
    TypeError: If the arguments do not have the appropriate type.
  """
  if not (isinstance(base_type, type) or
          (isinstance(base_type, tuple) and
           all(isinstance(x, type) for x in base_type))):
    raise TypeError("base_type must be a type or a tuple of types.")
  if not callable(conversion_func):
    raise TypeError("conversion_func must be callable.")
  # setdefault replaces the previous try/except-KeyError dance: fetch the
  # priority bucket, creating an empty one on first use, in a single call.
  _tensor_conversion_func_registry.setdefault(priority, []).append(
      (base_type, conversion_func))
class IndexedSlices(_TensorLike):
  """A sparse representation of a set of tensor slices at given indices.

  This class is a simple wrapper for a pair of `Tensor` objects:

  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.

  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.

  The dense tensor `dense` represented by an `IndexedSlices` `slices` has

  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```

  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients
  (e.g. @{tf.gather}).

  Contrast this representation with
  @{tf.SparseTensor},
  which uses multi-dimensional indices and scalar values.
  """

  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`.

    Args:
      values: A `Tensor` of shape `[D0, D1, ..., Dn]`.
      indices: A 1-D integer `Tensor` of shape `[D0]`.
      dense_shape: (Optional.) A 1-D `Tensor` holding the shape of the
        corresponding dense tensor.
    """
    # Return value is discarded; presumably called for its validation that
    # all three inputs belong to one graph — TODO(review): confirm against
    # _get_graph_from_inputs.
    _get_graph_from_inputs([values, indices, dense_shape])
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape

  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values

  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices

  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape

  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    return self.values.name

  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device

  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype

  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph

  def __str__(self):
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values, (", dense_shape=%s" % self._dense_shape)
        if self._dense_shape is not None else "")

  def __neg__(self):
    # Negating only the values leaves the indexed positions unchanged.
    return IndexedSlices(-self.values, self.indices, self.dense_shape)
# Plain-value counterpart of IndexedSlices with the same three fields.
# NOTE(review): appears to hold concrete (non-symbolic) values; confirm
# intended usage at call sites.
IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
  """Returns `dev_spec` as a device string, serializing DeviceSpec objects."""
  is_spec = isinstance(dev_spec, pydev.DeviceSpec)
  return dev_spec.to_string() if is_spec else dev_spec
def _NodeDef(op_type, name, device=None, attrs=None):  # pylint: disable=redefined-outer-name
  """Builds a `NodeDef` proto from the given op type, name, device and attrs.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    device: string, device, or function from NodeDef to string.
      Value for the "device" attribute of the NodeDef proto.
    attrs: Optional dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef()
  node_def.op = compat.as_bytes(op_type)
  node_def.name = compat.as_bytes(name)
  if attrs is not None:
    for attr_name, attr_value in six.iteritems(attrs):
      node_def.attr[attr_name].CopyFrom(attr_value)
  # Device is resolved last so a callable `device` sees the populated proto.
  if device is not None:
    node_def.device = (
        device(node_def) if callable(device) else _device_string(device))
  return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
# Op names must start with an alphanumeric character or '.'; scope names may
# be empty and may additionally begin with '_', '-' or '/' characters.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
class Operation(object):
  """Represents a graph node that performs computation on tensors.
  An `Operation` is a node in a TensorFlow `Graph` that takes zero or
  more `Tensor` objects as input, and produces zero or more `Tensor`
  objects as output. Objects of type `Operation` are created by
  calling a Python op constructor (such as
  @{tf.matmul})
  or @{tf.Graph.create_op}.
  For example `c = tf.matmul(a, b)` creates an `Operation` of type
  "MatMul" that takes tensors `a` and `b` as input, and produces `c`
  as output.
  After the graph has been launched in a session, an `Operation` can
  be executed by passing it to
  @{tf.Session.run}.
  `op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
  """
  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    r"""Creates an `Operation`.
    NOTE: This constructor validates the name of the `Operation` (passed
    as `node_def.name`). Valid `Operation` names match the following
    regular expression:
    [A-Za-z0-9.][A-Za-z0-9_.\\-/]*
    Args:
      node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
        Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
        `op`, and `device`. The `input` attribute is irrelevant here
        as it will be computed when generating the model.
      g: `Graph`. The parent graph.
      inputs: list of `Tensor` objects. The inputs to this `Operation`.
      output_types: list of `DType` objects. List of the types of the
        `Tensors` computed by this operation. The length of this list indicates
        the number of output endpoints of the `Operation`.
      control_inputs: list of operations or tensors from which to have a
        control dependency.
      input_types: List of `DType` objects representing the
        types of the tensors accepted by the `Operation`. By default
        uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
        reference-typed inputs must specify these explicitly.
      original_op: Optional. Used to associate the new `Operation` with an
        existing `Operation` (for example, a replica with the op that was
        replicated).
      op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
        op type that this `Operation` represents.
    Raises:
      TypeError: if control inputs are not Operations or Tensors,
        or if `node_def` is not a `NodeDef`,
        or if `g` is not a `Graph`,
        or if `inputs` are not tensors,
        or if `inputs` and `input_types` are incompatible.
      ValueError: if the `node_def` name is not valid.
    """
    if not isinstance(node_def, node_def_pb2.NodeDef):
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    if not _VALID_OP_NAME_REGEX.match(node_def.name):
      raise ValueError("'%s' is not a valid node name" % node_def.name)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    # Deep-copy so later mutation of the caller's proto cannot affect this op.
    self._node_def = copy.deepcopy(node_def)
    self._graph = g
    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    self._inputs = list(inputs)  # Defensive copy.
    for a in self._inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
      # Mark that we consume the inputs.
      a._add_consumer(self)  # pylint: disable=protected-access
    if output_types is None:
      output_types = []
    # Cached output types; when the C API is enabled, the _output_types
    # property cross-checks this against the C graph.
    self._output_types_val = output_types
    self._outputs = [
        Tensor(self, i, output_type)
        for i, output_type in enumerate(output_types)
    ]
    if input_types is None:
      input_types = [i.dtype.base_dtype for i in self._inputs]
    else:
      if not all(
          x.is_compatible_with(i.dtype)
          for i, x in zip(self._inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (self.node_def.name, [i.dtype for i in self._inputs],
                         input_types))
    self._input_types_val = input_types
    # Build the list of control inputs.
    self._control_inputs = []
    if control_inputs:
      for c in control_inputs:
        c_op = None
        if isinstance(c, Operation):
          c_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          c_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        self._control_inputs.append(c_op)
    self._original_op = original_op
    self._op_def = op_def
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access
    # Add this op to the current control flow context:
    self._control_flow_context = g._get_control_flow_context()  # pylint: disable=protected-access
    if self._control_flow_context is not None:
      self._control_flow_context.AddOp(self)
    # NOTE(keveman): Control flow context's AddOp could be creating new ops and
    # setting op.inputs[index] = new_op. Thus the new ops' id could be larger
    # than this op's id even though this op depend on them. Therefore, delaying
    # assigning id to this op until all ops this could be dependent on are
    # created.
    self._id_value = self._graph._next_id()  # pylint: disable=protected-access
    self._recompute_node_def()
    if self._graph._c_graph:  # pylint: disable=protected-access
      if self._op_def:
        # TODO(skyewm): op_def_library.apply_op() flattens the incoming
        # inputs. Refactor so we don't have to do this here.
        grouped_inputs = self._reconstruct_sequence_inputs(
            self._op_def, self._inputs, self._node_def.attr)
      else:
        # If no OpDef is specified, assume all inputs are scalar.
        grouped_inputs = self._inputs
      self._c_op = self._create_c_op(self._graph, self._node_def,
                                     grouped_inputs, self._control_inputs)
    else:
      # No C API graph: this op exists only in the Python data structures.
      self._c_op = None
  def _create_c_op(self, graph, node_def, inputs, control_inputs):
    """Creates a TF_Operation.
    Args:
      graph: a `Graph`.
      node_def: `node_def_pb2.NodeDef` for the operation to create.
      inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
        `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
        "list(int64)"). The length of the list should be equal to the number of
        inputs specified by this operation's op def.
      control_inputs: A list of `Operation`s to set as control dependencies.
    Returns:
      A wrapped TF_Operation*.
    """
    # pylint: disable=protected-access
    op_desc = c_api.TF_NewOperation(graph._c_graph,
                                    compat.as_str(node_def.op),
                                    compat.as_str(node_def.name))
    # Add inputs
    for op_input in inputs:
      if isinstance(op_input, (list, tuple)):
        c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
      else:
        c_api.TF_AddInput(op_desc, op_input._as_tf_output())
    # Add control inputs
    for control_input in control_inputs:
      c_api.TF_AddControlInput(op_desc, control_input._c_op)
    # pylint: enable=protected-access
    # Add attrs
    for name, attr_value in node_def.attr.items():
      serialized = attr_value.SerializeToString()
      # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
      # It might be worth creating a convenient way to re-use the same status.
      with errors.raise_exception_on_not_ok_status() as status:
        c_api.TF_SetAttrValueProto(op_desc,
                                   compat.as_str(name), serialized, status)
    with errors.raise_exception_on_not_ok_status() as status:
      c_op = c_api.TF_FinishOperation(op_desc, status)
    return c_op
  def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
    """Regroups a flat list of input tensors into scalar and sequence inputs.
    Args:
      op_def: The `op_def_pb2.OpDef` (for knowing the input types)
      inputs: a list of input `Tensor`s to the op.
      attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
        how long each sequence is)
    Returns:
      A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs).
    """
    grouped_inputs = []
    i = 0
    for input_arg in op_def.input_arg:
      # A sequence length is given either by a "number_attr" (N copies of one
      # type) or a "type_list_attr" (one tensor per listed type).
      if input_arg.number_attr:
        input_len = attrs[input_arg.number_attr].i
        is_sequence = True
      elif input_arg.type_list_attr:
        input_len = len(attrs[input_arg.type_list_attr].list.type)
        is_sequence = True
      else:
        input_len = 1
        is_sequence = False
      if is_sequence:
        grouped_inputs.append(inputs[i:i + input_len])
      else:
        grouped_inputs.append(inputs[i])
      i += input_len
    # Every flat input must be accounted for by exactly one arg.
    assert i == len(inputs)
    return grouped_inputs
  def colocation_groups(self):
    """Returns the list of colocation groups of the op."""
    default_colocation_group = [
        compat.as_bytes("loc:@%s" % self._node_def.name)
    ]
    if "_class" not in self._node_def.attr:
      # This op has no explicit colocation group, so it is itself its
      # own root of a colocation group.
      return default_colocation_group
    attr_groups = [
        class_name for class_name in self.get_attr("_class")
        if class_name.startswith(b"loc:@")
    ]
    # If there are no colocation groups in the explicit _class field,
    # return the default colocation group.
    return attr_groups if attr_groups else default_colocation_group
  def values(self):
    """DEPRECATED: Use outputs."""
    return tuple(self.outputs)
  def _get_control_flow_context(self):
    """Returns the control flow context of this op.
    Returns:
      A context object.
    """
    return self._control_flow_context
  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context of this op.
    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx
  @property
  def name(self):
    """The full name of this operation."""
    if self._graph._c_graph:  # pylint: disable=protected-access
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here.
      assert self._node_def.name == c_api.TF_OperationName(self._c_op)
      return c_api.TF_OperationName(self._c_op)
    else:
      return self._node_def.name
  @property
  def _id(self):
    """The unique integer id of this operation."""
    return self._id_value
  @property
  def device(self):
    """The name of the device to which this op has been assigned, if any.
    Returns:
      The string name of the device to which this op has been
      assigned, or an empty string if it has not been assigned to a
      device.
    """
    if self._graph._c_graph:  # pylint: disable=protected-access
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here
      assert self._node_def.device == c_api.TF_OperationDevice(self._c_op)
      return c_api.TF_OperationDevice(self._c_op)
    else:
      return self._node_def.device
  @property
  def _output_types(self):
    """List this operation's output types.
    Returns:
      List of the types of the Tensors computed by this operation.
      Each element in the list is an integer whose value is one of
      the TF_DataType enums defined in c_api.h
      The length of this list indicates the number of output endpoints
      of the operation.
    """
    if self._graph._c_graph:  # pylint: disable=protected-access
      num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
      output_types = [
          c_api.TF_OperationOutputType(self._tf_output(i))
          for i in xrange(num_outputs)
      ]
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here.
      assert self._output_types_val == output_types
      # In all the tests we have output_types that are passed into
      # Operation.__init__ are a list of ints (which is illegal according
      # to the docstring), but input_types are instances of DType.
      # This extra assert is to catch if we ever use DType for output_types.
      if output_types:
        assert isinstance(output_types[0], int)
      return output_types
    else:
      return self._output_types_val
  def _tf_output(self, output_idx):
    """Create and return a new TF_Output for output_idx'th output of this op."""
    tf_output = c_api.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output
  def _tf_input(self, input_idx):
    """Create and return a new TF_Input for input_idx'th input of this op."""
    tf_input = c_api.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input
  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Set the device of this operation.
    Args:
      device: string or device.. The device to set.
    """
    if _USE_C_API:
      c_api.SetRequestedDevice(
          self._graph._c_graph,  # pylint: disable=protected-access
          self._c_op,  # pylint: disable=protected-access
          _device_string(device))
    # TODO(nolivia): remove this line when switch to C api
    self._node_def.device = _device_string(device)
  def _add_input(self, tensor, dtype=None):
    """Add a new input to this operation.
    Args:
      tensor: the Tensor to add as an input.
      dtype: tf.DType: type of the input; defaults to
        the tensor's dtype.
    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    assert not self._graph._c_graph, (  # pylint: disable=protected-access
        "Operation._add_input doesn't work with C API")
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    if dtype is None:
      dtype = tensor.dtype
    else:
      dtype = dtypes.as_dtype(dtype)
      if not dtype.is_compatible_with(tensor.dtype):
        raise TypeError(
            "Cannot convert a tensor of type %s to an input of type %s" %
            (tensor.dtype.name, dtype.name))
    self._inputs.append(tensor)
    self._input_types_val.append(dtype)
    tensor._add_consumer(self)  # pylint: disable=protected-access
    self._recompute_node_def()
  def _update_input(self, index, tensor, dtype=None):
    """Update the input to this operation at the given index.
    NOTE: This is for TF internal use only. Please don't use it.
    Args:
      index: the index of the input to update.
      tensor: the Tensor to be used as the input at the given index.
      dtype: tf.DType: type of the input; defaults to
        the tensor's dtype.
    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    assert not self._graph._c_graph, (  # pylint: disable=protected-access
        "Operation._update_input doesn't work with C API")
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    if dtype is None:
      dtype = tensor.dtype
    else:
      dtype = dtypes.as_dtype(dtype)
      if not dtype.is_compatible_with(tensor.dtype):
        raise TypeError(
            "Cannot convert a tensor of type %s to an input of type %s" %
            (tensor.dtype.name, dtype.name))
    # Unregister this op as a consumer of the old input before replacing it.
    self._inputs[index].consumers().remove(self)
    self._inputs[index] = tensor
    self._input_types_val[index] = dtype
    tensor._add_consumer(self)  # pylint: disable=protected-access
    self._recompute_node_def()
  def _add_control_inputs(self, ops):
    """Add a list of new control inputs to this operation.
    Args:
      ops: the list of Operations to add as control input.
    Raises:
      TypeError: if ops is not a list of Operations.
      ValueError: if any op in ops is from a different graph.
    """
    assert not self._graph._c_graph, (  # pylint: disable=protected-access
        "Operation._add_control_inputs doesn't work with C API")
    if ops:
      for op in ops:
        if not isinstance(op, Operation):
          raise TypeError("op must be an Operation: %s" % op)
        _assert_same_graph(self, op)
        self._control_inputs.append(op)
      self._recompute_node_def()
  def _add_control_input(self, op):
    """Add a new control input to this operation.
    Args:
      op: the Operation to add as control input.
    Raises:
      TypeError: if op is not an Operation.
      ValueError: if op is from a different graph.
    """
    if _USE_C_API:
      c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access
    else:
      self._add_control_inputs([op])
  # Methods below are used when building the NodeDef and Graph proto.
  def _recompute_node_def(self):
    """Rebuilds `self._node_def.input` from the current (control) inputs."""
    del self._node_def.input[:]
    # pylint: disable=protected-access
    self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])
    # pylint: enable=protected-access
    if self._control_inputs:
      # Control dependencies are encoded with a leading '^' in NodeDef inputs.
      self._node_def.input.extend(
          ["^%s" % op.name for op in self._control_inputs])
  def __str__(self):
    return str(self._node_def)
  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
  @property
  def outputs(self):
    """The list of `Tensor` objects representing the outputs of this op."""
    return self._outputs
  # pylint: disable=protected-access
  class _InputList(object):
    """Immutable input list wrapper."""
    def __init__(self, op):
      self._op = op
    def __iter__(self):
      return iter(self._op._inputs)
    def __len__(self):
      return len(self._op._inputs)
    def __bool__(self):
      return bool(self._op._inputs)
    # Python 3 wants __bool__, Python 2.7 wants __nonzero__
    __nonzero__ = __bool__
    def __getitem__(self, i):
      return self._op._inputs[i]
  # pylint: enable=protected-access
  @property
  def inputs(self):
    """The list of `Tensor` objects representing the data inputs of this op."""
    return Operation._InputList(self)
  @property
  def _input_dtypes(self):
    # Alias for _input_types.
    return self._input_types
  @property
  def _input_types(self):
    if self._graph._c_graph:  # pylint: disable=protected-access
      num_inputs = c_api.TF_OperationNumInputs(self._c_op)
      input_types = [
          dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
          for i in xrange(num_inputs)
      ]
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here.
      assert self._input_types_val == input_types
      return input_types
    else:
      return self._input_types_val
  @property
  def control_inputs(self):
    """The `Operation` objects on which this op has a control dependency.
    Before this op is executed, TensorFlow will ensure that the
    operations in `self.control_inputs` have finished executing. This
    mechanism can be used to run ops sequentially for performance
    reasons, or to ensure that the side effects of an op are observed
    in the correct order.
    Returns:
      A list of `Operation` objects.
    """
    if self._graph._c_graph:  # pylint: disable=protected-access
      control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
      # pylint: disable=protected-access
      return [
          self.graph._get_operation_by_name_unsafe(
              c_api.TF_OperationName(c_op)) for c_op in control_c_ops
      ]
      # pylint: enable=protected-access
    else:
      return self._control_inputs
  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    if self._graph._c_graph:  # pylint: disable=protected-access
      op_type = c_api.TF_OperationOpType(self._c_op)
      # TODO(iga): Remove these asserts after converting to C API by default.
      # Just being a bit paranoid here.
      # pylint: disable=unidiomatic-typecheck
      assert type(op_type) == type(self._node_def.op), (
          "Expected same types %s vs %s" % (type(op_type),
                                            type(self._node_def.op)))
      # pylint: enable=unidiomatic-typecheck
      assert op_type == self._node_def.op
      return op_type
    else:
      return self._node_def.op
  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph
  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns a serialized `NodeDef` representation of this operation.
    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._node_def
  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.
    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._op_def
  @property
  def traceback(self):
    """Returns the call stack from when this operation was constructed."""
    return self._graph._convert_stack(self._traceback)  # pylint: disable=protected-access
  @property
  def traceback_with_start_lines(self):
    """Same as traceback but includes start line of function definition.
    Returns:
      A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
    """
    return self._graph._convert_stack(  # pylint: disable=protected-access
        self._traceback,
        include_func_start_lineno=True)
  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.
    Args:
      name: The name of the attr to fetch.
    Returns:
      The value of the attr, as a Python object.
    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
    if name not in self._node_def.attr:
      raise ValueError("No attr named '" + name + "' in " + str(self._node_def))
    x = self._node_def.attr[name]
    # Treat an empty oneof value as an empty list.
    if not x.WhichOneof("value"):
      return []
    if x.HasField("list"):
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
          else:
            return list(getattr(x.list, f))
      return []
    else:
      for f in fields:
        if x.HasField(f):
          if f == "type":
            return dtypes.as_dtype(getattr(x, f))
          else:
            return getattr(x, f)
      assert False, "Unsupported field type in " + str(x)
  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.
    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.
    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.
    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run}
        for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation. If
        none, the default session will be used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)
# Registry mapping op-type name -> gradient function. A registered value of
# None marks the op type as deliberately non-differentiable (NotDifferentiable).
_gradient_registry = registry.Registry("gradient")
class RegisterGradient(object):
  """Decorator that registers a gradient function for an op type.

  Used only when defining a new op type. For an op with `m` inputs and `n`
  outputs, the gradient function receives the original `Operation` plus `n`
  `Tensor`s (the gradients w.r.t. each output) and must return `m` `Tensor`s
  (the partial gradients w.r.t. each input).

  For example, if ops of type `"Sub"` take inputs `x` and `y` and return the
  single output `x - y`, the gradient is registered as:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string op type, i.e. the
  `OpDef.name` field of the proto defining the operation.
  """
  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
    """
    if isinstance(op_type, six.string_types):
      self._op_type = op_type
    else:
      raise TypeError("op_type must be a string")
  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    _gradient_registry.register(f, self._op_type)
    return f
def NotDifferentiable(op_type):
  """Marks ops of type `op_type` as not differentiable.

  Registers `None` as the gradient entry, which makes zeros propagate as the
  gradient of such ops (e.g. `tf.size()`):

  ```python
  tf.NotDifferentiable("Size")
  ```

  Do *not* use this for ops that have a well-defined gradient which simply is
  not implemented yet; for those, make no declaration so that requesting the
  gradient raises an error.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if isinstance(op_type, six.string_types):
    _gradient_registry.register(None, op_type)
  else:
    raise TypeError("op_type must be a string")
# Alias for the old name, will be eventually removed.
# Deprecated: prefer NotDifferentiable in new code.
NoGradient = NotDifferentiable
def get_gradient_function(op):
  """Returns the function that computes gradients for "op".

  Ops without inputs have no gradient function (returns None). An op may
  override the registry key via its "_gradient_op_type" attr; otherwise the
  op's type is used.
  """
  if not op.inputs:
    return None
  try:
    lookup_key = op.get_attr("_gradient_op_type")
  except ValueError:
    # No override attr present; fall back to the op type.
    lookup_key = op.type
  return _gradient_registry.lookup(lookup_key)
# Registries consulted by set_shapes_for_outputs(): explicit registrations
# first, then the "weak" defaults installed via RegisterShape(None).
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
  """Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
  global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
  # Idempotent: the first caller wins, later calls are no-ops.
  if _call_cpp_shape_fn:
    return  # already registered
  # NOTE(review): named wrappers (rather than e.g. functools.partial) appear
  # deliberate — set_shapes_for_outputs() embeds shape_func.__name__ in its
  # error message. Confirm before restructuring.
  def call_without_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=False)
  _call_cpp_shape_fn = call_without_requiring
  def call_with_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=True)
  _call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
  """No longer used. Was: A decorator for registering a shape function.
  Shape functions must now be registered via the SetShapeFn on the
  original Op specification in C++.
  """
  def __init__(self, op_type):
    """Saves the `op_type` as the `Operation` type."""
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type
  def __call__(self, f):
    """Registers "f" as the shape function for "op_type".

    Passing `f=None` registers the C++ shape function as a "weak" default in
    a separate registry, which any non-None registration overrides.
    """
    if f is None:
      assert _call_cpp_shape_fn
      # None is a special "weak" value that provides a default shape function,
      # and can be overridden by a non-None registration.
      try:
        _default_shape_function_registry.register(_call_cpp_shape_fn,
                                                  self._op_type)
      except KeyError:
        # Ignore duplicate registrations of the weak value. This can
        # occur if the op library input to wrapper generation
        # inadvertently links in one or more of the standard op
        # libraries.
        pass
    else:
      _shape_registry.register(f, self._op_type)
    return f
def set_shapes_for_outputs(op):
  """Uses the registered shape functions to set the shapes for op's outputs."""
  # Lookup order: explicit registration, then weak default, then the C++
  # shape function that requires an op-level registration.
  try:
    shape_func = _shape_registry.lookup(op.type)
  except LookupError:
    try:
      shape_func = _default_shape_function_registry.lookup(op.type)
    except LookupError:
      shape_func = _call_cpp_shape_fn_and_require_op
  shapes = shape_func(op)
  if shapes is None:
    raise RuntimeError(
        "Shape function for op %s did not return any shapes" % op)
  if isinstance(shapes, dict):
    # Returned by call_cpp_shape_fn: unpack shapes and attach handle data.
    shapes_dict = shapes
    shapes = shapes_dict["shapes"]
    for out, handle_data in zip(op.outputs, shapes_dict["handle_data"]):
      out._handle_data = handle_data  # pylint: disable=protected-access
  if len(op.outputs) != len(shapes):
    raise RuntimeError(
        "Shape function for op %s returned %d shapes but expected %d %s %s" %
        (op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
  for out, inferred_shape in zip(op.outputs, shapes):
    out.set_shape(inferred_shape)
class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """
  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics.

    Args:
      statistic_type: String identifying the kind of statistic (e.g. "flops").
      value: Optional initial value; `None` means "not yet counted".
    """
    self.statistic_type = statistic_type
    self.value = value
  @property
  def statistic_type(self):
    return self._statistic_type
  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type
  @property
  def value(self):
    return self._value
  @value.setter
  def value(self, value):
    self._value = value
  def __iadd__(self, other):
    """Accumulates `other` into this stat; both must share a statistic_type.

    A `None` value on either side is treated as "no data": the other side's
    value (possibly None) is kept unchanged.
    """
    if other.statistic_type != self.statistic_type:
      # BUG FIX: the message was previously passed logging-style as extra
      # ValueError args -- ValueError("...%s...%s.", a, b) -- so the
      # placeholders were never substituted. Format it explicitly instead.
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self
# Registry mapping "<op_type>,<statistic_type>" -> statistics function
# (populated by RegisterStatistics, queried by get_stats_for_node_def).
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
  """Decorator registering a statistics function for an (op type, stat type).

  Lets an op type report resource usage as an `OpStats` object. Well-known
  statistic types so far:

  - flops: How many floating-point operations a node takes to complete; the
    total FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics by picking a new type string, registering
  functions for the ops you care about, and then calling
  get_stats_for_node_def. Registering the same (op, statistic) pair twice
  raises a KeyError.

  Statistics are counted per op, so this is not suitable for model parameters
  (capacity), which should be counted only once even when shared by multiple
  ops (e.g. RNN).

  Example -- a new metric called doohickey for a Foo operation:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Client code then retrieves it with:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  and reads the amount from doohickey.value (None if not defined).
  """
  @staticmethod
  def _validated(value, arg_name):
    # Validation shared by both constructor arguments; messages match the
    # historical per-argument wording exactly.
    if not isinstance(value, six.string_types):
      raise TypeError("%s must be a string." % arg_name)
    if "," in value:
      raise TypeError("%s must not contain a comma." % arg_name)
    return value
  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` as the `Operation` type."""
    self._op_type = self._validated(op_type, "op_type")
    self._statistic_type = self._validated(statistic_type, "statistic_type")
  def __call__(self, f):
    """Registers "f" as the statistics function for "op_type"."""
    registry_key = "%s,%s" % (self._op_type, self._statistic_type)
    _stats_registry.register(f, registry_key)
    return f
def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up the node's statistics function in the registry and calls it.

  Given a Graph and a NodeDef from a GraphDef, calls the statistics method
  registered for the node's op type (if any) and returns its result; when no
  function is registered, returns an empty statistics object.

  Args:
    graph: A Graph object that's been set up with the node's graph.
    node: A NodeDef describing the operator.
    statistic_type: A string identifying the statistic we're interested in.

  Returns:
    An OpStats object containing information about resource usage.
  """
  registry_key = node.op + "," + statistic_type
  try:
    # NOTE: a LookupError raised by the stats function itself also falls
    # back to an empty OpStats, mirroring a missing registration.
    return _stats_registry.lookup(registry_key)(graph, node)
  except LookupError:
    return OpStats(statistic_type)
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
  def __init__(self):
    """Creates a new, empty Graph."""
    # Protects the core state that may be accessed by multiple readers.
    # Only state that can be returned via public accessors (`as_graph_def()`,
    # `get_operations()`, `as_graph_element()`, `get_collection()`, and
    # `get_collection_ref()`) is protected by the lock. Thread-safety is
    # provided on a best-effort basis to support buggy programs, and is not
    # guaranteed by the public `tf.Graph` API.
    # NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads
    self._lock = threading.Lock()
    # Indices mapping op id (int) and op name (str) to the registered node.
    self._nodes_by_id = dict()  # GUARDED_BY(self._lock)
    self._next_id_counter = 0  # GUARDED_BY(self._lock)
    self._nodes_by_name = dict()  # GUARDED_BY(self._lock)
    # Highest op id added so far; see the `version` property.
    self._version = 0  # GUARDED_BY(self._lock)
    # Current name stack: uniquified names
    self._name_stack = ""
    # Maps a name used in the graph to the next id to use for that name.
    self._names_in_use = {}
    # Functions that will be applied to choose a device if none is specified.
    self._device_function_stack = []
    # Default original_op applied to new ops.
    self._default_original_op = None
    # Current control flow context. It could be either CondContext or
    # WhileContext defined in ops/control_flow_ops.py
    self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
    self._control_dependencies_stack = []
    # Arbitrary collections of objects.
    self._collections = {}
    # The graph-level random seed
    self._seed = None
    # A dictionary of attributes that should be applied to all ops.
    self._attr_scope_map = {}
    # A map from op type to the kernel label that should be used.
    self._op_to_kernel_label_map = {}
    # A map from op type to an alternative op type that should be used when
    # computing gradients.
    self._gradient_override_map = {}
    # True if the graph is considered "finalized". In that case no
    # new operations can be added.
    self._finalized = False
    # Functions defined in the graph
    self._functions = collections.OrderedDict()
    # Default GraphDef versions
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
    self._building_function = False
    # Stack of colocate_with ops
    self._colocation_stack = []
    # Set of tensors that are dangerous to feed!
    self._unfeedable_tensors = set()
    # Set of operations that are dangerous to fetch!
    self._unfetchable_ops = set()
    # A map of tensor handle placeholder to tensor dtype.
    self._handle_feeders = {}
    # A map from tensor handle to its read op.
    self._handle_readers = {}
    # A map from tensor handle to its move op.
    self._handle_movers = {}
    # A map from tensor handle to its delete op.
    self._handle_deleters = {}
    # Resource container.
    self._container = ""
    self._registered_ops = op_def_registry.get_registered_ops()
    # TODO(skyewm): fold as much of the above as possible into the C
    # implementation
    if _USE_C_API:
      self._scoped_c_graph = c_api_util.ScopedTFGraph()
    else:
      self._scoped_c_graph = None
def _convert_stack(self, stack, include_func_start_lineno=False):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 5-tuples,
(filename, lineno, name, frame_globals, func_start_lineno).
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno,
unused_frame_info) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
  def _extract_stack(self):
    """A lightweight, extensible re-implementation of traceback.extract_stack.

    NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
    each stack frame using linecache, which results in an abundance of stat()
    calls. This implementation does not retrieve the code, and any consumer
    should apply _convert_stack to the result to obtain a traceback that can
    be formatted etc. using traceback methods.

    Derived classes can implement _extract_frame_info() to add extra
    information to the traceback.

    Returns:
      A list of 6-tuples
      (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
      corresponding to the call stack of the current thread.
    """
    # Raise and immediately catch an exception so that sys.exc_info() yields
    # a traceback anchored at the current frame, without using the
    # CPython-specific sys._getframe().
    try:
      raise ZeroDivisionError
    except ZeroDivisionError:
      # tb_frame is this method's own frame; f_back is the caller's frame,
      # which is where the recorded stack should start.
      f = sys.exc_info()[2].tb_frame.f_back
    ret = []
    # Walk outward (innermost first) until the top of the stack.
    while f is not None:
      lineno = f.f_lineno
      co = f.f_code
      filename = co.co_filename
      name = co.co_name
      frame_globals = f.f_globals
      func_start_lineno = co.co_firstlineno
      # Subclass hook; the base implementation returns None.
      frame_info = self._extract_frame_info(f)
      ret.append((filename, lineno, name, frame_globals, func_start_lineno,
                  frame_info))
      f = f.f_back
    # Present the stack outermost-first, matching traceback.extract_stack.
    ret.reverse()
    return ret
def _extract_frame_info(self, frame): # pylint: disable=unused-argument
"""Extracts custom information from a frame in an op traceback."""
return None
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
  def _add_op(self, op):
    """Adds 'op' to the graph.

    Registers `op` under both its id and its name, so that later lookups
    (e.g. `get_operation_by_name()`, `as_graph_element()`) can find it, and
    advances the graph `version` to at least `op._id`.

    Args:
      op: the Operator or Tensor to add.

    Raises:
      TypeError: if op is not an Operation or Tensor.
      ValueError: if the op.name or op._id are already used.
    """
    self._check_not_finalized()
    if not isinstance(op, (Tensor, Operation)):
      raise TypeError("op must be a Tensor or Operation: %s" % op)
    with self._lock:
      # pylint: disable=protected-access
      if op._id in self._nodes_by_id:
        raise ValueError("cannot add an op with id %d as it already "
                         "exists in the graph" % op._id)
      if op.name in self._nodes_by_name:
        raise ValueError("cannot add op with name %s as that name "
                         "is already used" % op.name)
      self._nodes_by_id[op._id] = op
      self._nodes_by_name[op.name] = op
      # `version` tracks the highest id ever added to this graph.
      self._version = max(self._version, op._id)
      # pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
return self._graph_def_versions
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
  def _as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional. If this is set, returns a `GraphDef`
        containing only the nodes that were added to this graph since
        its `version` property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each
        node with the inferred shapes of each of its outputs.

    Returns:
      A tuple containing a
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer, and the version of the graph to which that
      `GraphDef` corresponds.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    with self._lock:
      graph = graph_pb2.GraphDef()
      graph.versions.CopyFrom(self._graph_def_versions)
      bytesize = 0
      # Iterate in id order so the serialized nodes appear in creation order.
      for op_id in sorted(self._nodes_by_id):
        op = self._nodes_by_id[op_id]
        if from_version is None or op_id > from_version:
          graph.node.extend([op.node_def])
          if op.outputs and add_shapes:
            assert "_output_shapes" not in graph.node[-1].attr
            graph.node[-1].attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in op.outputs])
          bytesize += op.node_def.ByteSize()
          # Protocol buffers cannot be serialized past 2GB; the `< 0` test
          # guards against integer overflow of the running total.
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
      if self._functions:
        for f in self._functions.values():
          bytesize += f.definition.ByteSize()
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
          graph.library.function.extend([f.definition])
          if f.grad_func_name:
            grad_def = function_pb2.GradientDef()
            grad_def.function_name = f.name
            grad_def.gradient_func = f.grad_func_name
            graph.library.gradient.extend([grad_def])
      return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call to the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
previous = self._functions.get(name, None)
if previous:
raise ValueError("Another function is already defined with that name")
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
self._functions[name] = function
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
def create_op(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) If True, shape inference will be performed
to compute the shapes of the outputs.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a names ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
if key not in node_def.attr:
if callable(value):
value = value(node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" % (key,
value))
node_def.attr[key].CopyFrom(value)
# Apply a kernel label if one has been specified for this op_type.
try:
kernel_label = self._op_to_kernel_label_map[op_type]
node_def.attr["_kernel"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op_type for gradients if one has been
# specified for this op_type.
try:
mapped_op_type = self._gradient_override_map[op_type]
node_def.attr["_gradient_op_type"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
control_inputs = self._control_dependencies_for_inputs(inputs)
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
if compute_shapes:
set_shapes_for_outputs(ret)
self._add_op(ret)
self._record_op_seen_by_control_dependencies(ret)
if compute_device:
self._apply_device_functions(ret)
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack:
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to
# provide consistency between the device and the colocation
# property.
if (ret.device and pydev.canonical_name(ret.device) !=
pydev.canonical_name(colocation_op.device)):
logging.warning("Tried to colocate %s with an op %s that had "
"a different device: %s vs %s. "
"Ignoring colocation property.", name,
colocation_op.name, ret.device,
colocation_op.device)
else:
ret._set_device(colocation_op.device) # pylint: disable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
ret.node_def.attr["_class"].CopyFrom(
attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(
s=all_colocation_groups)))
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if (self._container and op_type in self._registered_ops and
self._registered_ops[op_type].is_stateful and
"container" in ret.node_def.attr and
not ret.node_def.attr["container"].s):
ret.node_def.attr["container"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(self._container)))
return ret
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." % (repr(name), repr(op_name),
len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__,
types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is a internal unsafe version of get_operation_by_name. It skips many
checks and does not have user friedly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly. Use this method with the `with` keyword
to specify that ops created within the scope of a block should be
added to this graph.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
"""
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()` which always returns the
actual collection list if it exists in that it returns a new list each time
it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
return []
if scope is None:
return list(coll_list)
else:
c = []
regex = re.compile(scope)
for item in coll_list:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
try:
self._default_original_op = op
yield
finally:
self._default_original_op = old_original_op
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
  r"""Returns a context manager that creates hierarchical names for operations.

  A graph maintains a stack of name scopes. A `with name_scope(...):`
  statement pushes a new name onto the stack for the lifetime of the context.

  The `name` argument will be interpreted as follows:

  * A string (not ending with '/') will create a new name scope, in which
    `name` is appended to the prefix of all operations created in the
    context. If `name` has been used before, it will be made unique by
    calling `self.unique_name(name)`.
  * A scope previously captured from a `with g.name_scope(...) as
    scope:` statement will be treated as an "absolute" name scope, which
    makes it possible to re-enter existing scopes.
  * A value of `None` or the empty string will reset the current name scope
    to the top-level (empty) name scope.

  For example:

  ```python
  with tf.Graph().as_default() as g:
    c = tf.constant(5.0, name="c")
    assert c.op.name == "c"
    c_1 = tf.constant(6.0, name="c")
    assert c_1.op.name == "c_1"

    # Creates a scope called "nested"
    with g.name_scope("nested") as scope:
      nested_c = tf.constant(10.0, name="c")
      assert nested_c.op.name == "nested/c"

      # Creates a nested scope called "inner".
      with g.name_scope("inner"):
        nested_inner_c = tf.constant(20.0, name="c")
        assert nested_inner_c.op.name == "nested/inner/c"

      # Create a nested scope called "inner_1".
      with g.name_scope("inner"):
        nested_inner_1_c = tf.constant(30.0, name="c")
        assert nested_inner_1_c.op.name == "nested/inner_1/c"

      # Treats `scope` as an absolute name scope, and
      # switches to the "nested/" scope.
      with g.name_scope(scope):
        nested_d = tf.constant(40.0, name="d")
        assert nested_d.op.name == "nested/d"

        with g.name_scope(""):
          e = tf.constant(50.0, name="e")
          assert e.op.name == "e"
  ```

  The name of the scope itself can be captured by `with
  g.name_scope(...) as scope:`, which stores the name of the scope
  in the variable `scope`. This value can be used to name an
  operation that represents the overall result of executing the ops
  in a scope. For example:

  ```python
  inputs = tf.constant(...)
  with g.name_scope('my_layer') as scope:
    weights = tf.Variable(..., name="weights")
    biases = tf.Variable(..., name="biases")
    affine = tf.matmul(inputs, weights) + biases
    output = tf.nn.relu(affine, name=scope)
  ```

  NOTE: This constructor validates the given `name`. Valid scope
  names match one of the following regular expressions:

      [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
      [A-Za-z0-9_.\\-/]* (for other scopes)

  Args:
    name: A name for the scope.

  Returns:
    A context manager that installs `name` as a new name scope.

  Raises:
    ValueError: If `name` is not a valid scope name, according to the rules
      above.
  """
  if name:
    if self._name_stack:
      # Scopes created in a nested scope may have initial characters
      # that are illegal as the initial character of an op name
      # (viz. '-', '\', '/', and '_').
      if not _VALID_SCOPE_NAME_REGEX.match(name):
        raise ValueError("'%s' is not a valid scope name" % name)
    else:
      # Scopes created in the root must match the more restrictive
      # op name regex, which constrains the initial character.
      if not _VALID_OP_NAME_REGEX.match(name):
        raise ValueError("'%s' is not a valid scope name" % name)
  try:
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name and name[-1] == "/":
      # A captured scope name (ending in '/') re-enters that scope verbatim,
      # without uniquification.
      new_stack = _name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    yield "" if new_stack is None else new_stack + "/"
  finally:
    # Restore the caller's scope even if the body raised.
    self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
  """Return a unique operation name for `name`.

  Note: You rarely need to call `unique_name()` directly.  Most of
  the time you just need to create `with g.name_scope()` blocks to
  generate structured names.

  `unique_name` is used to generate structured names, separated by
  `"/"`, to help identify operations when debugging a graph.
  Operation names are displayed in error messages reported by the
  TensorFlow runtime, and in various visualization tools such as
  TensorBoard.

  If `mark_as_used` is set to `True`, which is the default, a new
  unique name is created and marked as in use.  If it's set to `False`,
  the unique name is returned without actually being marked as used.
  This is useful when the caller simply wants to know what the name
  to be created will be.

  Args:
    name: The name for an operation.
    mark_as_used: Whether to mark this name as being used.

  Returns:
    A string to be passed to `create_op()` that will be used
    to name the operation being created.
  """
  # Fully qualify the candidate with the active name scope, if any.
  if self._name_stack:
    name = self._name_stack + "/" + name
  count = self._names_in_use.get(name, 0)
  if mark_as_used:
    self._names_in_use[name] = count + 1
  if count > 0:
    base = name
    # Probe numbered variants until one is free.
    while name in self._names_in_use:
      name = "%s_%d" % (base, count)
      count += 1
    if mark_as_used:
      # Record the numbered form too, so a later explicit
      # unique_name("name_1") still yields a unique result.
      self._names_in_use[name] = 1
  return name
def get_name_scope(self):
  """Returns the current name scope.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_default_graph().get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  current_scope = self._name_stack
  return current_scope
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
  """Returns a context manager that specifies an op to colocate with.

  Note: this function is not for public use, only for internal libraries.

  For example:

  ```python
  a = tf.Variable([1.0])
  with g.colocate_with(a):
    b = tf.constant(1.0)
    c = tf.add(a, b)
  ```

  `b` and `c` will always be colocated with `a`, no matter where `a`
  is eventually placed.

  **NOTE** Using a colocation scope resets any existing device constraints.

  If `op` is `None` then `ignore_existing` must be `True` and the new
  scope resets all colocation and device constraints.

  Args:
    op: The op to colocate all created ops with, or `None`.
    ignore_existing: If true, only applies colocation of this op within
      the context, rather than applying all colocation properties
      on the stack.  If `op` is `None`, this value must be `True`.

  Raises:
    ValueError: if op is None but ignore_existing is False.

  Yields:
    A context manager that specifies the op with which to colocate
    newly created ops.
  """
  if op is None and not ignore_existing:
    raise ValueError("Trying to reset colocation (op is None) but "
                     "ignore_existing is not True")
  if op is not None and not isinstance(op, Operation):
    # We always want to colocate with the reference op.
    op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
  # By default, colocate_with resets the device function stack,
  # since colocate_with is typically used in specific internal
  # library functions where colocation is intended to be "stronger"
  # than device functions.
  #
  # In the future, a caller may specify that device_functions win
  # over colocation, in which case we can add support.
  device_fn_tmp = self._device_function_stack
  self._device_function_stack = []
  if ignore_existing:
    # Save the outer colocation constraints so that only `op` applies
    # inside this context; restored on exit below.
    current_stack = self._colocation_stack
    self._colocation_stack = []
  if op is not None:
    self._colocation_stack.append(op)
  try:
    yield
  finally:
    # Restore device function stack
    self._device_function_stack = device_fn_tmp
    if op is not None:
      self._colocation_stack.pop()
    # Reset the colocation stack if requested.
    if ignore_existing:
      self._colocation_stack = current_stack
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
  # pylint: disable=line-too-long
  """Returns a context manager that specifies the default device to use.

  The `device_name_or_function` argument may either be a device name
  string, a device function, or None:

  * If it is a device name string, all operations constructed in
    this context will be assigned to the device with that name, unless
    overridden by a nested `device()` context.
  * If it is a function, it will be treated as a function from
    Operation objects to device name strings, and invoked each time
    a new Operation is created. The Operation will be assigned to
    the device with the returned name.
  * If it is None, all `device()` invocations from the enclosing context
    will be ignored.

  For information about the valid syntax of device name strings, see
  the documentation in
  [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

  For example:

  ```python
  with g.device('/device:GPU:0'):
    # All operations constructed in this context will be placed
    # on GPU 0.
    with g.device(None):
      # All operations constructed in this context will have no
      # assigned device.

  # Defines a function from `Operation` to device string.
  def matmul_on_gpu(n):
    if n.type == "MatMul":
      return "/device:GPU:0"
    else:
      return "/cpu:0"

  with g.device(matmul_on_gpu):
    # All operations of type "MatMul" constructed in this context
    # will be placed on GPU 0; all other operations will be placed
    # on CPU 0.
  ```

  **N.B.** The device scope may be overridden by op wrappers or
  other library code. For example, a variable assignment op
  `v.assign()` must be colocated with the `tf.Variable` `v`, and
  incompatible device scopes will be ignored.

  Args:
    device_name_or_function: The device name or function to use in
      the context.

  Yields:
    A context manager that specifies the default device to use for newly
    created ops.
  """
  # pylint: enable=line-too-long
  if (device_name_or_function is not None and
      not callable(device_name_or_function)):
    # Parse a device-name string once, up front, into a merging function.
    device_function = pydev.merge_device(device_name_or_function)
  else:
    # Callables are pushed as-is; a None entry cancels the enclosing scopes
    # (see _apply_device_functions, which stops at the first None).
    device_function = device_name_or_function
  try:
    self._device_function_stack.append(device_function)
    yield
  finally:
    self._device_function_stack.pop()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in reverse order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
for device_function in reversed(self._device_function_stack):
if device_function is None:
break
op._set_device(device_function(op)) # pylint: disable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
  """Returns a context manager that specifies the resource container to use.

  Stateful operations, such as variables and queues, can maintain their
  states on devices so that they can be shared by multiple processes.
  A resource container is a string name under which these stateful
  operations are tracked. These resources can be released or cleared
  with `tf.Session.reset()`.

  For example:

  ```python
  with g.container('experiment0'):
    # All stateful Operations constructed in this context will be placed
    # in resource container "experiment0".
    v1 = tf.Variable([1.0])
    v2 = tf.Variable([2.0])
    with g.container("experiment1"):
      # All stateful Operations constructed in this context will be
      # placed in resource container "experiment1".
      v3 = tf.Variable([3.0])
      q1 = tf.FIFOQueue(10, tf.float32)
    # All stateful Operations constructed in this context will be
    # be created in the "experiment0".
    v4 = tf.Variable([4.0])
    q1 = tf.FIFOQueue(20, tf.float32)
    with g.container(""):
      # All stateful Operations constructed in this context will be
      # be placed in the default resource container.
      v5 = tf.Variable([5.0])
      q3 = tf.FIFOQueue(30, tf.float32)

  # Resets container "experiment0", after which the state of v1, v2, v4, q1
  # will become undefined (such as uninitialized).
  tf.Session.reset(target, ["experiment0"])
  ```

  Args:
    container_name: container name string.

  Returns:
    A context manager for defining resource containers for stateful ops,
      yields the container name.
  """
  original_container = self._container
  try:
    self._container = container_name
    yield self._container
  finally:
    # Restore the enclosing container even if the body raised.
    self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
  """Context manager for `control_dependencies()`."""

  def __init__(self, graph, control_inputs):
    """Create a new `_ControlDependenciesController`.

    A `_ControlDependenciesController` is the context manager for
    `with tf.control_dependencies()` blocks.  These normally nest,
    as described in the documentation for `control_dependencies()`.

    The `control_inputs` argument list control dependencies that must be
    added to the current set of control dependencies.  Because of
    uniquification the set can be empty even if the caller passed a list of
    ops.  The special value `None` indicates that we want to start a new
    empty set of control dependencies instead of extending the current set.
    In that case we also clear the current control flow context, which is an
    additional mechanism to add control dependencies.

    Args:
      graph: The graph that this controller is managing.
      control_inputs: List of ops to use as control inputs in addition
        to the current control dependencies.  None to indicate that
        the dependencies should be cleared.
    """
    self._graph = graph
    if control_inputs is None:
      # "Clear" mode: enter with an empty dependency set and a fresh stack.
      self._control_inputs = []
      self._new_stack = True
    else:
      self._control_inputs = control_inputs
      self._new_stack = False
    # Ops created inside this context; used to prune redundant control edges
    # (see Graph._control_dependencies_for_inputs).
    self._seen_nodes = set()
    # Saved graph state; populated in __enter__ when _new_stack is True.
    self._old_stack = None
    self._old_control_flow_context = None

  # pylint: disable=protected-access

  def __enter__(self):
    if self._new_stack:
      # Clear the control_dependencies graph.
      self._old_stack = self._graph._control_dependencies_stack
      self._graph._control_dependencies_stack = []
      # Clear the control_flow_context too.
      self._old_control_flow_context = self._graph._get_control_flow_context()
      self._graph._set_control_flow_context(None)
    self._graph._push_control_dependencies_controller(self)

  def __exit__(self, unused_type, unused_value, unused_traceback):
    self._graph._pop_control_dependencies_controller(self)
    if self._new_stack:
      # Restore the stack and control flow context saved in __enter__.
      self._graph._control_dependencies_stack = self._old_stack
      self._graph._set_control_flow_context(self._old_control_flow_context)

  # pylint: enable=protected-access

  @property
  def control_inputs(self):
    # The (possibly empty) list of control-input ops for this context.
    return self._control_inputs

  def add_op(self, op):
    """Records that `op` was created inside this context."""
    self._seen_nodes.add(op)

  def op_in_group(self, op):
    """Returns True if `op` was created inside this context."""
    return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
  """Installs `controller` as the innermost control-dependency context."""
  self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
  """Removes `controller`, which must be the innermost active context."""
  # Contexts must nest, so the controller being popped has to be on top.
  assert self._control_dependencies_stack[-1] is controller
  self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_tensors):
"""For an op that takes `input_tensors` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_tensors: The direct data dependencies for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
input_ops = set([t.op for t in input_tensors])
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
  """Returns a context manager that specifies control dependencies.

  Use with the `with` keyword to specify that all operations constructed
  within the context should have control dependencies on
  `control_inputs`. For example:

  ```python
  with g.control_dependencies([a, b, c]):
    # `d` and `e` will only run after `a`, `b`, and `c` have executed.
    d = ...
    e = ...
  ```

  Multiple calls to `control_dependencies()` can be nested, and in
  that case a new `Operation` will have control dependencies on the union
  of `control_inputs` from all active contexts.

  ```python
  with g.control_dependencies([a, b]):
    # Ops constructed here run after `a` and `b`.
    with g.control_dependencies([c, d]):
      # Ops constructed here run after `a`, `b`, `c`, and `d`.
  ```

  You can pass None to clear the control dependencies:

  ```python
  with g.control_dependencies([a, b]):
    # Ops constructed here run after `a` and `b`.
    with g.control_dependencies(None):
      # Ops constructed here run normally, not waiting for either `a` or `b`.
      with g.control_dependencies([c, d]):
        # Ops constructed here run after `c` and `d`, also not waiting
        # for either `a` or `b`.
  ```

  *N.B.* The control dependencies context applies *only* to ops that
  are constructed within the context. Merely using an op or tensor
  in the context does not add a control dependency. The following
  example illustrates this point:

  ```python
  # WRONG
  def my_func(pred, tensor):
    t = tf.matmul(tensor, tensor)
    with tf.control_dependencies([pred]):
      # The matmul op is created outside the context, so no control
      # dependency will be added.
      return t

  # RIGHT
  def my_func(pred, tensor):
    with tf.control_dependencies([pred]):
      # The matmul op is created in the context, so a control dependency
      # will be added.
      return tf.matmul(tensor, tensor)
  ```

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context.  Can also be `None` to clear the control
      dependencies.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.

  Raises:
    TypeError: If `control_inputs` is not a list of `Operation` or
      `Tensor` objects.
  """
  if control_inputs is None:
    return self._ControlDependenciesController(self, None)
  # First convert the inputs to ops, and deduplicate them.
  # NOTE(mrry): Other than deduplication, we do not currently track direct
  # or indirect dependencies between control_inputs, which may result in
  # redundant control inputs.
  control_ops = []
  current = self._current_control_dependencies()
  for c in control_inputs:
    if isinstance(c, IndexedSlices):
      # An IndexedSlices is represented by the op producing its values.
      c = c.op
    c = self.as_graph_element(c)
    if isinstance(c, Tensor):
      c = c.op
    elif not isinstance(c, Operation):
      raise TypeError("Control input must be Operation or Tensor: %s" % c)
    if c not in current:
      # Add only inputs not already active, preserving the caller's order.
      control_ops.append(c)
      current.add(c)
  return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
  """EXPERIMENTAL: A context manager for setting attributes on operators.

  This context manager can be used to add additional
  attributes to operators within the scope of the context.

  For example:

     with ops.Graph().as_default() as g:
       f_1 = Foo()  # No extra attributes
       with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
         f_2 = Foo()  # Additional attribute _a=False
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
           f_3 = Foo()  # Additional attribute _a=False
           with g._attr_scope({"_a": None}):
             f_4 = Foo()  # No additional attributes.

  Args:
    attr_map: A dictionary mapping attr name strings to
      AttrValue protocol buffers or None.

  Returns:
    A context manager that sets the kernel label to be used for one or more
    ops created in that context.

  Raises:
    TypeError: If attr_map is not a dictionary mapping
      strings to AttrValue protobufs.
  """
  if not isinstance(attr_map, dict):
    raise TypeError("attr_map must be a dictionary mapping "
                    "strings to AttrValue protocol buffers")
  # The saved_attrs dictionary stores any currently-set labels that
  # will be overridden by this context manager.
  saved_attrs = {}
  # Install the given attribute
  for name, attr in attr_map.items():
    if not (isinstance(name, six.string_types) and
            (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
             callable(attr))):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers or "
                      "callables that emit AttrValue protocol buffers")
    try:
      saved_attrs[name] = self._attr_scope_map[name]
    except KeyError:
      # No pre-existing attribute under this name; nothing to save.
      pass
    if attr is None:
      # NOTE(review): `None` removes the attribute; this `del` raises
      # KeyError when `name` is not currently set — confirm callers
      # guarantee it is (same applies to the restore loop below).
      del self._attr_scope_map[name]
    else:
      self._attr_scope_map[name] = attr
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the attributes set for this context, and restore any saved
    # attributes.
    for name, attr in attr_map.items():
      try:
        self._attr_scope_map[name] = saved_attrs[name]
      except KeyError:
        del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
  """EXPERIMENTAL: A context manager for setting kernel labels.

  This context manager can be used to select particular
  implementations of kernels within the scope of the context.

  For example:

      with ops.Graph().as_default() as g:
        f_1 = Foo()  # Uses the default registered kernel for the Foo op.
        with g.kernel_label_map({"Foo": "v_2"}):
          f_2 = Foo()  # Uses the registered kernel with label "v_2"
                       # for the Foo op.
          with g.kernel_label_map({"Foo": "v_3"}):
            f_3 = Foo()  # Uses the registered kernel with label "v_3"
                         # for the Foo op.
            with g.kernel_label_map({"Foo": ""}):
              f_4 = Foo()  # Uses the default registered kernel
                           # for the Foo op.

  Args:
    op_to_kernel_label_map: A dictionary mapping op type strings to
      kernel label strings.

  Returns:
    A context manager that sets the kernel label to be used for one or more
    ops created in that context.

  Raises:
    TypeError: If op_to_kernel_label_map is not a dictionary mapping
      strings to strings.
  """
  if not isinstance(op_to_kernel_label_map, dict):
    raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                    "strings to strings")
  # The saved_labels dictionary stores any currently-set labels that
  # will be overridden by this context manager.
  saved_labels = {}
  # Install the given label
  for op_type, label in op_to_kernel_label_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(label, six.string_types)):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    try:
      saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
    except KeyError:
      # No pre-existing label for this op type; nothing to save.
      pass
    self._op_to_kernel_label_map[op_type] = label
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the labels set for this context, and restore any saved labels.
    for op_type, label in op_to_kernel_label_map.items():
      try:
        self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
      except KeyError:
        del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
  """EXPERIMENTAL: A context manager for overriding gradient functions.

  This context manager can be used to override the gradient function
  that will be used for ops within the scope of the context.

  For example:

  ```python
  @tf.RegisterGradient("CustomSquare")
  def _custom_square_grad(op, grad):
    # ...

  with tf.Graph().as_default() as g:
    c = tf.constant(5.0)
    s_1 = tf.square(c)  # Uses the default gradient for tf.square.
    with g.gradient_override_map({"Square": "CustomSquare"}):
      s_2 = tf.square(s_1)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
  ```

  Args:
    op_type_map: A dictionary mapping op type strings to alternative op
      type strings.

  Returns:
    A context manager that sets the alternative op type to be used for one
    or more ops created in that context.

  Raises:
    TypeError: If `op_type_map` is not a dictionary mapping strings to
      strings.
  """
  if not isinstance(op_type_map, dict):
    raise TypeError("op_type_map must be a dictionary mapping "
                    "strings to strings")
  # The saved_mappings dictionary stores any currently-set mappings that
  # will be overridden by this context manager.
  saved_mappings = {}
  # Install the given label
  for op_type, mapped_op_type in op_type_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(mapped_op_type, six.string_types)):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    try:
      saved_mappings[op_type] = self._gradient_override_map[op_type]
    except KeyError:
      # No pre-existing override for this op type; nothing to save.
      pass
    self._gradient_override_map[op_type] = mapped_op_type
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the labels set for this context, and restore any saved labels.
    for op_type, mapped_op_type in op_type_map.items():
      try:
        self._gradient_override_map[op_type] = saved_mappings[op_type]
      except KeyError:
        del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
  """Marks the given `tensor` as unfeedable in this graph."""
  # Membership in this set is reported by is_feedable().
  self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
  """Returns `True` if and only if `tensor` is feedable."""
  if tensor in self._unfeedable_tensors:
    return False
  return True
def prevent_fetching(self, op):
  """Marks the given `op` as unfetchable in this graph."""
  # Membership in this set is reported by is_fetchable().
  self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
  """Returns `True` if and only if `tensor_or_op` is fetchable."""
  if isinstance(tensor_or_op, Tensor):
    # A tensor's fetchability is determined by its producing op.
    return tensor_or_op.op not in self._unfetchable_ops
  return tensor_or_op not in self._unfetchable_ops
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See @{tf.Graph.device} for more details.

  Args:
    device_name_or_function: The device name or function to use in
      the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.
  """
  if context.in_graph_mode():
    return get_default_graph().device(device_name_or_function)
  # TODO(agarwal): support device functions in EAGER mode.
  return context.device(device_name_or_function)
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  # Delegates to the current default graph's container() scope.
  return get_default_graph().container(container_name)
def colocate_with(op, ignore_existing=False):
  """Wrapper for `Graph.colocate_with()` using the default graph.

  In graph mode this delegates to the default graph.  In eager mode there is
  no colocation machinery, so colocation is approximated by entering `op`'s
  device scope, or a no-op context when `op` is None.

  Args:
    op: The op to colocate all created ops with, or `None`.
    ignore_existing: If true, only applies colocation of this op within
      the context (graph mode only).

  Returns:
    A context manager.
  """
  if context.in_graph_mode():
    return get_default_graph().colocate_with(op, ignore_existing)
  else:
    if op is not None:
      # Eager approximation: run subsequent ops on the same device as `op`.
      return device(op.device)
    else:
      # NOTE(review): `ignore_existing` is ignored in eager mode — confirm
      # this is intentional.
      return _null_contextmanager()
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See @{tf.Graph.control_dependencies} for more details.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context.  Can also be `None` to clear the control
      dependencies.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  # Eager execution runs ops immediately, so control dependencies are moot.
  if not context.in_graph_mode():
    return _null_contextmanager()
  return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    # When True, get_controller() requires strict LIFO nesting of contexts.
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost default object, or None if the stack is empty."""
    return self.stack[-1] if len(self.stack) >= 1 else None

  def reset(self):
    """Drops every object on the stack."""
    self.stack = []

  def is_cleared(self):
    """Returns True if no default object is currently installed."""
    return not self.stack

  @property
  def enforce_nesting(self):
    # Whether get_controller() asserts LIFO discipline on exit.
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    try:
      self.stack.append(default)
      yield default
    finally:
      if self._enforce_nesting:
        # The object being removed must still be on top of the stack.
        if self.stack[-1] is not default:
          raise AssertionError(
              "Nesting violated for default stack of %s objects" %
              type(default))
        self.stack.pop()
      else:
        # Non-nested mode: remove `default` wherever it sits in the stack.
        self.stack.remove(default)
# Thread-local stack of sessions installed via `default_session()`.
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Python "with" handler for defining a default session.

  This function provides a means of registering a session for handling
  Tensor.eval() and Operation.run() calls. It is primarily intended for use
  by session.Session, but can be used with any object that implements
  the Session.run() interface.

  Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
  invocations within the scope of a block should be executed by a particular
  session.

  The default session applies to the current thread only, so it is always
  possible to inspect the call stack and determine the scope of a default
  session. If you create a new thread, and wish to use the default session
  in that thread, you must explicitly add a "with ops.default_session(sess):"
  block in that thread's function.

  Example:
    The following code examples are equivalent:

    # 1. Using the Session object directly:
    sess = ...
    c = tf.constant(5.0)
    sess.run(c)

    # 2. Using default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      result = c.eval()

    # 3. Overriding default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      with ops.default_session(...):
        c.eval(session=sess)

  Args:
    session: The session to be installed as the default session.

  Returns:
    A context manager for the default session.
  """
  # The returned context manager pushes `session` onto the thread-local
  # stack on entry and pops it on exit.
  return _default_session_stack.get_controller(session)
def get_default_session():
  """Returns the default session for the current thread.

  The returned `Session` will be the innermost session on which a
  `Session` or `Session.as_default()` context has been entered.

  NOTE: The default session is a property of the current thread. If you
  create a new thread, and wish to use the default session in that
  thread, you must explicitly add a `with sess.as_default():` in that
  thread's function.

  Returns:
    The default `Session` being used in the current thread.
  """
  # The stack is thread-local; returns None when nothing is installed.
  return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of objects for providing an implicit default graph."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    # Lazily-created process-wide fallback graph; see _GetGlobalDefaultGraph.
    self._global_default_graph = None

  def get_default(self):
    """Override that returns a global default if the stack is empty."""
    innermost = super(_DefaultGraphStack, self).get_default()
    if innermost is not None:
      return innermost
    return self._GetGlobalDefaultGraph()

  def _GetGlobalDefaultGraph(self):
    # Create the shared fallback graph on first use only.
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or set
      #   provide some other feedback to prevent confusion when a mixture of
      #   the global default graph and an explicit graph are combined in the
      #   same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    # Clear both the thread-local stack and the process-wide fallback.
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None
_default_graph_stack = _DefaultGraphStack()
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a per-thread property; this function only
  affects the calling thread. Invoking it while a `tf.Session` or
  `tf.InteractiveSession` is active is undefined behavior, as is any
  subsequent use of `tf.Operation` or `tf.Tensor` objects created before
  the reset.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  if _default_graph_stack.is_cleared():
    _default_graph_stack.reset()
  else:
    raise AssertionError("Do not use tf.reset_default_graph() to clear "
                         "nested graphs. If you need a cleared graph, "
                         "exit the nesting and create a new graph.")
def get_default_graph():
  """Returns the default graph for the current thread.

  The result is the innermost graph on which a `Graph.as_default()` context
  has been entered, falling back to a process-wide default graph when no
  explicit one is active.

  NOTE: The default graph is a per-thread property. A newly created thread
  must enter `with g.as_default():` itself to use `g` as its default.

  Returns:
    The default `Graph` being used in the current thread.
  """
  return _default_graph_stack.get_default()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." % (item,
original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include
      `Tensor`, `Operation`, and other objects that may be converted to a
      graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from
      it, or if the inputs are from multiple graphs, or we could not find a
      graph and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  # Case 1: inside function construction, the function's graph always wins.
  if get_default_graph().building_function:
    return get_default_graph()

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean
    # this up.
    graph_element = None
    # Exact-type check on purpose: only plain Tensor (not subclasses) and
    # Operation/_TensorLike instances are accepted directly here.
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      # Anything else gets one chance to be resolved to a graph element.
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph-valued input fixes the graph for the rest of the scan.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or get_default_graph()
class GraphKeys(object):
  """Standard names to use for graph collections.

  The standard library uses various well-known names to collect and
  retrieve values associated with a graph. For example, the
  `tf.Optimizer` subclasses default to optimizing the variables
  collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
  specified, but it is also possible to pass an explicit list of
  variables.

  The following standard keys are defined:

  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
    across distributed environment (model variables are subset of these). See
    @{tf.global_variables}
    for more details.
    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporarily variables, like counters.
    Note: use `tf.contrib.framework.local_variable` to add to this collection.
  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
    model for inference (feed forward). Note: use
    `tf.contrib.framework.model_variable` to add to this collection.
  * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
    be trained by an optimizer. See
    @{tf.trainable_variables}
    for more details.
  * `SUMMARIES`: the summary `Tensor` objects that have been created in the
    graph. See
    @{tf.summary.merge_all}
    for more details.
  * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
    produce input for a computation. See
    @{tf.train.start_queue_runners}
    for more details.
  * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
    keep moving averages. See
    @{tf.moving_average_variables}
    for more details.
  * `REGULARIZATION_LOSSES`: regularization losses collected during graph
    construction.

  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
  RESOURCES = "resources"
  # Key to collect all shared resources used in this graph which need to be
  # initialized once per session.
  LOCAL_RESOURCES = "local_resources"
  # Trainable resource-style variables.
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"

  # Keys naming well-known ops looked up by training/eval utilities.
  # Key to indicate various ops.
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"

  # Used to count the number of evaluations performed during a single
  # evaluation run.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"

  # Key for control flow context.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"

  # Key for streaming model ports.
  # NOTE(yuanbyu): internal and experimental.
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    # Deprecated alias for GLOBAL_VARIABLES; warns on every access.
    logging.warning("VARIABLES collection name is deprecated, "
                    "please use GLOBAL_VARIABLES instead; "
                    "VARIABLES will be removed after 2017-03-02.")
    return cls.GLOBAL_VARIABLES
def add_to_collection(name, value):
  """Wrapper for `Graph.add_to_collection()` using the default graph.

  See @{tf.Graph.add_to_collection} for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection.
  """
  default_graph = get_default_graph()
  default_graph.add_to_collection(name, value)
def add_to_collections(names, value):
  """Wrapper for `Graph.add_to_collections()` using the default graph.

  See @{tf.Graph.add_to_collections} for more details.

  Args:
    names: The key for the collections. The `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collections.
  """
  default_graph = get_default_graph()
  default_graph.add_to_collections(names, value)
def get_collection_ref(key):
  """Wrapper for `Graph.get_collection_ref()` using the default graph.

  See @{tf.Graph.get_collection_ref} for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection. Note that this
    returns the collection list itself, which can be modified in place to
    change the collection.
  """
  default_graph = get_default_graph()
  return default_graph.get_collection_ref(key)
def get_collection(key, scope=None):
  """Wrapper for `Graph.get_collection()` using the default graph.

  See @{tf.Graph.get_collection} for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    scope: (Optional.) If supplied, the resulting list is filtered to include
      only items whose `name` attribute matches using `re.match`. Items
      without a `name` attribute are never returned if a scope is supplied
      and the choice or `re.match` means that a `scope` without special
      tokens filters by prefix.

  Returns:
    The list of values in the collection with the given `name`, or
    an empty list if no value has been added to that collection. The
    list contains the values in the order under which they were
    collected.
  """
  default_graph = get_default_graph()
  return default_graph.get_collection(key, scope)
def get_all_collection_keys():
  """Returns a list of collections used in the default graph."""
  default_graph = get_default_graph()
  return default_graph.get_all_collection_keys()
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def name_scope(name, default_name=None, values=None):
  """Returns a context manager for use when defining a Python op.

  This context manager validates that the given `values` are from the
  same graph, makes that graph the default graph, and pushes a
  name scope in that graph (see
  @{tf.Graph.name_scope}
  for more details on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```

  Args:
    name: The name argument that is passed to the op function.
    default_name: The default name to use if the `name` argument is `None`.
    values: The list of `Tensor` arguments that are passed to the op function.

  Returns:
    A context manager for use in defining Python ops. Yields the name scope.

  Raises:
    ValueError: if neither `name` nor `default_name` is provided
      but `values` are.
  """
  n = default_name if name is None else name
  if context.in_eager_mode():
    # Eager path: scope names are tracked on the eager context, not a graph.
    if n is None:
      raise ValueError(
          "At least one of name (%s) and default_name (%s) should be provided" %
          (name, default_name))
    ctx = context.context()
    old_name = ctx.scope_name
    scope_name = "%s%s/" % (old_name, n) if old_name else "%s/" % n
    ctx.scope_name = scope_name
    try:
      yield scope_name
    finally:
      # Always restore the previous scope, even if the body raised.
      ctx.scope_name = old_name
  else:
    if n is None and values is not None:
      # We only raise an error if values is not None (provided) because
      # currently tf.name_scope(None) (values=None then) is sometimes used as
      # an idiom to reset to top scope.
      raise ValueError(
          "At least one of name (%s) and default_name (%s) must be provided." %
          (name, default_name))
    if values is None:
      values = []
    # Graph path: infer the graph from the inputs, then delegate to it.
    g = _get_graph_from_inputs(values)
    with g.as_default(), g.name_scope(n) as scope:
      yield scope
# pylint: enable=g-doc-return-or-yield
def strip_name_scope(name, export_scope):
  """Removes name scope from a name.

  Args:
    name: A `string` name.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    Name with name scope removed, or the original name if export_scope
    is None.
  """
  if not export_scope:
    return name
  try:
    # Strips export_scope/, export_scope///,
    # ^export_scope/, loc:@export_scope/.
    pattern = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
    return re.sub(pattern, r"\1\2", compat.as_str(name), count=1)
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
def prepend_name_scope(name, import_scope):
  """Prepends name scope to a name.

  Args:
    name: A `string` name.
    import_scope: Optional `string`. Name scope to add.

  Returns:
    Name with name scope added, or the original name if import_scope
    is None.
  """
  if not import_scope:
    return name
  try:
    # Insert the scope after any leading `^` / `loc:@` control markers.
    pattern = r"([\^]|loc:@|^)(.*)"
    return re.sub(pattern, r"\1" + import_scope + r"/\2",
                  compat.as_str(name))
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  # Warn on every use; this wrapper only reorders arguments for name_scope.
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for collection_name.

  `to_proto` converts a Python object to the corresponding protocol buffer
  and returns it; `from_proto` converts a protocol buffer back into a Python
  object and returns it.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..
    to_proto: Function that implements Python object to protobuf conversion.
    from_proto: Function that implements protobuf to Python object conversion.

  Raises:
    TypeError: If a supplied `to_proto` or `from_proto` is not callable.
  """
  if to_proto:
    if not callable(to_proto):
      raise TypeError("to_proto must be callable.")
  if from_proto:
    if not callable(from_proto):
      raise TypeError("from_proto must be callable.")
  registration = (proto_type, to_proto, from_proto)
  _proto_function_registry.register(registration, collection_name)
def get_collection_proto_type(collection_name):
  """Returns the proto_type for collection_name, or None if unregistered."""
  try:
    proto_type, _, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return proto_type
def get_to_proto_function(collection_name):
  """Returns the to_proto function for collection_name, or None if unregistered."""
  try:
    _, to_proto, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return to_proto
def get_from_proto_function(collection_name):
  """Returns the from_proto function for collection_name, or None if unregistered."""
  try:
    _, _, from_proto = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return from_proto
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype,
name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
| 35.852982 | 115 | 0.678387 |
31015490c79dc62d2fa9feae8734845e23f815be | 2,258 | py | Python | nml/cfg_lauer17rse/cfg_lc_ESACCI.py | c3s-magic/ESMValTool | 799150e4784f334262755a39022c72b2d39585c9 | [
"Apache-2.0"
] | null | null | null | nml/cfg_lauer17rse/cfg_lc_ESACCI.py | c3s-magic/ESMValTool | 799150e4784f334262755a39022c72b2d39585c9 | [
"Apache-2.0"
] | null | null | null | nml/cfg_lauer17rse/cfg_lc_ESACCI.py | c3s-magic/ESMValTool | 799150e4784f334262755a39022c72b2d39585c9 | [
"Apache-2.0"
] | 2 | 2017-03-24T04:18:09.000Z | 2020-12-19T06:04:05.000Z | # This is a config file for CCI data and CMIP5 land cover diagnostics
# key list of data sets from CCI and CMIP, depracted
#could be written automatically? It's a copy of namelist_lc_ESACCI.xml with smaller changes
#translatorlist={ 'bare soil' : [['Bare_Soil'],['bare soil']],
# 'natural grass' : [['Natural_Grass'],['grass']],
# 'managed grass and crops' : [['Managed_Grass'],['crop','pasture']],
# 'shrubs' : [['Shrub_Broadleaf_Evergreen','Shrub_Needleleaf_Evergreen','Shrub_Broadleaf_Deciduous','Shrub_Needleleaf_Deciduous'],['shrub']],
# 'forest' : [['Tree_Broadleaf_Evergreen','Tree_Needleleaf_Evergreen','Tree_Broadleaf_Deciduous','Tree_Needleleaf_Deciduous'],['tree']]
#}
#translatorlist={ 'bare soil' : [['Bare_Soil'],['bare soil']],
# 'grass and crop' : [['Managed_Grass','Natural_Grass'],['crop','pasture','grass']],
# 'shrub and forest' : [['Tree_Broadleaf_Evergreen','Tree_Needleleaf_Evergreen','Tree_Broadleaf_Deciduous','Tree_Needleleaf_Deciduous','Shrub_Broadleaf_Evergreen','Shrub_Needleleaf_Evergreen','Shrub_Broadleaf_Deciduous','Shrub_Needleleaf_Deciduous'],['tree','shrub']],
#}
translatorlist={ 'bare soil' : [['Bare_Soil'],['bare soil']],
'grass and crop' : [['Managed_Grass','Natural_Grass'],['crop','pasture','grass']],
'shrub and forest' : [['Tree_Broadleaf_Evergreen','Tree_Needleleaf_Evergreen','Tree_Broadleaf_Deciduous','Tree_Needleleaf_Deciduous','Shrub_Broadleaf_Evergreen','Shrub_Needleleaf_Evergreen','Shrub_Broadleaf_Deciduous','Shrub_Needleleaf_Deciduous'],['tree','shrub']],
}
# general flags for regionalization
regionalization = False
shape = "continents"
shapeNames = 2  # column of the name values
#start_year=2005
#stop_year=2005
# flags for basic diagnostics
globmeants = True
# NOTE(review): "mima_*" appears to hold [min, max] plot ranges — confirm
# against the diagnostic code that consumes this config.
mima_globmeants=[0,100]
cmap_globmeants='YlGn'  # matplotlib colormap name for the global-mean plot
mima_ts=[0,100]
mima_mts=[0,100]
portrait = False
globmeandiff = True
mima_globmeandiff=[-100,100]
mima_globmeandiff_r=[-1,1]
trend = False
trend_p = False
# flags for specific diagnostics
single_years=False #TODO rename variable
mima_single_year=[-100,100]
std_factor=4
# plotting parameters
projection={'projection': 'robin', 'lon_0': 180., 'lat_0': 0.}
| 46.081633 | 284 | 0.711692 |
cafb2d926f42656153980ecebb195a295258e7d8 | 1,865 | py | Python | atss_core/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py | Odin-son/ATSS | b6acccae1d06dfd8e4ee743b8f446e3e82938c15 | [
"BSD-2-Clause"
] | 1,039 | 2019-12-05T14:56:22.000Z | 2022-03-30T05:44:49.000Z | atss_core/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py | ObjectDetection/ATSS | e111ee9927b408c5a762d356be4bd08f63f04468 | [
"BSD-2-Clause"
] | 96 | 2019-12-06T01:37:37.000Z | 2022-03-11T10:23:37.000Z | atss_core/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py | ObjectDetection/ATSS | e111ee9927b408c5a762d356be4bd08f63f04468 | [
"BSD-2-Clause"
] | 174 | 2019-12-06T01:42:25.000Z | 2022-03-28T02:57:55.000Z | from torch import nn
from torch.nn import functional as F
from atss_core.modeling import registry
from atss_core.modeling.poolers import Pooler
from atss_core.layers import Conv2d
@registry.ROI_KEYPOINT_FEATURE_EXTRACTORS.register("KeypointRCNNFeatureExtractor")
class KeypointRCNNFeatureExtractor(nn.Module):
def __init__(self, cfg, in_channels):
super(KeypointRCNNFeatureExtractor, self).__init__()
resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
self.pooler = pooler
input_features = in_channels
layers = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS
next_feature = input_features
self.blocks = []
for layer_idx, layer_features in enumerate(layers, 1):
layer_name = "conv_fcn{}".format(layer_idx)
module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(module.bias, 0)
self.add_module(layer_name, module)
next_feature = layer_features
self.blocks.append(layer_name)
self.out_channels = layer_features
def forward(self, x, proposals):
x = self.pooler(x, proposals)
for layer_name in self.blocks:
x = F.relu(getattr(self, layer_name)(x))
return x
def make_roi_keypoint_feature_extractor(cfg, in_channels):
    """Instantiate the keypoint feature extractor named in the config."""
    extractor_cls = registry.ROI_KEYPOINT_FEATURE_EXTRACTORS[
        cfg.MODEL.ROI_KEYPOINT_HEAD.FEATURE_EXTRACTOR
    ]
    return extractor_cls(cfg, in_channels)
| 36.568627 | 87 | 0.694906 |
5ca400ce6f503631ddad505e1cae01323eab1e1e | 2,235 | py | Python | no_PII_Storage_folder.py | jyoonkim/Test | c822ff057c48b10afe59146b434a2ac3db0dd712 | [
"MIT"
] | null | null | null | no_PII_Storage_folder.py | jyoonkim/Test | c822ff057c48b10afe59146b434a2ac3db0dd712 | [
"MIT"
] | null | null | null | no_PII_Storage_folder.py | jyoonkim/Test | c822ff057c48b10afe59146b434a2ac3db0dd712 | [
"MIT"
] | null | null | null | import os
import hashlib
def hash_username(n):
"""Return the digest of a hashed username using RSA's MD5 algorithm
:param n: The username to hash
:returns: The hashed usernames digest
:rtype: String
"""
return hashlib.md5(n.encode('utf-8')).hexdigest()
def hash_column(df_column):
"""Hash all usernames in a pandas dataframe column
:param df_column: The column to hash the usernames of
:returns: The column with its usernames hashed
:rtype: pandas.DataFrame
"""
return df_column.apply(lambda x: hash_username(x))
# def hash_username(n):
# return hashlib.md5(n.encode('utf-8')).hexdigest()
def hash_foldername(n):
folder_name = n.split('_')
assert len(folder_name) > 1, folder_name
assert folder_name[0] == 'ai'
username = folder_name[1]
if len(folder_name) > 2:
username = '_'.join(folder_name[1:])
n = '{}_{}'.format(folder_name[0], hash_username(username))
return n
def hash_properties(n):
data = []
with open(n, 'r') as f:
for line in f:
data.append(line)
if not data:
return
assert(data[0].startswith('main=appinventor.ai_')), data[0]
main_element = data[0].split('=')[1].split('.')
assert len(main_element) == 4
username = hash_foldername(main_element[1])
main_element[1] = username
data[0] = 'main={}'.format('.'.join(main_element))
with open(n, 'w') as f:
for d in data:
f.write(d)
def main(path):
for root, dirs, files in os.walk(path):
for _dir in dirs:
if _dir.startswith('ai'):
dst = hash_foldername(_dir)
os.rename(os.path.join(root,_dir), os.path.join(root,dst))
for _file in files:
if _file == 'project.properties':
hash_properties(os.path.join(root,_file))
pass
elif _file.endswith('.sqlite'):
path = os.path.join(root, _file)
os.remove(path)
assert not os.path.exists(path)
if __name__ == '__main__':
STORAGE_PATH = '../AppInventorQ12019_data/STORAGE'
assert(os.path.exists(STORAGE_PATH))
main(STORAGE_PATH)
| 27.592593 | 74 | 0.597763 |
e28c5e420c69b16bc7ec2d662dc08a28931ff17e | 4,937 | py | Python | zerver/lib/exceptions.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | null | null | null | zerver/lib/exceptions.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | 15 | 2020-06-05T18:44:15.000Z | 2022-03-11T23:26:03.000Z | zerver/lib/exceptions.py | Supermanu/zulip | 26f6d708c2e30cfe50d9d61031edb759e8117596 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from enum import Enum
from typing import Any, Dict, List, Optional, Text, Type
from django.core.exceptions import PermissionDenied
class AbstractEnum(Enum):
    '''An enumeration whose members are used strictly for their names.

    Members are declared with `NAME = ()`; each receives a sequential
    integer `_value_`, but the value is considered an implementation
    detail and every value-based `Enum` API is disabled.
    '''

    def __new__(cls):
        # type: (Type[AbstractEnum]) -> AbstractEnum
        member = object.__new__(cls)
        # Sequential placeholder value: 1 for the first member, 2 for the
        # second, and so on.
        member._value_ = len(cls.__members__) + 1
        return member

    # Override all the `Enum` methods that use `_value_`.
    def __repr__(self):
        # type: () -> str
        return str(self)

    def value(self):
        # type: () -> None
        assert False

    def __reduce_ex__(self, proto):
        # type: (int) -> None
        assert False
class ErrorCode(AbstractEnum):
    # Machine-readable error codes exposed in JsonableError payloads.
    # Members are used by name only (see AbstractEnum).
    BAD_REQUEST = ()  # Generic name, from the name of HTTP 400.
    REQUEST_VARIABLE_MISSING = ()
    REQUEST_VARIABLE_INVALID = ()
    BAD_IMAGE = ()
    QUOTA_EXCEEDED = ()
    BAD_NARROW = ()
    UNAUTHORIZED_PRINCIPAL = ()
    BAD_EVENT_QUEUE_ID = ()
    CSRF_FAILED = ()
    INVITATION_FAILED = ()
class JsonableError(Exception):
    '''A standardized error format we can turn into a nice JSON HTTP response.

    Three usage styles are supported:

    * Plain message (machine-unreadable beyond its generic code):

        raise JsonableError(_("No such widget: {}").format(widget_name))

    * Message plus an explicit error code, so clients can distinguish
      this error type:

        raise JsonableError(_("No such widget: {}").format(widget_name),
                            ErrorCode.NO_SUCH_WIDGET)

    * Fully structured subclass, exposing both a code and named data
      fields to server and client code:

        class NoSuchWidgetError(JsonableError):
            code = ErrorCode.NO_SUCH_WIDGET
            data_fields = ['widget_name']

            def __init__(self, widget_name):
                # type: (str) -> None
                self.widget_name = widget_name  # type: str

            @staticmethod
            def msg_format():
                # type: () -> str
                return _("No such widget: {widget_name}")

        raise NoSuchWidgetError(widget_name)

    Subclasses may also override `http_status_code`.
    '''

    # Override this in subclasses, or just pass a `code` argument
    # to the JsonableError constructor.
    code = ErrorCode.BAD_REQUEST  # type: ErrorCode

    # Override this in subclasses if providing structured data:
    # names of instance attributes exposed in the JSON payload and
    # available as format args to msg_format().
    data_fields = []  # type: List[str]

    # Optionally override this in subclasses to return a different HTTP
    # status, like 403 or 404.
    http_status_code = 400  # type: int

    def __init__(self, msg, code=None):
        # type: (Text, Optional[ErrorCode]) -> None
        if code is not None:
            self.code = code

        # `_msg` is an implementation detail of `JsonableError` itself,
        # used only by the base msg_format().
        self._msg = msg  # type: Text

    @staticmethod
    def msg_format():
        # type: () -> Text
        '''Override in subclasses. Gets the items in `data_fields` as format args.

        This should return (a translation of) a string literal.
        The reason it's not simply a class attribute is to allow
        translation to work.
        '''
        # Secretly this gets one more format arg not in `data_fields`:
        # `_msg`, which carries the plain-message constructor argument.
        return '{_msg}'

    #
    # Infrastructure -- not intended to be overridden in subclasses.
    #

    @property
    def msg(self):
        # type: () -> Text
        # Expose each declared data field, plus the hidden `_msg` slot,
        # to the subclass's format string.
        format_data = {field: getattr(self, field)
                       for field in self.data_fields}
        format_data['_msg'] = getattr(self, '_msg', None)
        return self.msg_format().format(**format_data)

    @property
    def data(self):
        # type: () -> Dict[str, Any]
        payload = {field: getattr(self, field)
                   for field in self.data_fields}
        payload['code'] = self.code.name
        return payload

    def to_json(self):
        # type: () -> Dict[str, Any]
        result = {'result': 'error', 'msg': self.msg}
        result.update(self.data)
        return result

    def __str__(self):
        # type: () -> str
        return self.msg
class RateLimited(PermissionDenied):
    # Raised when a client exceeds its request rate limit; subclasses
    # Django's PermissionDenied so middleware maps it to a 403-style denial.
    def __init__(self, msg=""):
        # type: (str) -> None
        super(RateLimited, self).__init__(msg)
| 32.267974 | 83 | 0.616366 |
23a078ce7f3ad5802f663fe637776cccbb2d74ff | 405 | py | Python | Codes/Advance python/enum.py | shohan4556/machine-learning-course-notes | 981f3d6e9861cbee0f4dec45b1d2e6a214d2a051 | [
"MIT"
] | 8 | 2019-10-12T17:08:58.000Z | 2020-02-26T02:54:01.000Z | Codes/Advance python/enum.py | shohan4556/machine-learning-course-notes | 981f3d6e9861cbee0f4dec45b1d2e6a214d2a051 | [
"MIT"
] | null | null | null | Codes/Advance python/enum.py | shohan4556/machine-learning-course-notes | 981f3d6e9861cbee0f4dec45b1d2e6a214d2a051 | [
"MIT"
] | 5 | 2019-10-30T03:50:20.000Z | 2020-07-11T20:53:47.000Z | # Create a list of strings: mutants
mutants = ['charles xavier',
'bobby drake',
'kurt wagner',
'max eisenhardt',
'kitty pryde']
# Create a list of tuples: mutant_list
mutant_list = enumerate(mutants)
print(mutant_list)
for index1, index2 in enumerate(mutants):
print(index1, index2)
for i, j in enumerate(mutants, start = 10):
print(i,j) | 23.823529 | 43 | 0.617284 |
ebe55198f20cd9636efaba277d7c6cd26762ab33 | 744 | py | Python | mvp/mvp/presenters/main_presenter.py | 2110521-2563-1-Software-Architecture/four-guys-one-cup-assignment3 | fb771f7521a15aea5c5b563ff87dbcd83edbec77 | [
"MIT"
] | null | null | null | mvp/mvp/presenters/main_presenter.py | 2110521-2563-1-Software-Architecture/four-guys-one-cup-assignment3 | fb771f7521a15aea5c5b563ff87dbcd83edbec77 | [
"MIT"
] | null | null | null | mvp/mvp/presenters/main_presenter.py | 2110521-2563-1-Software-Architecture/four-guys-one-cup-assignment3 | fb771f7521a15aea5c5b563ff87dbcd83edbec77 | [
"MIT"
] | null | null | null | from typing import List
from mvp.contracts.main_contract import MainContract
from mvp.models.repositories.note_repository import NoteRepository
from mvp.models.entities.note import Note
class MainPresenter(MainContract.Presenter):
    """Presenter mediating between the note view and the note repository."""

    def __init__(self, view: MainContract.View, note_repository: NoteRepository):
        super().__init__(view)
        self.note_repository = note_repository
        # Your code here

    def add_note(self, note: str):
        """Persist a new note, then refresh the view."""
        self.note_repository.add_note(note)
        self.get_all_notes()

    def clear_all(self):
        """Delete every note, then refresh the view."""
        self.note_repository.clear_all_notes()
        self.get_all_notes()

    def get_all_notes(self):
        """Push the repository's current notes into the view."""
        notes = self.note_repository.get_all_notes()
        self.view.update_view(notes)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.